index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
13,300 | 4f0288bdc3f1b27a95392909cb3c1c8bb4cc1139 | # Linear Search - going through each and every element in a list.
# Binary Search - splitting the list in half and searching through the half before moving on
print("Example 1: Linear Search")
# find if 0 is present in the list
list_a = [1,3,5,7,0,44,6,2,4]
def linear_search(list, target=0):
    """Scan *list* front to back for *target* (default 0, matching the demo).

    Returns "<target> Found!" on a hit, "Nothing found" otherwise. O(n).

    NOTE(review): the first parameter shadows the builtin `list`; the name is
    kept for backward compatibility with existing positional callers.
    """
    for item in list:
        if item == target:
            return f"{target} Found!"
    return "Nothing found"
print(linear_search(list_a))
print("Example 2: Binary Search") # for a sorted list
# The idea is to keep comparing the element with the middle value. Thus, each search we eliminate one half of the list.
# 1. Two Pointers, First and Last (these increment or decrement to limit the part of list to be searched
# 2. Find the middle element = [length of list] / 2
# 3. compare the middle element with the value to be found
# 4. check if the middle element is lesser to the value to be found
# - IF yes, element must lie on the second half of the list
# - IF no, element must lie on the first half of the list.
print("Find whether 14 is present in the given list\n")
given = [2,3,4,5,6,7,8,9,14,54]  # binary search requires sorted input
def binary_search(arr, target):
    """Classic binary search over the sorted list *arr*.

    Prints and returns the index of *target* when present; prints a
    not-found message and returns None otherwise (as the original did,
    implicitly).

    Bug fixed: the original terminated on `si+1 == ei` and narrowed with
    `ei = mi` / `si = mi`, so the first and last elements could never be
    inspected (e.g. 2 at index 0 or 54 at index 9 were reported missing,
    and an empty list indexed out of range). The standard `si <= ei` loop
    with `mi ± 1` narrowing fixes all of these.
    """
    si = 0                # Starting Index (floor)
    ei = len(arr) - 1     # Ending Index (ceiling)
    while si <= ei:
        mi = (si + ei) // 2  # Middle Index
        if arr[mi] == target:
            print(f"Found {target} at index {mi}")
            return mi
        elif arr[mi] > target:
            # Target is smaller: continue in the left half.
            ei = mi - 1
        else:
            # Target is larger: continue in the right half.
            si = mi + 1
    print(f"{target} could not be found")
    return None
# Smoke test: 7 sits at index 5 of the sorted demo list above.
binary_search(given, 7) # Test Value 7 should be found at index 5
|
13,301 | 7cbc7525733d9319a9da875a2f841624123c7f17 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 18 13:40:37 2017
@author: Raphael
"""
import random as rd
import time
import Batalha as btl
import json
import os
import Save as sav
import Misc as msc
def Village():
    """Interactive village menu loop.

    Each pass reloads character and player state from disk, clears the
    screen, shows the menu and dispatches on the chosen option until the
    player picks 5) Leave.

    NOTE(review): indentation of this function was reconstructed from a
    whitespace-stripped dump — confirm against the original source.
    """
    # Menu entries ("Things To Do"), printed one per half-second below.
    TTD = ["1)Shop","2)Promote","3)Train","4)Challenge","5)Leave"]
    while True:
        # Re-read state every pass so battles/promotions elsewhere are seen.
        with open ("Chars.json") as chars:
            Char = json.load(chars)
        with open ("Player.json") as play:
            player = json.load(play)
        command="aaaa"  # sentinel: anything outside range(1, 6)
        os.system("cls")#ClearScreen
        print('"Welcome to our village, take a look around and see if you anything interesting."')
        # Prompt until a valid integer 1..5 is entered.
        while command not in range(1,6):
            print("What will you do?")
            for z in TTD:
                time.sleep(0.5)
                print(z)
            command = (input(""))
            try:
                command = int(command)
            except ValueError:
                command = "aaa"  # non-numeric input: keep looping
        if command == 1:
            # Shop: currently just hands out 5 free potions.
            print("We are currently not open for business, but have these as a token of my gratitude for coming by.")
            time.sleep(2)
            print("You got 5 potions!!")
            time.sleep(2)
            with open("Inventario.json","r") as inv:
                Inv = json.load(inv)
            Inv["Potion"]["quant"] += 5
            sav.SaveGameI(Inv)
            pass
        elif command == 2:
            sav.Promote()
            pass
        elif command == 3:
            # Sparring match against the trainer NPC.
            btl.Begin(player,Char["Trainer"])
        elif command == 4:
            # Optional fight against one of three champions (1-in-4 no-show).
            u = input("Are you sure? You're up for a great challenge.(Y or N) ")
            if u.upper() == "Y":
                rand = rd.randint(0,3)
                if rand == 0:
                    btl.Begin(player,Char["Agnes"])
                elif rand == 1:
                    btl.Begin(player,Char["Borin"])
                elif rand == 2:
                    btl.Begin(player,Char["Lala"])
                else:
                    print("No one came...")
                    time.sleep(1)
        elif command == 5:
            break
def Passar(loc):
    """Advance the story from location *loc*.

    Prints transition flavour text, visits a village at the two village
    stops, and returns the name of the next location. The final location
    wraps back to "Apple Woods".
    """
    if loc == "Apple Woods":
        print("Through an opening amongst the trees, you have arrived at the legendary Caves of Light.")
        time.sleep(3)
        return "Caves of Light"
    elif loc == "Caves of Light":
        print("After endless venturing through the mazelike caves, \nyou finally arrive at the exit to the Sundown Plateau.")
        time.sleep(3)
        print("At the plateau, you find the small village of Bertunia.")
        time.sleep(2)
        Village()  # first village stop
        return "Sundown Plateau"
    elif loc == "Sundown Plateau":
        print("On the horizon you spot your next destination, Mt. Legory, and make your way there.")
        time.sleep(3)
        return "Mt. Legory"
    elif loc == "Mt. Legory":
        print("You find an entrance on the cliffside and follow it until you discover the Core Cavern.")
        time.sleep(3)
        return "Core Cavern"
    elif loc == "Core Cavern":
        print("At the exit of the Cavern you find yourself at the Lost Swamp,\n a place few have ever escaped from.")
        time.sleep(3)
        print("Next to swamp there lies Muggle Town, a small fishing village.")
        time.sleep(2)
        Village()  # second village stop
        return "Lost Swamp"
    elif loc == "Lost Swamp":
        print("Treading through the accursed swamp, you find your final destination,\n the Arcmat Ruins, said to be the birthplace of all magic.")
        time.sleep(3)
        return "Arcmat Ruins"
    elif loc == "Arcmat Ruins":
        print("After finishing your epic quest,\n you decide to chill and kill monsters and let off some steam at the ARENA!!!")
        time.sleep(3)
        return "Apple Woods"#"The Arena"
# Module-level load of the weapon table.
# NOTE(review): `Wpn` is not referenced elsewhere in this chunk (Passear
# re-reads Weapons.json itself) — confirm whether this global is still used.
with open ("Weapons.json") as wpns:
    Wpn = json.load(wpns)
def Encount(jog,loc):
    """Trigger a random-encounter battle for player *jog* in location *loc*.

    Picks one enemy uniformly from the location's encounter table
    (loc["inimigos"]) and starts a battle against its character sheet.
    """
    with open ("Chars.json") as chars:
        Char = json.load(chars)
    rend = rd.randint(0, len(loc["inimigos"])-1)  # inclusive upper bound
    print("You found a {0}!".format(loc['inimigos'][rend]))
    time.sleep(2)
    btl.Begin(jog,Char[loc['inimigos'][rend]])
def Passear(jog,loc,step):
    """One exploration step for player *jog* inside location *loc*.

    *step* (0..4) tracks progress through the area; the return value is the
    caller's next step. Steps 0-2 wander (random encounter, treasure chest,
    or nothing); step 3 offers the boss fight; 4 means the area is done.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    dump; the trailing `if step == 4` branch appears unreachable because the
    boss branch always returns — confirm against the original source.
    """
    if step < 3:
        print("You are currently at {0}.".format(loc["nome"]))
        time.sleep(2)
        print("You walk around for a while")
        time.sleep(2)
        rand = rd.randint(0, 10)
        if rand <= loc["encounter"]:
            # Random battle; counts as progress through the area.
            Encount(jog,loc)
            step+=1
        elif rand == loc['loot']:
            a = input("You found a treasure chest, do you wish to open it?(Y ou N?) ")
            if a == 'Y':
                randi = rd.randint(0, len(loc['treasure'])-1)
                print("You found {0}!".format(loc['treasure'][randi]))
                with open("Weapons.json","r") as wps:
                    Wps = json.load(wps)
                time.sleep(1)
                if loc['treasure'][randi] in Wps:
                    # Weapon drop: equip it.
                    msc.Equip(jog,Wps[loc['treasure'][randi]])
                else:
                    # Otherwise it is a technique book; learn its last word.
                    book = loc['treasure'][randi]
                    book = book.split( )
                    jog['Techs'].append(book[-1])
                    print("You learned {}!!!".format(book[-1]))
                    time.sleep(3)
                sav.SaveGameP(jog)
            step += 1
            return step
        else:
            print("You walked for hours with nothing to show for it.")
            time.sleep(2)
        if step == 0:
            step += 1
        if step == 1:
            print("There is still a long path ahead of you.\nYou walk forward towards your objective.")
            time.sleep(2)
            step = 1
            return step
        elif step == 2:
            print("You feel you are halfway there, you continue following the path at ease.")
            time.sleep(2)
            step = 2
            return step
        elif step == 3:
            print("You can already see the next area, the exit is only a small journey away!!\nYou feel an evil presence watching you...")
            time.sleep(2)
            step = 3
            return step
    else:
        # step >= 3: the area boss blocks the exit.
        CT = input("You foresee a big battle ahead. Do you really wish to proceed? (Y or N) ")
        if CT.upper() == "N":
            return 3
        else:
            with open ("Chars.json") as chars:
                Char = json.load(chars)
            print("The big boss of {1}, {0} is in front of you.".format(loc["boss"],loc["nome"]))
            time.sleep(3)
            btl.Begin(jog,Char[loc['boss']])
            return 4
    if step == 4:
        print("You see the exit right in front of you, so you carry on.")
        time.sleep(2)
        return 4
13,302 | 0ebe3298f660421e582967a274ff385af668abc5 | # Launch training
import argparse
import logging
from pathlib import Path
import pandas as pd
from tqdm import tqdm
from lucanode.training import evaluation
from lucanode import loader
from lucanode import nodule_candidates
# Configure tensorflow for memory growth (instead of preallocating upfront)
import tensorflow as tf
config = tf.ConfigProto()  # TF1-style session configuration
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)  # session kept alive as a module global
if __name__ == '__main__':
    # CLI: annotations csv + dataset root + model weights in, candidates csv out.
    parser = argparse.ArgumentParser(description='Retrieve nodule candidates for the given annotations')
    parser.add_argument('annotations_csv', type=str, help="path to the annotations csv file, with ground truth nodules")
    parser.add_argument('dataset_path', type=str, help="path where equalized_spacing, equalized_spacing_lung_masks "
                                                       "and equalized_spacing_nodule_masks can be found")
    parser.add_argument('model_weights', type=str, help="path where the model weights are stored")
    parser.add_argument('candidates_csv', type=str, help="path where the csv with the candidates will be written")
    parser.add_argument('--plane', dest='plane', default="axial")
    args = parser.parse_args()
    print("""
#######################################################
######### lucanode nodule candidate retrieval #########
#######################################################
""")
    annotations_df = pd.read_csv(args.annotations_csv)
    seriesuid_list = set(annotations_df.seriesuid)  # one scan per unique series
    candidates_list = []
    pbar = tqdm(seriesuid_list)
    pbar.set_description("Retrieving nodule candidates")
    for seriesuid in pbar:
        ct_scan_path = Path(args.dataset_path) / "equalized_spacing" / (seriesuid + ".nii.gz")
        lung_mask_path = Path(args.dataset_path) / "equalized_spacing_lung_masks" / (seriesuid + ".nii.gz")
        nodule_mask_path = Path(args.dataset_path) / "equalized_spacing_nodule_masks" / (seriesuid + ".nii.gz")
        # Skip (with a warning) any series missing one of its three volumes.
        if not (ct_scan_path.exists() and lung_mask_path.exists() and nodule_mask_path.exists()):
            logging.warning("Could not find scan for seriesuid " + seriesuid)
            continue
        dataset_metadata_df, dataset_array = loader.load_scan_in_training_format(
            seriesuid,
            ct_scan_path,
            lung_mask_path,
            nodule_mask_path
        )
        # Keep only slices for the requested anatomical plane.
        dataset_metadata_df = dataset_metadata_df[dataset_metadata_df["plane"] == args.plane]
        # Full-volume inference: no train/test split, predictions only.
        _, predictions = evaluation.evaluate_generator(
            dataset_metadata_df,
            dataset_array,
            args.model_weights,
            test_split_min=0.0,
            test_split_max=1.0,
            sort_by_loss=False,
            only_predictions=True
        )
        candidates_df = nodule_candidates.retrieve_candidates(dataset_metadata_df, predictions, args.plane)
        candidates_list.append(candidates_df)
    candidates_merged_df = pd.concat(candidates_list, ignore_index=True)
    candidates_merged_df.to_csv(args.candidates_csv, index=False)
|
13,303 | d90a984a65a4151c4e2aae16dd136664a8eacfb3 | def coroutine(f):
def wrap(*args,**kwargs):
gen = f(*args,**kwargs)
gen.send(None)
return gen
return wrap
@coroutine
def multiplier():
    """Primed coroutine: prints each received value times a counter that
    starts at 1 and grows by one per received value."""
    factor = 1
    while True:
        received = (yield)
        print(factor * received)
        factor += 1
|
13,304 | 27155230d5a2453311a57545f0c88c397a4a9512 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import logging
from bmc.bmc_spec import BMCSpec, InvarStatus
from . import sal_op_parser
from . import pwa2salconverter as pwa2sal
import fileops as fops
import utils as U
import err
logger = logging.getLogger(__name__)
# Environment variable naming SAL's install root, and the bmc binary under it.
SAL_PATH = 'SAL_PATH'
SAL_INF_BMC = '''/bin/sal-inf-bmc'''
class SALBMCError(Exception):
    """Raised for SAL-BMC-specific failures (e.g. an unknown model type)."""
class SalOpts():
    """Option bag controlling how sal-inf-bmc is invoked.

    Attributes:
        yices: SMT backend major version (2 selects the yices2 solver flag).
        verbosity: -v level passed to SAL.
        iterative: add -it (iterative deepening) when True.
        preserve_tmp_files: keep SAL's temporary files for debugging.
    """
    def __init__(self):
        # Defaults mirror the command line used throughout this module.
        self.preserve_tmp_files = True
        self.iterative = False
        self.verbosity = 3
        self.yices = 2
# Must separate the arguments. i.e., -v 3 should be given as ['-v', '3']
# This can be avoided by using shell=True, but that is a security risk
def sal_run_cmd(sal_path, depth, sal_file, prop_name, opts=None):
    """Build (and echo) the argv list for a sal-inf-bmc run.

    Args:
        sal_path: full path to the sal-inf-bmc binary.
        depth: BMC unrolling depth (-d).
        sal_file: .sal model file to check.
        prop_name: property name inside the module.
        opts: SalOpts instance; a fresh default is created when None.

    Returns:
        The command as a list of strings, ready for subprocess use.
    """
    # Bug fixed: the old default `opts=SalOpts()` was evaluated once at
    # definition time, so every caller shared (and could mutate) one instance.
    if opts is None:
        opts = SalOpts()
    cmd = [
        sal_path,
        '-v', str(opts.verbosity),
        '-d', str(depth),
        #'{}.sal'.format(module_name),
        sal_file,
        prop_name
    ]
    if opts.yices == 2:
        cmd.extend(['-s', 'yices2'])
    if opts.preserve_tmp_files:
        cmd.append('--preserve-tmp-files')
    if opts.iterative:
        cmd.append('-it')
    print(' '.join(cmd))
    return cmd
class BMC(BMCSpec):
    """Bounded model checker front-end driving sal-inf-bmc on a SAL
    translation of a piecewise-affine (PWA) system.

    NOTE(review): `e.message` in check() is Python-2-era exception access;
    confirm U.CallError still exposes `.message` before porting to Python 3.
    Indentation of this class was reconstructed from a whitespace-stripped
    dump — confirm against the original source.
    """
    def __init__(self, vs, pwa_graph, init_cons, final_cons,
                 init_ps, final_ps, fname_constructor, module_name, model_type,
                 smt_engine):
        """__init__
        Parameters
        ----------
        vs : list of variables. Order is important.
        pwa_graph : piecewise-affine model graph to translate
        init_cons : initial-state constraints
        final_cons : final (unsafe) state constraints
        init_ps : initial partitions
        final_ps : final partitions
        fname_constructor : callable mapping a file name to a usable path
        module_name : base name for the generated .sal module/file
        model_type : only 'dft' is implemented; 'dmt'/'ct'/'rel' raise
        smt_engine : unused here; solver choice is handled via SalOpts
        Returns
        -------
        None
        Notes
        ------
        Builds the SAL transition system eagerly for 'dft' models.
        """
        self.prop_name = 'safety'
        self.fname_constructor = fname_constructor
        self.module_name = module_name
        fname = module_name + '.sal'
        self.sal_file = fname_constructor(fname)
        self.trace = None  # last counter-example trace, set by check()
        self.vs = vs
        self.init_ps = init_ps
        self.final_ps = final_ps
        if model_type == 'dft':
            self.pwa2sal = pwa2sal.Pwa2Sal(
                module_name, init_cons,
                final_cons, pwa_graph, vs,
                init_ps, final_ps)
            self.sal_trans_sys = self.pwa2sal.trans_sys()
        elif model_type == 'dmt':
            raise NotImplementedError
            # Dead code below, retained from an earlier interface.
            dts = pwa_graph.keys()
            self.sal_trans_sys = BMC.sal_module_dmt(
                dts, vs, pwa_graph, init_cons, final_cons, module_name)
        elif model_type == 'ct':
            raise NotImplementedError
        elif model_type == 'rel':
            raise NotImplementedError
        else:
            raise SALBMCError('unknown model type')
        return
    def trace_generator(self, depth):
        """Yield (sal trace, pwa trace) pairs; at most one CE is produced."""
        for i in range(1):
            status = self.check(depth)
            if status == InvarStatus.Unsafe:
                yield self.trace, self.get_pwa_trace()
        return
    def check(self, depth):
        """Run sal-inf-bmc to unrolling depth *depth*.

        Returns InvarStatus.Unsafe when a counter-example is found (stored
        in self.trace), InvarStatus.Unknown otherwise. Falls back from
        yices2 to yices when SAL reports the former missing.
        """
        yices2_not_found = 'yices2: not found'
        self.dump()  # write the .sal model before invoking SAL
        try:
            sal_path_ = os.environ[SAL_PATH] + SAL_INF_BMC
        except KeyError:
            raise err.Fatal("SAL environment variable is not defined. It\n"
                            "should point to sal's top-level directory")
            #raise KeyError
        sal_path = fops.sanitize_path(sal_path_)
        sal_cmd = sal_run_cmd(
            sal_path,
            depth,
            self.sal_file,
            self.prop_name,
            )
        try:
            sal_op = U.strict_call_get_op(sal_cmd)
        except U.CallError as e:
            # Retry once with yices (v1) when yices2 is not on the PATH.
            if yices2_not_found in e.message:
                print('SAL can not find yices2. Trying with yices...')
                opts = SalOpts()
                opts.yices = 1
                sal_cmd = sal_run_cmd(
                    sal_path,
                    depth,
                    self.sal_file,
                    self.prop_name,
                    opts)
                sal_op = U.strict_call_get_op(sal_cmd)
            else:
                raise err.Fatal('unknown SAL error!')
        print(sal_op)
        self.trace = sal_op_parser.parse_trace(sal_op, self.vs)
        if self.trace is None:
            print('BMC failed to find a CE')
            return InvarStatus.Unknown
        else:
            #self.trace.set_vars(self.vs)
            print('#'*40)
            print('# Cleaned up trace')
            print('#'*40)
            print(self.trace)
            print('#'*40)
            return InvarStatus.Unsafe
    def dump(self):
        """Serialise the SAL transition system to self.sal_file."""
        fops.write_data(self.sal_file, str(self.sal_trans_sys).encode())
        return
    def get_trace(self):
        # NOTE(review): deliberately disabled — everything after the raise
        # is dead code retained from an older interface.
        raise NotImplementedError
        """Returns the last trace found or None if no trace exists."""
        return self.trace
    def get_last_traces(self):
        # NOTE(review): deliberately disabled (twice); dead code below.
        raise NotImplementedError
        # Code works, but should be removed due to change in
        # interfaces
        raise NotImplementedError
        if self.trace is not None:
            return self.trace.to_array(), self.get_last_pwa_trace()
        else:
            return None, None
    def get_pwa_trace(self):
        """Converts a bmc trace to a sequence of sub_models in the original pwa.
        Parameters
        ----------
        Returns
        -------
        pwa_trace = [sub_model_0, sub_model_1, ... ,sub_model_n]
        pwa_trace =
            models = [m01, m12, ... , m(n-1)n]
            partitions = [p0, p1, p2, ..., pn]
        Notes
        ------
        For now, pwa_trace is only a list of sub_models, as relational
        modeling is being done with KMIN = 1. Hence, there is no
        ambiguity.
        """
        # # each step, but the last, corresponds to a transition
        # for step in steps[:-1]:
        #     part_id = self.sal2pwa_map[step.assignments['cell']]
        #     sub_model = self.sal2pwa_map[step.tid]
        #     # Assumption of trace building is that each submodel only
        #     # has 1 unique next location. If this violated, we need to
        #     # add cell ids/part ids to resolve the ambiguity.
        #     assert(len(sub_model.pnexts) == 1)
        #     assert(sub_model.p.ID == part_id)
        #     # this is still untested, so in case assert is off...
        #     assert(sub_model.p.ID == part_id)
        #     #err.warn('gone case')
        #     #pwa_trace.extend((part_id, sub_model))
        #     pwa_trace.append(sub_model)
        if self.trace is None:
            return None
        steps = self.trace
        # Every step but the last corresponds to one discrete transition id.
        transitions = [step.tid for step in steps[:-1]]
        return self.pwa2sal.trace(transitions)
    def gen_new_disc_trace(self):
        # NOTE(review): deliberately disabled; dead code below.
        raise NotImplementedError
        """makes trace = None, signifying no more traces..."""
        self.trace = None
        return
################################################
# ############# CEMETERY #######################
################################################
# @staticmethod
# def sal_module_dmt(dts, vs, pwa_models, init_set, final_cons, module_name):
# sal_trans_sys = slt_dmt.SALTransSysDMT(dts, module_name, vs, init_set, final_cons)
# for dt, pwa_model in pwa_models.iteritems():
# # replace decimal point with _ else SAL will throw an
# # error due to incorrect identifier
# dt_str = str(dt).replace('.', '_')
# for idx, sub_model in enumerate(pwa_model):
# g = slt_dmt.Guard(sub_model.p.C, sub_model.p.d)
# r = slt_dmt.Reset(sub_model.m.A, sub_model.m.b)
# t = slt_dmt.Transition(
# dt, dts, 'C_{}_{}'.format(idx, dt_str), g, r)
# sal_trans_sys.add_transition(t)
# return sal_trans_sys
|
13,305 | 2ddf5f77e7cc75ce0df31ad4f017e70dc60c395b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 20:40:47 2021
@author: Justin Sheen
@description: script used to create community simulations, with and
without an enacted NPI intervention.
"""
# Import libraries and set seeds ----------------------------------------------
import numpy as np
import random
import matplotlib.pyplot as plt
import statistics
import networkx as nx
from collections import defaultdict, Counter
import EoN
import math
from pathlib import Path
home = str(Path.home())
random.seed(1)  # reproducibility for the `random` module
# Seeded NumPy generator. NOTE(review): `gen` is not referenced in this
# chunk (np.random.negative_binomial below uses the legacy global state);
# confirm whether it is still needed.
gen = np.random.Generator(np.random.PCG64(1))
# Create parameter sets to run ------------------------------------------------
rts = [1.5, 2]                     # target R0 values
overdispersions = [0.1, 0.4, 0.7]  # negative-binomial degree dispersion k
clusters = [1000, 10000]           # community sizes
effects = [0.2, 0.4]               # proportional beta reduction under the NPI
eits = [0.005]                     # expected infected fraction at intervention
param_sets = []
for i in rts:
    for j in overdispersions:
        for k in clusters:
            for l in effects:
                for m in eits:
                    if (k == 10000 and j == 0.1):
                        # Special-cased lower intervention threshold.
                        param_sets.append([i, j, k, l , 0.0045])
                    else:
                        param_sets.append([i, j, k, l , m])
# NOTE(review): the settings below rebuild param_sets from scratch, discarding
# everything appended above — presumably only the N=100 experiment is active
# in this version of the script; confirm that this is intentional.
clusters = [100]
eits = [0.02]
param_sets = []
for i in rts:
    for j in overdispersions:
        for k in clusters:
            for l in effects:
                for m in eits:
                    param_sets.append([i, j, k, l , m])
# For each parameter set, create 3,000 simulations ----------------------------
for param_set in param_sets:
    tgt_R0 = param_set[0]            # target basic reproduction number
    k_overdispersion = param_set[1]  # negative-binomial dispersion of degrees
    N_cluster = param_set[2]         # community size
    effect = param_set[3]            # proportional reduction in beta under NPI
    expected_It_N = param_set[4]     # target infected fraction at intervention
    mean_degree = 15
    # Seed infections scale (roughly) with community size.
    initial_infections_per_cluster = None
    if N_cluster == 100:
        initial_infections_per_cluster = 1
    elif N_cluster == 1000:
        initial_infections_per_cluster = 4
    else:
        initial_infections_per_cluster = 40
    # Gamma-distributed incubation / infectious periods (shape, rate).
    incperiod_shape = 5.807
    incperiod_rate = 1 / 0.948
    infperiod_shape = 1.13
    infperiod_rate = 0.226
    ave_inc_period = incperiod_shape / incperiod_rate
    ave_inf_period = infperiod_shape / infperiod_rate
    one_gen_time = ave_inc_period + ave_inf_period  # mean generation interval
    # The intervention trigger must exceed the seeded prevalence.
    if expected_It_N <= initial_infections_per_cluster / N_cluster:
        raise NameError("Script assumes expected It / N strictly < initial infections per cluster / N.")
    # Joel C Miller's methods to estimate_R0 ----------------------------------
def get_Pk(G):
    """Return the empirical degree distribution of *G* as {degree: fraction}."""
    degree_counts = Counter(dict(G.degree()).values())
    n_nodes = float(G.order())
    return {deg: cnt / n_nodes for deg, cnt in degree_counts.items()}
def get_PGFPrime(Pk):
    """Return psi'(x) = sum_k k * Pk[k] * x**(k-1), the derivative of the
    degree-distribution probability generating function."""
    max_degree = max(Pk.keys())
    degrees = np.linspace(0, max_degree, max_degree + 1)
    probs = np.array([Pk.get(k, 0) for k in degrees])

    def psi_prime(x):
        return probs.dot(degrees * x ** (degrees - 1))
    return psi_prime
def get_PGFDPrime(Pk):
    """Return psi''(x) = sum_k k*(k-1) * Pk[k] * x**(k-2), the second
    derivative of the degree-distribution PGF."""
    max_degree = max(Pk.keys())
    degrees = np.linspace(0, max_degree, max_degree + 1)
    probs = np.array([Pk.get(k, 0) for k in degrees])

    def psi_dprime(x):
        return probs.dot(degrees * (degrees - 1) * x ** (degrees - 2))
    return psi_dprime
def estimate_R0(G, tau = None, gamma = None):
    """Estimate R0 on graph *G* for transmission rate *tau* and recovery rate
    *gamma*: T * psi''(1)/psi'(1) with transmissibility T = tau/(tau+gamma)."""
    transmission_prob = tau / (tau + gamma)
    degree_dist = get_Pk(G)
    return transmission_prob * get_PGFDPrime(degree_dist)(1.) / get_PGFPrime(degree_dist)(1.)
# Find median beta that leads to desired tgt_R0 (500 sims) ----------------
p = 1.0 - mean_degree / (mean_degree + k_overdispersion)
beta_lst = []
for i in range(500):
if (i % 100 == 0):
print(i)
continue_loop = True
while (continue_loop):
z = []
for i in range(N_cluster):
deg = 0
deg = np.random.negative_binomial(k_overdispersion, p)
z.append(deg)
for i in range(len(z)):
if (z[i] == 0):
z[i] == 1
if (sum(z) % 2 == 0):
continue_loop = False
G=nx.configuration_model(z)
G=nx.Graph(G)
G.remove_edges_from(nx.selfloop_edges(G))
est_R0=3.3
beta=0.04
while est_R0 > tgt_R0:
beta = beta - 0.0001
est_R0 = estimate_R0(G, tau=beta, gamma=1/ave_inf_period)
beta_lst.append(beta)
plt.hist(beta_lst)
print("Median beta value for tgt_R0: " + str(statistics.median(beta_lst)))
beta = statistics.median(beta_lst)
# Create graphs for input to Gillespie algorithm --------------------------
# Spontaneous-transition graph H: E->I (end of incubation), I->R (recovery).
H = nx.DiGraph()
H.add_node('S')
H.add_edge('E', 'I', rate = 1 / ave_inc_period, weight_label='expose2infect_weight')
H.add_edge('I', 'R', rate = 1 / ave_inf_period)
return_statuses = ('S', 'E', 'I', 'R')
# Induced-transmission graph J: an I neighbour exposes an S node at rate beta.
J = nx.DiGraph()
J.add_edge(('I', 'S'), ('I', 'E'), rate = beta, weight_label='transmission_weight')
# J_treat: the same contact process with beta reduced by the NPI `effect`.
J_treat = nx.DiGraph()
J_treat.add_edge(('I', 'S'), ('I', 'E'), rate = ((1 - effect) * beta), weight_label='transmission_weight')
# Find day on average when expected_It_N of active infections (1000 sims) -
nsim = 1000
I_series = []  # per-simulation daily infected counts
while (len(I_series) < nsim):
    if (len(I_series) % 100 == 0):
        print(len(I_series))  # progress marker
    # Draw degree sequences until the sum is even (configuration model).
    continue_loop = True
    while (continue_loop):
        z = []
        for node_dex in range(N_cluster):
            deg = np.random.negative_binomial(k_overdispersion, p)
            z.append(deg)
        # Bump isolated nodes to degree 1. Bug fixed: the original
        # `z[i] == 1` was a comparison (a silent no-op).
        for z_dex in range(len(z)):
            if (z[z_dex] == 0):
                z[z_dex] = 1
        if (sum(z) % 2 == 0):
            continue_loop = False
    G = nx.configuration_model(z)
    G = nx.Graph(G)  # collapse parallel edges
    G.remove_edges_from(nx.selfloop_edges(G))
    node_attribute_dict = {node: 1 for node in G.nodes()}
    edge_attribute_dict = {edge: 1 for edge in G.edges()}
    nx.set_node_attributes(G, values=node_attribute_dict, name='expose2infect_weight')
    nx.set_edge_attributes(G, values=edge_attribute_dict, name='transmission_weight')
    IC = defaultdict(lambda: 'S')
    for node in range(initial_infections_per_cluster):
        IC[node] = 'I'
    t, S, E, I, R = EoN.Gillespie_simple_contagion(G, H, J, IC, return_statuses, tmax = 200)
    # Resample the event-time series onto integer days.
    next_t = 0
    to_add_row = []
    for t_dex in range(len(t)):
        if t[t_dex] >= next_t:
            to_add_row.append(I[t_dex])
            next_t += 1
    I_series.append(to_add_row)
med_t_one_pct = None
# Find first day of sim where the ave. num. of infects >= expected_It_N ---
for day_dex in range(nsim):
    focal_dist = []
    for I_series_dex in range(len(I_series)):
        if len(I_series[I_series_dex]) > day_dex:
            focal_dist.append(I_series[I_series_dex][day_dex] / N_cluster)
    if len(focal_dist) <= 100:
        raise NameError("Not enough simulations (<10%) to get average number of infections on this day.")
    print(len(focal_dist))
    print(statistics.mean(focal_dist))
    if statistics.mean(focal_dist) >= expected_It_N:
        med_t_one_pct = day_dex
        break
# Set threshold value of number of infections at time t -------------------
# NOTE(review): every branch assigns 1, so the threshold is effectively a
# constant; per-community-size values look intended — confirm.
threshold = 1
if N_cluster == 1000:
    threshold = 1
elif N_cluster == 100:
    threshold = 1
elif N_cluster == 10000:
    threshold = 1
# Simulate epidemics with/without treatment of effect reduction in beta ---
nsim = 3000
It_It1con_It1trt = []  # one row of S/E/I/R snapshots per accepted simulation
sim_ctr = 0            # total attempts, incl. sims rejected below threshold
E_It = []              # infected count at intervention time (accepted sims)
while (len(It_It1con_It1trt) < nsim):
    if (len(It_It1con_It1trt) % 100 == 0):
        print(len(It_It1con_It1trt))  # progress marker
sim_ctr += 1
# Draw negative-binomial degree sequences until one has an even sum
# (required for a valid configuration model).
continue_loop = True
while (continue_loop):
    z = []
    for node_dex in range(N_cluster):
        deg = np.random.negative_binomial(k_overdispersion, p)
        z.append(deg)
    # Bump isolated nodes to degree 1. Bug fixed: the original
    # `z[i] == 1` was a comparison used as a statement — a silent
    # no-op that left degree-0 nodes in the graph.
    for z_dex in range(len(z)):
        if (z[z_dex] == 0):
            z[z_dex] = 1
    if (sum(z) % 2 == 0):
        continue_loop = False
G = nx.configuration_model(z)
G = nx.Graph(G)  # collapse parallel edges
G.remove_edges_from(nx.selfloop_edges(G))
node_attribute_dict = {node: 1 for node in G.nodes()}
edge_attribute_dict = {edge: 1 for edge in G.edges()}
nx.set_node_attributes(G, values=node_attribute_dict, name='expose2infect_weight')
nx.set_edge_attributes(G, values=edge_attribute_dict, name='transmission_weight')
IC = defaultdict(lambda: 'S')
for node in range(initial_infections_per_cluster):
    IC[node] = 'I'
# Phase 1: run untreated up to the intervention day med_t_one_pct.
full_first_half = EoN.Gillespie_simple_contagion(G, H, J, IC, return_statuses, tmax = math.ceil(med_t_one_pct), return_full_data=True)
t_first_half = full_first_half.t()
S_first_half = full_first_half.S()
E_first_half = full_first_half.summary()[1]['E']
I_first_half = full_first_half.I()
R_first_half = full_first_half.R()
# Accept the simulation only if enough infections remain at intervention.
if I_first_half[-1] >= threshold:
    E_It.append(I_first_half[-1])
    # Freeze node statuses at the intervention time to seed both arms.
    nodes_first_half_final = full_first_half.get_statuses(list(G.nodes()), t_first_half[-1])
    curr_IC = defaultdict(lambda: 'S')
    for node in G.nodes():
        status = nodes_first_half_final[node]
        curr_IC[node] = status
    # Phase 2a: control arm (no NPI), three generation intervals long.
    full_second_half_con = EoN.Gillespie_simple_contagion(G, H, J, curr_IC, return_statuses, tmax = np.ceil(one_gen_time) * 3, return_full_data=True)
    t_second_half_con = full_second_half_con.t()
    S_second_half_con = full_second_half_con.S()
    E_second_half_con = full_second_half_con.summary()[1]['E']
    I_second_half_con = full_second_half_con.I()
    R_second_half_con = full_second_half_con.R()
    # Phase 2b: treatment arm, identical start but beta reduced via J_treat.
    full_second_half_trt = EoN.Gillespie_simple_contagion(G, H, J_treat, curr_IC, return_statuses, tmax = np.ceil(one_gen_time) * 3, return_full_data=True)
    t_second_half_trt = full_second_half_trt.t()
    S_second_half_trt = full_second_half_trt.S()
    E_second_half_trt = full_second_half_trt.summary()[1]['E']
    I_second_half_trt = full_second_half_trt.I()
    R_second_half_trt = full_second_half_trt.R()
    # S/E/I/R snapshots at one, two and three generation times per arm.
    one_gen_S_con, one_gen_E_con, one_gen_I_con, one_gen_R_con = (None,) * 4
    two_gen_S_con, two_gen_E_con, two_gen_I_con, two_gen_R_con = (None,) * 4
    three_gen_S_con, three_gen_E_con, three_gen_I_con, three_gen_R_con = (None,) * 4
    one_gen_S_trt, one_gen_E_trt, one_gen_I_trt, one_gen_R_trt = (None,) * 4
    two_gen_S_trt, two_gen_E_trt, two_gen_I_trt, two_gen_R_trt = (None,) * 4
    three_gen_S_trt, three_gen_E_trt, three_gen_I_trt, three_gen_R_trt = (None,) * 4
    for t_dex_con in range(len(t_second_half_con)):
        if t_second_half_con[t_dex_con] >= np.ceil(one_gen_time) and (one_gen_S_con is None):
            one_gen_S_con = S_second_half_con[t_dex_con]
            one_gen_E_con = E_second_half_con[t_dex_con]
            one_gen_I_con = I_second_half_con[t_dex_con]
            one_gen_R_con = R_second_half_con[t_dex_con]
        if t_second_half_con[t_dex_con] >= np.ceil(one_gen_time) * 2 and (two_gen_S_con is None):
            two_gen_S_con = S_second_half_con[t_dex_con]
            two_gen_E_con = E_second_half_con[t_dex_con]
            two_gen_I_con = I_second_half_con[t_dex_con]
            two_gen_R_con = R_second_half_con[t_dex_con]
        # Reassigned every iteration; final values are the run's endpoint.
        three_gen_S_con = S_second_half_con[-1]
        three_gen_E_con = E_second_half_con[-1]
        three_gen_I_con = I_second_half_con[-1]
        three_gen_R_con = R_second_half_con[-1]
    # Backfill: a run that ended early uses its endpoint for missed marks.
    if one_gen_S_con is None: one_gen_S_con = S_second_half_con[-1]
    if one_gen_E_con is None: one_gen_E_con = E_second_half_con[-1]
    if one_gen_I_con is None: one_gen_I_con = I_second_half_con[-1]
    if one_gen_R_con is None: one_gen_R_con = R_second_half_con[-1]
    if two_gen_S_con is None: two_gen_S_con = S_second_half_con[-1]
    if two_gen_E_con is None: two_gen_E_con = E_second_half_con[-1]
    if two_gen_I_con is None: two_gen_I_con = I_second_half_con[-1]
    if two_gen_R_con is None: two_gen_R_con = R_second_half_con[-1]
    for t_dex_trt in range(len(t_second_half_trt)):
        if t_second_half_trt[t_dex_trt] >= np.ceil(one_gen_time) and (one_gen_S_trt is None):
            one_gen_S_trt = S_second_half_trt[t_dex_trt]
            one_gen_E_trt = E_second_half_trt[t_dex_trt]
            one_gen_I_trt = I_second_half_trt[t_dex_trt]
            one_gen_R_trt = R_second_half_trt[t_dex_trt]
        if t_second_half_trt[t_dex_trt] >= np.ceil(one_gen_time) * 2 and (two_gen_S_trt is None):
            two_gen_S_trt = S_second_half_trt[t_dex_trt]
            two_gen_E_trt = E_second_half_trt[t_dex_trt]
            two_gen_I_trt = I_second_half_trt[t_dex_trt]
            two_gen_R_trt = R_second_half_trt[t_dex_trt]
        three_gen_S_trt = S_second_half_trt[-1]
        three_gen_E_trt = E_second_half_trt[-1]
        three_gen_I_trt = I_second_half_trt[-1]
        three_gen_R_trt = R_second_half_trt[-1]
    if one_gen_S_trt is None: one_gen_S_trt = S_second_half_trt[-1]
    if one_gen_E_trt is None: one_gen_E_trt = E_second_half_trt[-1]
    if one_gen_I_trt is None: one_gen_I_trt = I_second_half_trt[-1]
    if one_gen_R_trt is None: one_gen_R_trt = R_second_half_trt[-1]
    if two_gen_S_trt is None: two_gen_S_trt = S_second_half_trt[-1]
    if two_gen_E_trt is None: two_gen_E_trt = E_second_half_trt[-1]
    if two_gen_I_trt is None: two_gen_I_trt = I_second_half_trt[-1]
    if two_gen_R_trt is None: two_gen_R_trt = R_second_half_trt[-1]
    # One result row: intervention-time state + 3 snapshots per arm.
    to_add_row = [S_first_half[-1], E_first_half[-1], I_first_half[-1], R_first_half[-1],
                  one_gen_S_con, one_gen_E_con, one_gen_I_con, one_gen_R_con,
                  two_gen_S_con, two_gen_E_con, two_gen_I_con, two_gen_R_con,
                  three_gen_S_con, three_gen_E_con, three_gen_I_con, three_gen_R_con,
                  one_gen_S_trt, one_gen_E_trt, one_gen_I_trt, one_gen_R_trt,
                  two_gen_S_trt, two_gen_E_trt, two_gen_I_trt, two_gen_R_trt,
                  three_gen_S_trt, three_gen_E_trt, three_gen_I_trt, three_gen_R_trt]
    It_It1con_It1trt.append(to_add_row)
# Dump all accepted simulations plus run metadata for this parameter set.
filename = home + "/NPI/code_output/res/" + str(tgt_R0) + "_" + str(N_cluster) + "_" + str(k_overdispersion) + "_" + str(effect) + "_" + str(expected_It_N) + ".csv"
with open(filename, 'w') as out_f:
    for sim_dex in range(len(It_It1con_It1trt)):
        for entry_dex in range(len(It_It1con_It1trt[0])):
            out_f.write(str(It_It1con_It1trt[sim_dex][entry_dex]))
            out_f.write(",")
        out_f.write("\n")
    # Trailer row: attempts, mean infections at intervention, beta, day.
    out_f.write(str(sim_ctr))
    out_f.write(",")
    out_f.write(str(statistics.mean(E_It)))
    out_f.write(",")
    out_f.write(str(beta))
    out_f.write(",")
    out_f.write(str(med_t_one_pct))
|
13,306 | 66f222cba8d7626a70ce45c3ab8643372b8837c8 | # -*- coding: utf-8 -*-
import json
import logging
import scrapy
from scrapy import log
from old_house.items import city_status_Item
from scrapy.selector import Selector
from scrapy.http import Request
import time
class shenyang_house_spider(scrapy.Spider):
    """Scrapy spider that walks anjuke.com's city index down to
    district ("county") and area ("place") listing URLs, emitting one
    city_status_Item per area.

    NOTE(review): Python-2-era code (str.encode on extracted text, the
    deprecated scrapy.log.msg API) — confirm the target runtime before
    modernising. Indentation reconstructed from a whitespace-stripped dump.
    """
    name = "reset"
    start_urls = [
        "http://www.anjuke.com/sy-city.html"
    ]
    custom_settings = {
        'ITEM_PIPELINES': {
            'old_house.pipelines.StatusPipeline': 300,
        },
    }
    allowed_domains = ["anjuke.com"]
    def parse(self, response):
        # The city index is a fixed grid of div/dl/dd/a elements; probe each
        # possible position and stop a column at the first missing anchor.
        sel = Selector(response)
        for x in range(1, 3):
            for i in range(1, 12):
                for k in range(1, 39):
                    xpathStr = '//*[@id="content"]/div[4]/div[' + str(x) + ']/dl[' + str(i) + ']/dd/a[' + str(k) + ']'
                    url = sel.xpath(xpathStr + '/@href').extract()
                    citypath = sel.xpath(xpathStr + '/text()').extract()
                    if url != []:
                        city = citypath[0].encode("utf-8")
                        next_url = url[0] + '/sale/'
                        yield Request(next_url, callback=self.country_parse, meta={"city": city})
                    else:
                        break
    def country_parse(self, response):
        # District ("county") level beneath one city's /sale/ page.
        city = response.meta["city"]
        log.msg("city " + city + " into country parse")
        sel = Selector(response)
        counties = sel.xpath('//div[@class="items"][1]/span[@class="elems-l"]/a')
        for county in counties:
            county_url = county.xpath('@href').extract()[0].encode("utf-8")
            county_name=county.xpath('text()').extract()[0].encode("utf-8")
            yield Request(county_url, callback=self.place_parse, meta={"city": city,"county":county_name})
    def place_parse(self, response):
        # Area ("place") level; emits the final item for each area URL.
        city = response.meta["city"]
        county=response.meta["county"]
        log.msg("city " + city + " into place parse")
        sel = Selector(response)
        places = sel.xpath('//div[@class="items"][1]/span[@class="elems-l"]/div[@class="sub-items"]/a')
        for place in places:
            url = place.xpath('@href').extract()[0].encode("utf-8")
            place_name=place.xpath('text()').extract()[0].encode("utf-8")
            item=city_status_Item()
            item["city"]=city
            item["county"]=county
            item["place"]=place_name
            item["url"]=url
            yield item
|
13,307 | 0c23405b102ae3ab8a4f1ac1f473b70eda442a64 | import sys, numpy
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QPalette, QColor, QFont
from orangewidget import widget, gui
from oasys.widgets import gui as oasysgui
from orangewidget.settings import Setting
from oasys.widgets.widget import OWWidget
from orangecontrib.shadow.util.shadow_objects import ShadowBeam
from orangecontrib.shadow.util.shadow_util import ShadowCongruence
class MergeBeams(OWWidget):
    """OASYS widget that merges up to 10 Shadow beams into a single beam.

    Each input handler (setBeam1 ... setBeam10) validates the incoming beam;
    merge_beams() concatenates every attached beam and, when relative weights
    are enabled, rescales each beam's electric-field components so that its
    share of the total intensity matches the user-requested weight.
    """

    name = "Merge Shadow Beam"
    description = "Display Data: Merge Shadow Beam"
    icon = "icons/merge.png"
    maintainer = "Luca Rebuffi"
    maintainer_email = "lrebuffi(@at@)anl.gov"
    priority = 4
    category = "Data Display Tools"
    keywords = ["data", "file", "load", "read"]

    # number of input channels; kept in sync with the `inputs` list below
    MAX_BEAMS = 10

    inputs = [("Input Beam # 1" , ShadowBeam, "setBeam1" ),
              ("Input Beam # 2" , ShadowBeam, "setBeam2" ),
              ("Input Beam # 3" , ShadowBeam, "setBeam3" ),
              ("Input Beam # 4" , ShadowBeam, "setBeam4" ),
              ("Input Beam # 5" , ShadowBeam, "setBeam5" ),
              ("Input Beam # 6" , ShadowBeam, "setBeam6" ),
              ("Input Beam # 7" , ShadowBeam, "setBeam7" ),
              ("Input Beam # 8" , ShadowBeam, "setBeam8" ),
              ("Input Beam # 9" , ShadowBeam, "setBeam9" ),
              ("Input Beam # 10", ShadowBeam, "setBeam10"),]

    outputs = [{"name":"Beam",
                "type":ShadowBeam,
                "doc":"Shadow Beam",
                "id":"beam"}]

    want_main_area = 0
    want_control_area = 1

    # last valid beam received on each channel (None when the channel is empty)
    input_beam1 = None
    input_beam2 = None
    input_beam3 = None
    input_beam4 = None
    input_beam5 = None
    input_beam6 = None
    input_beam7 = None
    input_beam8 = None
    input_beam9 = None
    input_beam10 = None

    # 0 = merge as-is, 1 = rescale each beam to the requested relative weight
    use_weights = Setting(0)

    # user-requested fraction of the merged intensity per channel
    weight_input_beam1 = Setting(0.0)
    weight_input_beam2 = Setting(0.0)
    weight_input_beam3 = Setting(0.0)
    weight_input_beam4 = Setting(0.0)
    weight_input_beam5 = Setting(0.0)
    weight_input_beam6 = Setting(0.0)
    weight_input_beam7 = Setting(0.0)
    weight_input_beam8 = Setting(0.0)
    weight_input_beam9 = Setting(0.0)
    weight_input_beam10 = Setting(0.0)

    def __init__(self, show_automatic_box=True):
        """Build the control area: merge button plus one weight field per channel."""
        super().__init__()

        self.runaction = widget.OWAction("Merge Beams", self)
        self.runaction.triggered.connect(self.merge_beams)
        self.addAction(self.runaction)

        self.setFixedWidth(470)
        self.setFixedHeight(470)

        gen_box = gui.widgetBox(self.controlArea, "Merge Shadow Beams", addSpace=True, orientation="vertical")
        button_box = oasysgui.widgetBox(gen_box, "", addSpace=False, orientation="horizontal")
        button = gui.button(button_box, self, "Merge Beams and Send", callback=self.merge_beams)

        font = QFont(button.font())
        font.setBold(True)
        button.setFont(font)
        palette = QPalette(button.palette())  # make a copy of the palette
        palette.setColor(QPalette.ButtonText, QColor('Dark Blue'))
        button.setPalette(palette)  # assign new palette
        button.setFixedHeight(45)

        weight_box = oasysgui.widgetBox(gen_box, "Relative Weights", addSpace=False, orientation="vertical")

        gui.comboBox(weight_box, self, "use_weights", label="Use Relative Weights?", labelWidth=350,
                     items=["No", "Yes"],
                     callback=self.set_UseWeights, sendSelectedValue=False, orientation="horizontal")

        gui.separator(weight_box, height=10)

        # One (initially disabled) weight field per input channel; the original
        # code repeated this block ten times by hand.
        for index in range(1, self.MAX_BEAMS + 1):
            line_edit = oasysgui.lineEdit(weight_box, self,
                                          "weight_input_beam%d" % index,
                                          "Input Beam %d weight" % index,
                                          labelWidth=300, valueType=float, orientation="horizontal")
            line_edit.setEnabled(False)
            setattr(self, "le_weight_input_beam%d" % index, line_edit)

    def _set_beam(self, beam, index):
        """Shared input handler: store `beam` on channel `index` when it is
        non-empty and contains good rays; otherwise warn and leave the
        channel empty. The channel's weight field is enabled only when
        weighting is active and the beam was accepted."""
        line_edit = getattr(self, "le_weight_input_beam%d" % index)
        line_edit.setEnabled(False)
        setattr(self, "input_beam%d" % index, None)

        if ShadowCongruence.checkEmptyBeam(beam):
            if ShadowCongruence.checkGoodBeam(beam):
                setattr(self, "input_beam%d" % index, beam)
                if self.use_weights == 1: line_edit.setEnabled(True)
            else:
                QtWidgets.QMessageBox.critical(self, "Error",
                                               "Data #%d not displayable: No good rays or bad content" % index,
                                               QtWidgets.QMessageBox.Ok)

    # Orange resolves the handler for each declared input channel by name,
    # so one thin named wrapper per channel is still required.
    def setBeam1(self, beam):  self._set_beam(beam, 1)
    def setBeam2(self, beam):  self._set_beam(beam, 2)
    def setBeam3(self, beam):  self._set_beam(beam, 3)
    def setBeam4(self, beam):  self._set_beam(beam, 4)
    def setBeam5(self, beam):  self._set_beam(beam, 5)
    def setBeam6(self, beam):  self._set_beam(beam, 6)
    def setBeam7(self, beam):  self._set_beam(beam, 7)
    def setBeam8(self, beam):  self._set_beam(beam, 8)
    def setBeam9(self, beam):  self._set_beam(beam, 9)
    def setBeam10(self, beam): self._set_beam(beam, 10)

    @staticmethod
    def _beam_intensity(beam):
        """Per-ray intensity: sum of squared E-field components
        (Shadow ray columns 6-8 = Es, 15-17 = Ep)."""
        rays = beam._beam.rays
        return (rays[:, 6]**2 + rays[:, 7]**2 + rays[:, 8]**2 +
                rays[:, 15]**2 + rays[:, 16]**2 + rays[:, 17]**2)

    def merge_beams(self):
        """Concatenate every attached beam and emit the result on "Beam".

        With use_weights on, each beam's field amplitudes are multiplied by
        sqrt(requested_weight / current_weight) so that intensity (|E|^2)
        matches the requested relative weight.
        """
        merged_beam = None

        total_intensity = 0.0
        if self.use_weights == 1:
            # NOTE(review): this accumulates a per-ray intensity array, so all
            # weighted input beams are expected to carry the same number of
            # rays — confirm upstream guarantees this.
            for index in range(1, self.MAX_BEAMS + 1):
                current_beam = getattr(self, "input_beam" + str(index))
                if current_beam is not None:
                    total_intensity += self._beam_intensity(current_beam)

        for index in range(1, self.MAX_BEAMS + 1):
            current_beam = getattr(self, "input_beam" + str(index))
            if current_beam is None: continue

            # never modify the beam stored on the input channel
            current_beam = current_beam.duplicate()

            if self.use_weights == 1:
                current_weight = self._beam_intensity(current_beam) / total_intensity
                requested_weight = getattr(self, "weight_input_beam" + str(index))
                # intensity scales as |E|^2, so amplitudes scale with sqrt(ratio)
                scale = numpy.sqrt(requested_weight / current_weight)
                for column in (6, 7, 8, 15, 16, 17):
                    current_beam._beam.rays[:, column] *= scale

            if merged_beam is None:
                merged_beam = current_beam
            else:
                merged_beam = ShadowBeam.mergeBeams(merged_beam, current_beam, which_flux=3, merge_history=0)

        self.send("Beam", merged_beam)

    def set_UseWeights(self):
        """Enable each weight field only while weighting is on and the
        corresponding channel actually holds a beam."""
        for index in range(1, self.MAX_BEAMS + 1):
            has_beam = getattr(self, "input_beam%d" % index) is not None
            getattr(self, "le_weight_input_beam%d" % index).setEnabled(
                self.use_weights == 1 and has_beam)
if __name__ == "__main__":
    # Manual smoke test: show the widget in its own Qt event loop.
    application = QApplication(sys.argv)
    merge_widget = MergeBeams()
    merge_widget.show()
    application.exec_()
    merge_widget.saveSettings()
|
13,308 | afcc4e1b097a1de62f666a85206a32283c7404e1 | from django.urls import path
from . import headViews, teacherViews, studentViews
from .views import index_page, login_page, login_action, GetUserDetails, logout_user
urlpatterns = [
    # Authentication / shared pages
    path('', login_page, name="show_login"),
    path('index', index_page),
    path('login', login_action, name="login"),
    path('get_user_details', GetUserDetails),
    path('logout_user', logout_user, name="logout"),
    # Urls for the headteacher (admin-style CRUD over teachers/students/classes/subjects/terms)
    path('headteacher_home', headViews.headteacher_home, name="headteacher_home"),
    path('add_teacher', headViews.add_teacher, name="add_teacher"),
    path('add_teacher_save', headViews.add_teacher_save, name="add_teacher_save"),
    path('add_class', headViews.add_class, name="add_class"),
    path('add_class_save', headViews.add_class_save, name="add_class_save"),
    path('add_student', headViews.add_student, name="add_student"),
    path('add_student_save', headViews.add_student_save, name="add_student_save"),
    path('add_subject', headViews.add_subject, name="add_subject"),
    path('add_subject_save', headViews.add_subject_save, name="add_subject_save"),
    path('manage_teachers', headViews.manage_teachers, name="manage_teachers"),
    path('manage_students', headViews.manage_students, name="manage_students"),
    path('manage_classes', headViews.manage_classes, name="manage_classes"),
    path('manage_subjects', headViews.manage_subjects, name="manage_subjects"),
    path('edit_teacher/<str:teacher_id>', headViews.edit_teacher, name="edit_teacher"),
    path('edit_teacher_save', headViews.edit_teacher_save, name="edit_teacher_save"),
    path('edit_student/<str:student_id>', headViews.edit_student, name="edit_student"),
    path('edit_student_save', headViews.edit_student_save, name="edit_student_save"),
    path('edit_subject/<str:subject_id>', headViews.edit_subject, name="edit_subject"),
    path('edit_subject_save', headViews.edit_subject_save, name="edit_subject_save"),
    path('edit_class/<str:class_id>', headViews.edit_class, name="edit_class"),
    path('edit_class_save', headViews.edit_class_save, name="edit_class_save"),
    path('manage_terms', headViews.manage_terms, name="manage_terms"),
    path('add_term_save', headViews.add_term_save, name="add_term_save"),
    # Urls for teachers
    path('teacher_home', teacherViews.teacher_home, name="teacher_home"),
    path('take_attendance', teacherViews.take_attendance, name="take_attendance"),
    path('get_students_attendance', teacherViews.get_students_attendance, name="get_students_attendance"),
    path('save_attendance_data', teacherViews.save_attendance_data, name="save_attendance_data"),
    path('get_attendance_dates', teacherViews.get_attendance_dates, name="get_attendance_dates"),
    path('update_attendance_data', teacherViews.update_attendance_data, name="update_attendance_data"),
    # Urls for students
    path('student_home', studentViews.student_home, name="student_home")
]
|
13,309 | c0776b38e9edd8d3920e274deb3f1565e348aed4 | # -*- encoding: utf-8 -*-
# @Contact : ynatsu233@outlook.com
# @Time : 2019/5/5 19:03
# @Author : Natsu Yuki
# pip install Flask-Script
from flask_script import Manager
# 本地文件
from flaskScript import app
from dbScripts import dbManager
from flask_migrate import Migrate, MigrateCommand
from exts import db
from models_sql import Book
manager = Manager(app=app)
migrate = Migrate(app=app, db=db)
manager.add_command('db', MigrateCommand)
# python manage.py db init
# python manage.py db migrate
# python manage.py db upgrade
@manager.command
def runserver():
    """Placeholder manager command; invoked as `python manage.py runserver`."""
    print('running ...')
# python manage.py runserver
manager.add_command('dbM', dbManager)
# python manage.py dbM init

# Only launch the CLI when executed as a script: previously manager.run()
# fired (and could sys.exit) as a side effect of merely importing this module.
if __name__ == '__main__':
    manager.run()
|
13,310 | 96ca975133f41dd632af2b0c41750b9c33e52b06 | from django.urls import path
from bills.views import BillGenericView, BillDetailGenericView
# Namespace for reverse() lookups, e.g. reverse('bills:...').
app_name = 'bills'
urlpatterns = [
    # bill list endpoint
    path('', BillGenericView.as_view()),
    # bill detail endpoint; <pk> uses the default `str` converter —
    # consider <int:pk> if primary keys are integers (TODO confirm)
    path('<pk>/', BillDetailGenericView.as_view())
]
|
13,311 | 02cb44770af6b2c427ed582ecfbcff9618834ebd | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import commands,sys,os,shlex,subprocess,re,datetime,locale
from multiprocessing import Process,Pool
proc = 12
APK_DIR = "../test_apk2/"
CHECK_SMALI_DIR = "../check_smali/"
TEMP_DIR = "../temp/"
OUTPUT_DIR = "./output/"
def findAllFiles(directory):
    """Yield every directory path and every file path under `directory`.

    Order follows os.walk: each directory is yielded first, then its files
    (joined to their containing directory).
    """
    for current_dir, _subdirs, filenames in os.walk(directory):
        yield current_dir
        for filename in filenames:
            yield os.path.join(current_dir, filename)
def deleteWhiteSpace(text):
    """Trim leading and trailing whitespace (including the newline)."""
    return text.strip()
def checkDeleteTargets(text):
    """Blank out smali bookkeeping directives so the matcher skips them.

    Any line containing .local, .locals or .line becomes ""; every other
    line passes through unchanged.
    """
    for marker in (".local", ".locals", ".line"):
        if marker in text:
            return ""
    return text
def checkMultipleLines(smali,text):
    """Return (True, getData) when the snippet file `text` occurs in the smali
    file `smali` as a consecutive run of (non-bookkeeping) lines.

    getData collects every ".param" line seen inside the matched run (commas
    removed) plus, on success, the first line of the smali file (normally the
    .class header identifying the file). On failure returns (False, []).
    """
    tx = open(text,'r')
    text_lines = tx.readlines()
    tx.close()
    text_index = 0
    sl = open(smali,'r')
    smali_lines = sl.readlines()
    sl.close()
    getData = []
    for smali_line in smali_lines:
        smali_line = deleteWhiteSpace(smali_line)
        # .local/.locals/.line directives are blanked so they never break a run
        smali_line = checkDeleteTargets(smali_line)
        #skip no text
        if not smali_line:
            continue
        # substring match against the next expected snippet line
        if deleteWhiteSpace(text_lines[text_index])in smali_line:
            text_index +=1
            if ".param" in smali_line:
                #print smali_line
                text = smali_line.strip()
                text = text.replace(',','')
                getData.append(text)
            # NOTE(review): because of the "+1", success is declared without
            # ever comparing the snippet's last line — confirm this is intended.
            if text_index +1 >= len(text_lines):
                getData.append(smali_lines[0].strip())
                return True,getData
        else:
            # mismatch: restart the run from the top of the snippet
            text_index = 0
            getData = []
    return False,getData
def checkSmali_Multi(apk, targetSmalis, codeSnippets):
    """Run the multi-line matcher over every (smali file, snippet) pair and
    record each hit via writeOutput(), numbering hits per APK.

    Fixes: drops the unused `appName` local (which was also computed with the
    buggy str.lstrip(prefix) character-set idiom) and the pointless getData
    pre-initialisation.
    """
    countNum = 0
    for smali in targetSmalis:
        for codeSnippet in codeSnippets:
            flag, getData = checkMultipleLines(smali, codeSnippet)
            if flag:
                countNum += 1
                writeOutput(apk, smali, getData, codeSnippet, str(countNum))
def doApktool(apkPath):
    """Decompile `apkPath` into ./<apkname>/ using apktool (quiet, force-overwrite)."""
    print "apktool:" + apkPath
    cmd = 'apktool d -q -f %s'%(apkPath)
    subprocess.call(shlex.split(cmd))
def checkTrue(line):
    """Return True when the last comma-separated field of `line` contains the
    literal 0x1 (i.e. the register two lines above the match was set true).

    BUG FIX: the original returned str.find()'s result directly — -1 (not
    found) is truthy and 0 (found at position 0) is falsy, inverting the
    check in both corner cases.
    """
    last_field = line.split(",")[-1]
    return '0x1' in last_field
def getCheckSmaliPath():
    """Collect every .txt snippet file underneath CHECK_SMALI_DIR."""
    snippet_files = []
    for entry in findAllFiles(CHECK_SMALI_DIR):
        name = str(entry)
        if os.path.splitext(name)[1] == ".txt":
            snippet_files.append(name)
    return snippet_files
def getAppSmaliPath(path):
    """Collect every .smali file underneath `path`."""
    smali_files = []
    for entry in findAllFiles(path):
        name = str(entry)
        if os.path.splitext(name)[1] == ".smali":
            smali_files.append(name)
    return smali_files
def getApkPath():
    """Collect every .apk file underneath APK_DIR."""
    return [str(entry) for entry in findAllFiles(APK_DIR)
            if os.path.splitext(str(entry))[1] == ".apk"]
def deleteTmpFolder(path):
    """Recursively delete the decompiled APK folder.

    Replaces the `rm -r` subprocess call, which broke on paths containing
    spaces (shlex word-splitting) and spawned a process per deletion.
    ignore_errors mirrors the original's indifference to rm's exit status.
    """
    import shutil  # local import keeps the module header untouched
    shutil.rmtree(path, ignore_errors=True)
def writeOutput(apk, smali, getData, codeSnippet, countNum):
    """Append one CSV row (apk, smali, .param data..., snippet, hit number)
    to OUTPUT_DIR/<outfName>-output.csv.

    BUG FIX: the original trimmed prefixes with str.lstrip(prefix), which
    removes any leading characters drawn from `prefix` as a character SET
    and so mangled names (e.g. an apk called "app.apk" lost its leading
    letters). Prefixes are now removed properly.
    """
    def _strip_prefix(value, prefix):
        # proper prefix removal (py2-compatible; no str.removeprefix)
        return value[len(prefix):] if value.startswith(prefix) else value

    global outfName
    outPath = OUTPUT_DIR + outfName + "-output.csv"
    if not os.path.isdir(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)  # no need to shell out to mkdir

    # one trailing comma per collected .param entry, as before
    dataText = "".join(data + "," for data in getData)

    apk = _strip_prefix(apk, APK_DIR)
    smali = _strip_prefix(smali, "./")
    codeSnippet = _strip_prefix(codeSnippet, CHECK_SMALI_DIR).strip()
    text = apk + "," + smali + "," + dataText + codeSnippet + "," + countNum + os.linesep
    print(text)
    with open(outPath, 'a') as outPutFile:
        outPutFile.write(text)
def checkSmali(apk, targetSmalis, codeSnippets):
    """Single-line matcher: for each (smali file, snippet file) pair, count
    how many snippet lines match some smali line whose register two lines
    earlier was set to 0x1; when every snippet line matched, record a hit.

    Fixes over the original:
    - writeOutput() was called with four arguments but takes five (TypeError
      on every hit); an empty getData list is now passed.
    - rstrip("¥n") used a literal yen sign (Japanese-keyboard backslash), so
      it stripped trailing 'n' letters from code lines; replaced with "\\n".
    - lstrip("regexp,") removed leading characters from the SET
      {r,e,g,x,p,,}, corrupting patterns; the prefix is now sliced off.
    - the smali file was re-read once per snippet LINE; it is read once per
      smali file.
    """
    countNum = 0
    for smali in targetSmalis:
        smali_lines = [str(line) for line in open(smali, 'r')]
        for codeSnippet in codeSnippets:
            checkSnippetNum = 0
            totalSnippetNum = 0
            for cCode in open(codeSnippet, 'r'):
                totalSnippetNum += 1
                regflag = cCode.split(",")[0] == "regexp"
                cCode = cCode.rstrip("\n").strip()
                if regflag:
                    # drop the literal "regexp," prefix, then compile
                    cCode = cCode[len("regexp,"):].strip()
                    checkText = re.compile(cCode)
                else:
                    checkText = cCode
                for index, cSmali in enumerate(smali_lines):
                    cSmali = cSmali.rstrip("\n").strip()
                    if regflag:
                        flag = checkText.match(cSmali)
                    else:
                        flag = checkText in cSmali
                    if flag:
                        # the register assigned two lines above must be true;
                        # NOTE(review): for index < 2 this wraps to the end of
                        # the file — confirm snippets never match that early.
                        if checkTrue(str(smali_lines[index - 2])):
                            checkSnippetNum += 1
                        break
            if checkSnippetNum >= totalSnippetNum:
                countNum += 1
                writeOutput(apk, smali, [], codeSnippet, str(countNum))
def doDecompile(apk,checkLists,index):
    """Pool worker: decompile one APK, scan its smali tree against every
    snippet, then delete the decompiled folder again."""
    print 'process id:' + str(os.getpid())
    # derive the folder name apktool will create (APK base name, no extension)
    root,ext = os.path.splitext(apk)
    fName = root.split("/")[-1]
    doApktool(apk)
    smaliFName = str("./" + fName + "/smali/")
    smaliPathLists = getAppSmaliPath(smaliFName)
    checkSmali_Multi(apk,smaliPathLists,checkLists)
    deleteTmpFolder(fName)
    # progress report; totalNum is a module-level global set in __main__
    print "%s/%s"%(index+1,totalNum)
def argwrapper(args):
    """Unpack a (callable, *call_args) tuple so Pool.map can dispatch
    arbitrary multi-argument calls."""
    func = args[0]
    call_args = args[1:]
    return func(*call_args)
if __name__ == "__main__":
    # NOTE(review): `global` at module scope is a no-op; outfName/totalNum are
    # already module-level names here.
    global outfName
    d = datetime.datetime.today()
    # timestamp used by writeOutput() to name the CSV report
    outfName = d.strftime("%Y-%m-%d-%H:%M:%S")
    doneNum = 0
    apkLists = getApkPath()
    print "APK Num:"+ str(len(apkLists))
    global totalNum
    totalNum = len(apkLists)
    checkLists = getCheckSmaliPath()
    print "Check Num:"+ str(len(checkLists))
    # decompile loop: fan each APK out to a worker process via Pool.map
    p = Pool()
    func_args = []
    for index,apk in enumerate(apkLists):
        func_args.append((doDecompile,apk,checkLists,index))
    p.map(argwrapper,func_args)
    #argsList = []
    #argsList.append(apk)
    #argsList.append(checkLists)
    #p.map(doDecompile,argsList)
    #job = Process(target=doDecompile,args=(apk,checkLists))
    #jobs.append(job)
    #job.start()
    #doneNum += 1
    #print "%s/%s"%(doneNum,totalNum)
    #[job.join() for job in jobs]
    #print "Finish!"
13,312 | 30bea73a4f5e37309f55025145fda3880abf2020 | import pandas as pd
from pandas.util.testing import assert_frame_equal
pd.options.mode.chained_assignment = None # warning disable
from dot_function import addDot_icd9, addDot_icd10
# from multitasking import parallelize_dataframe, hello
def generic_clean_2018_data(ICD_data=None, I9_desc=None, I10_desc=None, column1=None, column2=None):
    """Load a GEM mapping CSV plus the ICD-9/ICD-10 description tables and
    return one merged, dotted, flag-expanded DataFrame.

    column1/column2 name the first two CSV columns; the merges below join on
    'ICD9' and 'ICD10', so callers are expected to pass exactly those names —
    TODO confirm.
    """
    df = pd.read_csv(ICD_data,
                     names=[column1, column2, "unwanted"],
                     dtype={'unwanted': object})
    description9 = pd.read_excel(I9_desc, names=['ICD9', 'Long_text_9', 'Short_text_9'])
    description10 = pd.read_csv(I10_desc, dtype={'ICD9': object},
                                names=['ICD9_c', 'ICD10', 'unknown', 'Short_text', 'Long_text'])
    # attach the short/long descriptions for both code systems
    df = df.merge(description9, on='ICD9', how='left')
    df = df.merge(description10, on='ICD10', how='left')
    # the third CSV column packs five one-character mapping flags; expand them
    df['APPROX'] = df['unwanted'].str[0]
    df['NOMAP'] = df['unwanted'].str[1]
    df['COMBO'] = df['unwanted'].str[2]
    df['SCENARIO'] = df['unwanted'].str[3]
    df['CHOICE'] = df['unwanted'].str[4]
    # fresh data: nothing obsolete yet (check_icd_with_previous_year sets 1)
    df['Obsolete'] = 0
    df['ICD9_Desc'] = df['Short_text_9']
    df['ICD10_Desc'] = df['Short_text']
    # re-insert the decimal point into the raw codes
    df['ICD9'] = df.ICD9.map(lambda x: addDot_icd9(x))
    df['ICD10'] = df.ICD10.map(lambda x: addDot_icd10(x))
    return df
def check_icd_with_previous_year(new_data=None, old_data=None, column=None):
    """Compare this year's ICD frame against last year's CSV.

    Returns 0 when new_data is missing, the string "Both Frames are
    Identical" when the frames match exactly, otherwise new_data with any
    codes dropped from the new release appended and flagged Obsolete=1.

    Fixes: the bare `except:` that also swallowed real errors (bad column
    name, I/O failures) is narrowed to AssertionError; the removed-in-
    pandas-2.x DataFrame.append is replaced with pd.concat; the filtered
    slice is copied before mutation to avoid chained-assignment surprises.
    """
    if new_data is None:
        return 0
    old_icd_data = pd.read_csv(old_data, sep=',')
    try:
        assert_frame_equal(old_icd_data, new_data)
        return "Both Frames are Identical"
    except AssertionError:
        # frames differ: carry forward codes that vanished from the new release
        icd_not_in_new_df = old_icd_data[~old_icd_data[column].isin(new_data[column])].copy()
        icd_not_in_new_df['Obsolete'] = 1
        if icd_not_in_new_df.empty:
            print("New " + column + " list has all old " + column + " codes in it !")
            return new_data
        return pd.concat([new_data, icd_not_in_new_df], ignore_index=True)
|
13,313 | 27ad3944ed7db2520cf272ccc346e34256ea1287 | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from .views import (
PickingbillAssignView,
WaybillCompleteView,
LineChartJSONView,
PickingbillStatView,
)
urlpatterns = [
    url(r'^pickingbillassign$', PickingbillAssignView.as_view(), name='pickingbill_assign'),
    # NOTE(review): route says "waybillcomplete" but the reverse() name is
    # 'waybill_assign' — confirm templates rely on this name before renaming.
    url(r'^waybillcomplete$', WaybillCompleteView.as_view(), name='waybill_assign'),
    url(r'^pickingbillstat$', PickingbillStatView.as_view(), name='pickingbill_stat'),
    # route spelling "jason" kept as-is; changing it would break existing links
    url(r'^linechartjason1$', LineChartJSONView.as_view(), name='line_chart_json1'),
] |
13,314 | ba28d999e3646deff88b2efbbcaf41d4124a62ba | # Generated by Django 3.1.4 on 2021-01-04 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: widen Info.bio and Info.interests to
    TextField(max_length=512)."""
    dependencies = [
        ('resume', '0003_info_profile_photo'),
    ]
    operations = [
        migrations.AlterField(
            model_name='info',
            name='bio',
            field=models.TextField(max_length=512, verbose_name='Bio'),
        ),
        migrations.AlterField(
            model_name='info',
            name='interests',
            field=models.TextField(max_length=512, verbose_name='Interests'),
        ),
    ]
|
13,315 | 0f729cd2aba0979b5545c314e14bd2fb91543a7e | # helper class to simplify email related tasks
import smtplib
from email.message import EmailMessage
class EmailHelper:
    """Thin best-effort wrapper around smtplib for sending mail via Gmail SSL."""

    def __init__(self):
        # the SMTP connection is created lazily by login()
        self.server = None

    def login(self, user, pwd):
        """Open an SSL connection to smtp.gmail.com:465 and authenticate.

        Best effort: on failure the error is reported, self.server is reset
        to None, and no exception propagates. The original bare `except:`
        also swallowed KeyboardInterrupt/SystemExit and hid the reason.
        """
        try:
            self.server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
            self.server.ehlo()
            self.server.login(user, pwd)
        except (smtplib.SMTPException, OSError) as exc:
            self.server = None  # never keep a half-initialised connection
            print("failed to login to e-mail server: %s" % exc)

    def send(self, src, dest, msg):
        """Send email.message `msg` from `src` to the single address `dest`.

        Silently does nothing when not logged in (original behavior).
        """
        if self.server:
            self.server.sendmail(src, [dest], msg.as_string())

    def close(self):
        """Terminate the SMTP session if one is open (now safe to call twice)."""
        if self.server:
            self.server.quit()
            self.server = None
|
13,316 | 4786bf9be19a70e6ad4595efe5a5da10071a00a6 | import bnf
# Language ::= "[hH]ello, [wW]orld!"
class Language(bnf.Identifier):
    # Matches the literal greeting "hello, world!" with optional capital H/W.
    __default_regex__ = r"[hH]ello, [wW]orld!"
    def onMatch(self, context):
        # Python 2 print statement: report which identifier matched.
        print "Match", self.id
|
13,317 | c88c80eb8f322f742a449c5ed5f046026cf75a9b | import json
import redis
class Semaphore(object):
    """Counting semaphore stored in the Redis integer key "semaphore".

    NOTE(review): port 6378 is non-standard (Redis defaults to 6379) —
    confirm the deployment really listens there. Also note that a
    `sem > 0` check followed by decr() is two round-trips and not atomic
    across processes — verify callers tolerate the race.
    """
    def __init__(self, width):
        # `width` is the initial number of available slots
        self.r = redis.Redis(host="localhost", port=6378, db=0)
        self.r.set("semaphore", width)
    def __gt__(self, operand):
        # coerce GET replies to int so the comparison is numeric
        self.r.set_response_callback("GET", int)
        return self.r.get("semaphore") > operand
    def decr(self):
        # take a slot
        self.r.decr("semaphore", 1)
    def incr(self):
        # release a slot
        self.r.incr("semaphore", 1)
class Stack(object):
    """Redis-backed message buffer on list key "queue".

    NOTE(review): rpush + blpop yields FIFO (queue) order, not LIFO — the
    name "Stack" is misleading; confirm consumers expect FIFO before renaming.
    """
    def __init__(self):
        self.r = redis.Redis(host="localhost", port=6378, db=0)
    def rpush(self, data):
        # JSON-encode so any JSON-serializable payload round-trips
        self.r.rpush("queue", json.dumps(data))
    def blpop(self):
        # block for up to 1s; None on timeout, decoded payload otherwise
        data = self.r.blpop(["queue"], timeout=1)
        return None if not data else json.loads(data[1].decode("utf-8"))
13,318 | 672b870f3c89efcb172305a1837401d20d241895 | import pydot
import os
os.system("cls")
dot_txt = 'digraph G {\n start -> end;\n}'
graph, = pydot.graph_from_dot_data(dot_txt)
graph.write_png('f.png') |
13,319 | ec218570430c8a3a0136cfeee096e7b34fc4424a | from numba import njit, prange
from time import time
import numpy as np
@njit(parallel=True, fastmath=True)
def fast_norm(vec):
    # Row-wise Euclidean norm of a 2-D array, parallelised over rows by numba.
    # fastmath relaxes IEEE ordering, so results may differ in the last ulps
    # from np.linalg.norm(vec, axis=1).
    return np.array([np.sqrt(np.sum(np.power(vec[i,:], 2))) for i in prange(vec.shape[0])])
class KMeansMultithreading:
    """Lloyd's k-means with the distance and centroid kernels JIT-compiled
    and parallelised by numba. Results are exposed via get_centers()
    (k x n_features) and get_partition() (one-hot n_samples x k)."""
    def __init__(self, k=4, max_iter=15, tolerance=1e-4):
        self.k = k
        self.max_iter = max_iter
        # convergence threshold on per-coordinate centroid movement
        self.tolerance = tolerance
    def get_centers(self):
        return self.__centers
    def get_partition(self):
        return self.__w
    def __init_centers(self):
        # k random samples as initial centroids (duplicates possible)
        random_centers = np.random.randint(low=0, high=self.__no_samples, size=self.k)
        return self.__data[random_centers]
    def __update_w(self):
        # hard-assign each sample to its nearest centroid (one-hot row in w)
        self.__w[...] = 0
        distances = np.stack([j for j in KMeansMultithreading.__fast_distances(self.k, self.__data, self.__centers)], axis=-1)
        np.put_along_axis(self.__w, np.argmin(distances, axis=-1)[..., None], 1, axis=-1)
    def __centroids_unchanged(self):
        # converged when every coordinate moved by at most `tolerance`
        differences = np.abs(np.subtract(self.__centers, self.__previous_centers))
        return (differences <= self.tolerance).sum() == np.prod(differences.shape)
    def fit(self, data):
        """Cluster `data` (n_samples x n_features); stops early on convergence."""
        self.__data = data
        self.__no_samples = self.__data.shape[0]
        self.__w = np.zeros(shape = (self.__no_samples, self.k), dtype = np.uint8)
        self.__centers = self.__init_centers()
        t1 = time()
        for i in range(self.max_iter):
            self.__update_w()
            # copy: __fast_update_centroids mutates __centers in place
            self.__previous_centers = self.__centers.copy()
            KMeansMultithreading.__fast_update_centroids(self.k, self.__w, self.__data, self.__centers)
            if self.__centroids_unchanged():
                print(f"Algorithm stopped at iteration: {i}")
                break
        t2 = time()
        print(f"KMeans(multi-threading) time = {t2-t1}")
    # numba kernels: static so the compiled signature carries no `self`;
    # referenced above through the name-mangled class attribute.
    @staticmethod
    @njit(parallel = True, fastmath=True)
    def __fast_distances(k, data, centers):
        # one distance column per centroid, rows parallelised inside fast_norm
        return [(fast_norm(data - centers[i])) for i in prange(k)]
    @staticmethod
    @njit(parallel = True, fastmath=True)
    def __fast_update_centroids(k, w, data, centers):
        # each centroid becomes the mean of its assigned samples
        # (division yields nan for an empty cluster — unhandled upstream)
        for i in prange(k):
            centers[i] = np.divide(np.sum(data[w[:,i] !=0 ], axis=0), w[:,i].sum())
class CMeansMultithreading:
    """Fuzzy c-means with both update kernels JIT-compiled by numba.

    w holds soft memberships (n_samples x C, rows summing to ~1); m is the
    fuzzifier exponent (m=2 is the common default)."""
    def __init__(self, C=3, m=2, max_iter=15, tolerance=1e-4):
        self.C = C
        self.m = m
        self.max_iter = max_iter
        # convergence threshold on per-coordinate centroid movement
        self.tolerance = tolerance
    def get_centers(self):
        return self.__centers
    def get_partition(self):
        return self.__w
    @staticmethod
    @njit(parallel=True, fastmath=True)
    def __fast_update_centroids(c, m, data, w, centers):
        # centroid k = sum_x data[x]*w[x,k]^m / sum_x w[x,k]^m (in-place)
        for k in prange(c):
            num = np.zeros_like(centers[k])
            denom = 0.0
            for x in prange(data.shape[0]):
                num += data[x]*(w[x,k])**m
                denom += (w[x,k])**m
            centers[k] = num/denom
    @staticmethod
    @njit(parallel=True, fastmath=True)
    def __fast_update_w(c, m, data, w, centers):
        # standard FCM membership update:
        # w[i,j] = 1 / sum_k (d_ij/d_ik)^(2/(m-1))
        # (zero distance to a centroid would divide by zero — unhandled)
        for i in prange(data.shape[0]):
            for j in prange(c):
                s = 0.
                num = np.linalg.norm(data[i] - centers[j])
                for k in prange(c):
                    denom = np.linalg.norm(data[i] - centers[k])
                    fraction = num/denom
                    s+=(fraction)**(2/(m-1))
                w[i,j] = 1/s
    def __centroids_unchanged(self):
        # converged when every coordinate moved by at most `tolerance`
        differences = np.abs(np.subtract(self.__centers, self.__previous_centers))
        return (differences <= self.tolerance).sum() == np.prod(differences.shape)
    def fit(self, data):
        """Cluster `data` (n_samples x n_features); memberships start random.

        NOTE(review): on the first iteration __previous_centers copies an
        uninitialised np.empty buffer; the convergence test only matters
        from iteration 2 onward.
        """
        self.__data = data
        self.__no_samples = self.__data.shape[0]
        self.__features = self.__data.shape[1]
        self.__w = np.random.rand(self.__no_samples, self.C)
        self.__centers = np.empty(shape = (self.C, self.__features))
        t1 = time()
        for i in range(self.max_iter):
            self.__previous_centers = self.__centers.copy()
            CMeansMultithreading.__fast_update_centroids(self.C, self.m, self.__data, self.__w, self.__centers)
            CMeansMultithreading.__fast_update_w(self.C, self.m, self.__data, self.__w, self.__centers)
            if self.__centroids_unchanged():
                print(f"Algorithm stopped at iteration: {i}")
                break
        t2 = time()
        print(f"CMeans(multi-threading) time = {t2-t1}")
class KMeans:
    """Plain-NumPy Lloyd's k-means.

    Hard assignments live in a one-hot matrix w (n_samples x k); results are
    read back through get_centers() and get_partition().
    """

    def __init__(self, k=4, max_iter=15, tolerance=1e-4):
        self.k = k
        self.max_iter = max_iter
        # convergence threshold on per-coordinate centroid movement
        self.tolerance = tolerance

    def get_centers(self):
        """Current centroid array, shape (k, n_features)."""
        return self.__centers

    def get_partition(self):
        """One-hot membership matrix, shape (n_samples, k)."""
        return self.__w

    def __init_centers(self):
        # pick k random samples (duplicates possible) as initial centroids
        chosen = np.random.randint(low=0, high=self.__no_samples, size=self.k)
        return self.__data[chosen]

    def __update_w(self):
        # hard-assign every sample to its nearest centroid
        self.__w[...] = 0
        distance_columns = [np.linalg.norm(self.__data - self.__centers[i], axis=-1)
                            for i in range(self.k)]
        distances = np.stack(distance_columns, axis=-1)
        nearest = np.argmin(distances, axis=-1)[..., None]
        np.put_along_axis(self.__w, nearest, 1, axis=-1)

    def __update_centroids(self):
        # each centroid becomes the mean of the samples assigned to it
        new_centers = []
        for i in range(self.k):
            members = self.__data[self.__w[:, i] != 0]
            new_centers.append(np.divide(np.sum(members, axis=0), self.__w[:, i].sum()))
        self.__centers = np.stack(new_centers, axis=0)

    def __centroids_unchanged(self):
        # converged when every coordinate moved by at most `tolerance`
        movement = np.abs(np.subtract(self.__centers, self.__previous_centers))
        return (movement <= self.tolerance).sum() == np.prod(movement.shape)

    def fit(self, data):
        """Cluster `data` (n_samples x n_features); stops early on convergence."""
        self.__data = data
        self.__no_samples = self.__data.shape[0]
        self.__w = np.zeros(shape=(self.__no_samples, self.k), dtype=np.uint8)
        self.__centers = self.__init_centers()
        t_start = time()
        for iteration in range(self.max_iter):
            self.__update_w()
            self.__previous_centers = self.__centers
            self.__update_centroids()
            if self.__centroids_unchanged():
                print(f"Algorithm stopped at iteration: {iteration}")
                break
        t_end = time()
        print(f"KMeans(iterative) time = {t_end-t_start}")
class CMeans:
    """Plain-NumPy fuzzy c-means; w holds soft memberships (n_samples x C),
    m is the fuzzifier exponent."""
    def __init__(self, C = 3, m = 2, max_iter = 15, tolerance = 1e-4):
        self.C = C
        self.m = m
        self.max_iter = max_iter
        # convergence threshold on per-coordinate centroid movement
        self.tolerance = tolerance
    def get_centers(self):
        return self.__centers
    def get_partition(self):
        return self.__w
    def __update_centroids(self):
        # centroid i = weighted mean of all samples with weights w[:,i]^m
        self.__centers = np.stack([np.divide(np.sum(np.multiply(self.__data, np.power(self.__w[:,i], self.m)[..., None]) ,axis = 0), np.sum(np.power(self.__w[:,i], self.m))) for i in range(self.C)], axis= 0)
    def __update_w(self):
        # standard FCM update: w[:,j] = 1 / sum_k (d_j/d_k)^(2/(m-1)),
        # fully vectorised over samples (zero distances would divide by zero)
        self.__w = np.divide(1, np.stack([np.array([np.power(np.divide(np.linalg.norm(self.__data - self.__centers[j], axis=-1), np.linalg.norm(self.__data - self.__centers[k], axis = -1)), 2/(self.m-1)) for k in range(self.C)]).sum(axis = 0) for j in range(self.C)], axis = -1))
    def __centroids_unchanged(self):
        # converged when every coordinate moved by at most `tolerance`
        differences = np.abs(np.subtract(self.__centers, self.__previous_centers))
        return (differences <= self.tolerance).sum() == np.prod(differences.shape)
    def fit(self, data):
        """Cluster `data` (n_samples x n_features); memberships start random.

        NOTE(review): the first convergence test compares against a copy of
        an uninitialised np.empty buffer; it is only meaningful from the
        second iteration onward.
        """
        self.__data = data
        self.__no_samples = self.__data.shape[0]
        self.__features = self.__data.shape[1]
        self.__w = np.random.rand(self.__no_samples, self.C)
        self.__centers = np.empty(shape = (self.C, self.__features))
        t1 = time()
        for i in range(self.max_iter):
            self.__previous_centers = self.__centers.copy()
            self.__update_centroids()
            self.__update_w()
            if self.__centroids_unchanged():
                print(f"Algorithm stopped at iteration: {i}")
                break
        t2 = time()
        print(f"CMeans(iterative) time = {t2-t1}")
13,320 | bf4c4d2d582917b4ff4de6d40f20b1f1f5524368 | import random as rnd
def random_permutation(iterable, r=None):
    """Return a random r-length permutation of `iterable` as a tuple.

    When r is None the whole pool is permuted — a full shuffle that never
    mutates the input.
    """
    pool = tuple(iterable)
    if r is None:
        r = len(pool)
    return tuple(rnd.sample(pool, r))
def random_shuffle(members):
    """Shuffle `members` in place and return the new order as a tuple.

    NOTE: the argument list itself is mutated (rnd.shuffle is in-place).
    """
    shuffled = members
    rnd.shuffle(shuffled)
    return tuple(shuffled)
|
13,321 | f59678e9bae581ee0c533b632becb3091b91011a | #!/usr/bin/env python
#
# Copyright (c) 2015 10X Genomics, Inc. All rights reserved.
import tenkit.safe_json
from crdna.singlecell_dna_cnv import cluster_jedna
import longranger.cnv.coverage_matrix as coverage_matrix
import martian
__MRO__ = """
stage GENERATE_CLUSTERS(
in h5 normalized_singlecell_profiles,
in string reference_path,
in bool skip_clustering,
#
out json clusters,
#
src py "stages/copy_number_processor/profile_clusterer/generate_clusters",
)
"""
#...............................................................................
def split(args):
    """Not used: this stage runs as a single chunk, so Martian never splits."""
    raise Exception("Split is unimplemented")
#...............................................................................
def main(args, outs):
    """Cluster normalized single-cell CNV profiles and write the cluster
    assignments to outs.clusters as JSON.

    Falls back to a single cluster containing every cell when clustering is
    skipped via args.skip_clustering, or when clustering raises (an alarm is
    reported in that case).
    """
    normalized_singlecell_profiles, mask = coverage_matrix.load_matrix(
        args.normalized_singlecell_profiles, args.reference_path)
    print('DEBUG generate_final_clusters/__init__.main():')
    print('normalized_singlecell_profiles[0].shape')
    print(normalized_singlecell_profiles[0].shape)
    ncells = normalized_singlecell_profiles[0].shape[0]
    # default result: all cells in one cluster
    results = [range(ncells)]
    try:
        if args.skip_clustering:
            print('Skipping clustering.')
        else:
            # NOTE: an earlier guard skipped clustering above 500 cells but
            # was disabled ("if True:"), making its alarm branch unreachable;
            # the dead branch has been removed.
            results = cluster_jedna.cluster(
                normalized_singlecell_profiles, mask, n_merge=25, score_cutoff=5)
    except Exception as error:
        martian.alarm("Clustering encountered an exception. Putting all cells in one cluster. Error: %s" % repr(error))
    # with-statement guarantees the handle is closed even if jsonify raises
    with open(outs.clusters, 'w') as out_file:
        out_file.write(tenkit.safe_json.safe_jsonify(results))
# main
#...............................................................................
def join(args, outs, chunk_defs, chunk_outs):
    """This stage runs as a single chunk; joining is intentionally unsupported."""
    raise NotImplementedError("Join is unimplemented")
# join
|
13,322 | 4a8a83176724b888b1f5e8a89169d12d1cb964e1 | import unittest
import multiprocessing
import selectable
class CustomObject(object):
    """Plain user-defined type; the tests below send an instance through the pipe."""
    pass
class Proc(multiprocessing.Process):
    """Echo worker: replies ['okay', msg] to every message received.

    Uses the right-hand end of the pipe and exits after echoing 'shutdown'.
    """

    def __init__(self, pipe):
        self.pipe = pipe
        super(Proc, self).__init__()

    def run(self):
        self.pipe.use_right()
        while True:
            message = self.pipe.read()
            self.pipe.write(['okay', message])
            if message == 'shutdown':
                break
class TestPipe(unittest.TestCase):
    """End-to-end check of selectable.Pipe across a process boundary."""

    def test_multiprocess(self):
        # Parent keeps the left end; the child (Proc) takes the right end.
        pipe = selectable.Pipe()
        proc = Proc(pipe)
        proc.daemon = True
        proc.start()
        pipe.use_left()
        # Several value types must round-trip through the pipe.
        pipe.write('hello')
        pipe.write(5)
        pipe.write(CustomObject())
        self.assertEqual(pipe.read(), ['okay', 'hello'])
        self.assertEqual(pipe.read(), ['okay', 5])
        [okay, obj] = pipe.read()
        self.assertEqual(okay, 'okay')
        # The object crosses a process boundary, so compare types, not identity.
        self.assertEqual(type(obj), type(CustomObject()))
        pipe.write('shutdown')
        proc.join()
        # The child echoes 'shutdown' before exiting (see Proc.run).
        self.assertEqual(pipe.read(), ['okay', 'shutdown'])
        self.assertFalse(proc.is_alive())
        pipe.close()
|
13,323 | 0e09836723a5a8d685c3b799d529b12e356fd6e8 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 12 01:34:29 2020
@author: Noah Lee, lee.no@northeastern.edu
"""
from pystreamable import StreamableApi
from dotenv import load_dotenv
import os
class StreamableUpload(object):
    """Uploads video files to Streamable with credentials from the environment.

    Credentials are read from STREAMABLE_USERNAME / STREAMABLE_PASSWORD,
    loaded from a .env file (or the process environment) via python-dotenv.
    """

    def __init__(self):
        load_dotenv()
        username = os.getenv('STREAMABLE_USERNAME')
        password = os.getenv('STREAMABLE_PASSWORD')
        # Fail fast with a clear message instead of an opaque API error later.
        if not username or not password:
            raise RuntimeError(
                'STREAMABLE_USERNAME and STREAMABLE_PASSWORD must be set')
        self.streamable = StreamableApi(username, password)

    def __call__(self, filepath):
        """Upload the video at *filepath* and return the API response.

        Bug fix: the original discarded the upload result, so callers could
        not obtain the shortcode/URL of the uploaded video.
        """
        return self.streamable.upload_video(filepath)
|
13,324 | d3bb5b188f1e453cba6feb5eeba0be6668d19be5 | from channels.routing import route
from channels.routing import ProtocolTypeRouter, URLRouter
from main.routing import websockets
application = ProtocolTypeRouter({
"websocket": websockets,
}) |
13,325 | 05d7fb6dff824aba9bc573ee966b8881c2b32d4c | """
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.internal.extension_dict
import google.protobuf.message
import pyatv.protocols.mrp.protobuf.PlayerPath_pb2
import pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2
import typing
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor = ...
# Typed stub for the SendError protobuf message (enum-only wrapper).
# Generated by mypy-protobuf: change the .proto, not this file.
class SendError(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    class Enum(_Enum, metaclass=_EnumEnumTypeWrapper):
        pass
    class _Enum:
        V = typing.NewType('V', builtins.int)
    class _EnumEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Enum.V], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
        # This is None in original message definitions (no-go with python)
        NoError = SendError.Enum.V(0)
        ApplicationNotFound = SendError.Enum.V(1)
        ConnectionFailed = SendError.Enum.V(2)
        Ignored = SendError.Enum.V(3)
        CouldNotLaunchApplication = SendError.Enum.V(4)
        TimedOut = SendError.Enum.V(5)
        OriginDoesNotExist = SendError.Enum.V(6)
        InvalidOptions = SendError.Enum.V(7)
        NoCommandHandlers = SendError.Enum.V(8)
        ApplicationNotInstalled = SendError.Enum.V(9)
        NotSupported = SendError.Enum.V(10)
    # The generator emits the members twice: on the wrapper above and again
    # at message scope for attribute-style access (SendError.NoError).
    # This is None in original message definitions (no-go with python)
    NoError = SendError.Enum.V(0)
    ApplicationNotFound = SendError.Enum.V(1)
    ConnectionFailed = SendError.Enum.V(2)
    Ignored = SendError.Enum.V(3)
    CouldNotLaunchApplication = SendError.Enum.V(4)
    TimedOut = SendError.Enum.V(5)
    OriginDoesNotExist = SendError.Enum.V(6)
    InvalidOptions = SendError.Enum.V(7)
    NoCommandHandlers = SendError.Enum.V(8)
    ApplicationNotInstalled = SendError.Enum.V(9)
    NotSupported = SendError.Enum.V(10)
    def __init__(self,
        ) -> None: ...
global___SendError = SendError
# Typed stub for the HandlerReturnStatus protobuf message (enum-only wrapper).
class HandlerReturnStatus(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    class Enum(_Enum, metaclass=_EnumEnumTypeWrapper):
        pass
    class _Enum:
        V = typing.NewType('V', builtins.int)
    class _EnumEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Enum.V], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
        Success = HandlerReturnStatus.Enum.V(0)
        NoSuchContent = HandlerReturnStatus.Enum.V(1)
        CommandFailed = HandlerReturnStatus.Enum.V(2)
        NoActionableNowPlayingItem = HandlerReturnStatus.Enum.V(10)
        DeviceNotFound = HandlerReturnStatus.Enum.V(20)
        UIKitLegacy = HandlerReturnStatus.Enum.V(3)
        SkipAdProhibited = HandlerReturnStatus.Enum.V(100)
        QueueIsUserCurated = HandlerReturnStatus.Enum.V(101)
        UserModifiedQueueDisabled = HandlerReturnStatus.Enum.V(102)
        UserQueueModificationNotSupportedForCurrentItem = HandlerReturnStatus.Enum.V(103)
        SubscriptionRequiredForSharedQueue = HandlerReturnStatus.Enum.V(104)
        InsertionPositionNotSpecified = HandlerReturnStatus.Enum.V(105)
        InvalidInsertionPosition = HandlerReturnStatus.Enum.V(106)
        RequestParametersOutOfBounds = HandlerReturnStatus.Enum.V(107)
        SkipLimitReached = HandlerReturnStatus.Enum.V(108)
        AuthenticationFailure = HandlerReturnStatus.Enum.V(401)
        MediaServiceUnavailable = HandlerReturnStatus.Enum.V(501)
    # Second copy at message scope (generator artifact; see SendError).
    Success = HandlerReturnStatus.Enum.V(0)
    NoSuchContent = HandlerReturnStatus.Enum.V(1)
    CommandFailed = HandlerReturnStatus.Enum.V(2)
    NoActionableNowPlayingItem = HandlerReturnStatus.Enum.V(10)
    DeviceNotFound = HandlerReturnStatus.Enum.V(20)
    UIKitLegacy = HandlerReturnStatus.Enum.V(3)
    SkipAdProhibited = HandlerReturnStatus.Enum.V(100)
    QueueIsUserCurated = HandlerReturnStatus.Enum.V(101)
    UserModifiedQueueDisabled = HandlerReturnStatus.Enum.V(102)
    UserQueueModificationNotSupportedForCurrentItem = HandlerReturnStatus.Enum.V(103)
    SubscriptionRequiredForSharedQueue = HandlerReturnStatus.Enum.V(104)
    InsertionPositionNotSpecified = HandlerReturnStatus.Enum.V(105)
    InvalidInsertionPosition = HandlerReturnStatus.Enum.V(106)
    RequestParametersOutOfBounds = HandlerReturnStatus.Enum.V(107)
    SkipLimitReached = HandlerReturnStatus.Enum.V(108)
    AuthenticationFailure = HandlerReturnStatus.Enum.V(401)
    MediaServiceUnavailable = HandlerReturnStatus.Enum.V(501)
    def __init__(self,
        ) -> None: ...
global___HandlerReturnStatus = HandlerReturnStatus
# Typed stub for the SendCommandStatusCode protobuf message (same value set
# as HandlerReturnStatus, distinct proto type).
class SendCommandStatusCode(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    class Enum(_Enum, metaclass=_EnumEnumTypeWrapper):
        pass
    class _Enum:
        V = typing.NewType('V', builtins.int)
    class _EnumEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Enum.V], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
        Success = SendCommandStatusCode.Enum.V(0)
        NoSuchContent = SendCommandStatusCode.Enum.V(1)
        CommandFailed = SendCommandStatusCode.Enum.V(2)
        NoActionableNowPlayingItem = SendCommandStatusCode.Enum.V(10)
        DeviceNotFound = SendCommandStatusCode.Enum.V(20)
        UIKitLegacy = SendCommandStatusCode.Enum.V(3)
        SkipAdProhibited = SendCommandStatusCode.Enum.V(100)
        QueueIsUserCurated = SendCommandStatusCode.Enum.V(101)
        UserModifiedQueueDisabled = SendCommandStatusCode.Enum.V(102)
        UserQueueModificationNotSupportedForCurrentItem = SendCommandStatusCode.Enum.V(103)
        SubscriptionRequiredForSharedQueue = SendCommandStatusCode.Enum.V(104)
        InsertionPositionNotSpecified = SendCommandStatusCode.Enum.V(105)
        InvalidInsertionPosition = SendCommandStatusCode.Enum.V(106)
        RequestParametersOutOfBounds = SendCommandStatusCode.Enum.V(107)
        SkipLimitReached = SendCommandStatusCode.Enum.V(108)
        AuthenticationFailure = SendCommandStatusCode.Enum.V(401)
        MediaServiceUnavailable = SendCommandStatusCode.Enum.V(501)
    # Second copy at message scope (generator artifact; see SendError).
    Success = SendCommandStatusCode.Enum.V(0)
    NoSuchContent = SendCommandStatusCode.Enum.V(1)
    CommandFailed = SendCommandStatusCode.Enum.V(2)
    NoActionableNowPlayingItem = SendCommandStatusCode.Enum.V(10)
    DeviceNotFound = SendCommandStatusCode.Enum.V(20)
    UIKitLegacy = SendCommandStatusCode.Enum.V(3)
    SkipAdProhibited = SendCommandStatusCode.Enum.V(100)
    QueueIsUserCurated = SendCommandStatusCode.Enum.V(101)
    UserModifiedQueueDisabled = SendCommandStatusCode.Enum.V(102)
    UserQueueModificationNotSupportedForCurrentItem = SendCommandStatusCode.Enum.V(103)
    SubscriptionRequiredForSharedQueue = SendCommandStatusCode.Enum.V(104)
    InsertionPositionNotSpecified = SendCommandStatusCode.Enum.V(105)
    InvalidInsertionPosition = SendCommandStatusCode.Enum.V(106)
    RequestParametersOutOfBounds = SendCommandStatusCode.Enum.V(107)
    SkipLimitReached = SendCommandStatusCode.Enum.V(108)
    AuthenticationFailure = SendCommandStatusCode.Enum.V(401)
    MediaServiceUnavailable = SendCommandStatusCode.Enum.V(501)
    def __init__(self,
        ) -> None: ...
global___SendCommandStatusCode = SendCommandStatusCode
# Typed stub for the SendCommandResultType protobuf message (enum-only wrapper).
class SendCommandResultType(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    class Enum(_Enum, metaclass=_EnumEnumTypeWrapper):
        pass
    class _Enum:
        V = typing.NewType('V', builtins.int)
    class _EnumEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Enum.V], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
        Dialog = SendCommandResultType.Enum.V(1)
        Error = SendCommandResultType.Enum.V(2)
        Custom = SendCommandResultType.Enum.V(999)
    # Second copy at message scope (generator artifact; see SendError).
    Dialog = SendCommandResultType.Enum.V(1)
    Error = SendCommandResultType.Enum.V(2)
    Custom = SendCommandResultType.Enum.V(999)
    def __init__(self,
        ) -> None: ...
global___SendCommandResultType = SendCommandResultType
# Typed stub for SendCommandResultStatus: a status code plus an optional
# opaque custom payload.  Fields 3/4 of the .proto are not mapped (see the
# generator's inline comments below).
class SendCommandResultStatus(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    STATUSCODE_FIELD_NUMBER: builtins.int
    TYPE_FIELD_NUMBER: builtins.int
    CUSTOMDATA_FIELD_NUMBER: builtins.int
    CUSTOMDATATYPE_FIELD_NUMBER: builtins.int
    statusCode: global___SendCommandStatusCode.Enum.V = ...
    type: global___SendCommandResultType.Enum.V = ...
    # optional ... dialog = 3;
    # optional ... error = 4;
    customData: builtins.bytes = ...
    customDataType: typing.Text = ...
    def __init__(self,
        *,
        statusCode : typing.Optional[global___SendCommandStatusCode.Enum.V] = ...,
        type : typing.Optional[global___SendCommandResultType.Enum.V] = ...,
        customData : typing.Optional[builtins.bytes] = ...,
        customDataType : typing.Optional[typing.Text] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"customData",b"customData",u"customDataType",b"customDataType",u"statusCode",b"statusCode",u"type",b"type"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"customData",b"customData",u"customDataType",b"customDataType",u"statusCode",b"statusCode",u"type",b"type"]) -> None: ...
global___SendCommandResultStatus = SendCommandResultStatus
# Typed stub for SendCommandResult: per-player send error plus a repeated
# list of per-status results.
class SendCommandResult(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    PLAYERPATH_FIELD_NUMBER: builtins.int
    SENDERROR_FIELD_NUMBER: builtins.int
    STATUSES_FIELD_NUMBER: builtins.int
    @property
    def playerPath(self) -> pyatv.protocols.mrp.protobuf.PlayerPath_pb2.PlayerPath: ...
    sendError: global___SendError.Enum.V = ...
    @property
    def statuses(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___SendCommandResultStatus]: ...
    def __init__(self,
        *,
        playerPath : typing.Optional[pyatv.protocols.mrp.protobuf.PlayerPath_pb2.PlayerPath] = ...,
        sendError : typing.Optional[global___SendError.Enum.V] = ...,
        statuses : typing.Optional[typing.Iterable[global___SendCommandResultStatus]] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"playerPath",b"playerPath",u"sendError",b"sendError"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"playerPath",b"playerPath",u"sendError",b"sendError",u"statuses",b"statuses"]) -> None: ...
global___SendCommandResult = SendCommandResult
# Typed stub for SendCommandResultMessage, the top-level result wrapper; it
# is attached to ProtocolMessage via the extension declared at the bottom.
class SendCommandResultMessage(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor = ...
    SENDERROR_FIELD_NUMBER: builtins.int
    HANDLERRETURNSTATUS_FIELD_NUMBER: builtins.int
    HANDLERRETURNSTATUSDATAS_FIELD_NUMBER: builtins.int
    COMMANDID_FIELD_NUMBER: builtins.int
    PLAYERPATH_FIELD_NUMBER: builtins.int
    COMMANDRESULT_FIELD_NUMBER: builtins.int
    sendError: global___SendError.Enum.V = ...
    handlerReturnStatus: global___HandlerReturnStatus.Enum.V = ...
    @property
    def handlerReturnStatusDatas(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bytes]: ...
    commandID: typing.Text = ...
    @property
    def playerPath(self) -> pyatv.protocols.mrp.protobuf.PlayerPath_pb2.PlayerPath: ...
    @property
    def commandResult(self) -> global___SendCommandResult: ...
    def __init__(self,
        *,
        sendError : typing.Optional[global___SendError.Enum.V] = ...,
        handlerReturnStatus : typing.Optional[global___HandlerReturnStatus.Enum.V] = ...,
        handlerReturnStatusDatas : typing.Optional[typing.Iterable[builtins.bytes]] = ...,
        commandID : typing.Optional[typing.Text] = ...,
        playerPath : typing.Optional[pyatv.protocols.mrp.protobuf.PlayerPath_pb2.PlayerPath] = ...,
        commandResult : typing.Optional[global___SendCommandResult] = ...,
        ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal[u"commandID",b"commandID",u"commandResult",b"commandResult",u"handlerReturnStatus",b"handlerReturnStatus",u"playerPath",b"playerPath",u"sendError",b"sendError"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal[u"commandID",b"commandID",u"commandResult",b"commandResult",u"handlerReturnStatus",b"handlerReturnStatus",u"handlerReturnStatusDatas",b"handlerReturnStatusDatas",u"playerPath",b"playerPath",u"sendError",b"sendError"]) -> None: ...
global___SendCommandResultMessage = SendCommandResultMessage
# Extension descriptor attaching SendCommandResultMessage to ProtocolMessage.
sendCommandResultMessage: google.protobuf.internal.extension_dict._ExtensionFieldDescriptor[pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2.ProtocolMessage, global___SendCommandResultMessage] = ...
|
13,326 | a577f843667e2ec0c990141f7935b96bdb7f9f26 | a = 5
def hi():
return 'hi'
def am_i_main():
    """Return True when this module is being run as a script."""
    is_script = (__name__ == '__main__')
    return is_script
13,327 | 1be0c498c306cf6032e2f9b3bc7c1216caf17283 | #!/usr/bin/python
"""
=============================================================================
ROCK MASS SLIDING
=============================================================================
Calculate the factor of safety for a rock mass above a sliding surface.
"""
from argparse import ArgumentParser
from math import sin, cos, tan, radians
# ============================================================================ #
# SCRIPT ARGUMENTS
# ============================================================================ #
# Command-line interface: gravity and either a bolt force or a target FOS
# are optional; geometry/material inputs are required.
parser = ArgumentParser()
parser.add_argument("-g", "--gravity", help="set the value of gravity",
                    type=float)
exclusive = parser.add_mutually_exclusive_group()
exclusive.add_argument("-b", "--bolt", type=float,
                       help="include a rock bolt, normal to slope")
exclusive.add_argument("-F", "--fos", type=float,
                       help="specify a FOS value to calculate a bolt for")
required = parser.add_argument_group('required arguments')
required.add_argument("-a", "--angle", help="sliding surface angle",
                      type=float, required=True)
required.add_argument("-f", "--friction", help="rock angle of friction",
                      type=float, required=True)
required.add_argument("-A", "--area", help="contact area of the rock mass",
                      type=float, required=True)
required.add_argument("-m", "--mass", help="mass of the rock block",
                      type=float, required=True)
args = parser.parse_args()

# Bug fix: compare against None rather than truthiness so that explicit
# zero values (e.g. "-b 0" or "-g 0") are not silently replaced by defaults.
rock_bolt = args.bolt if args.bolt is not None else 0
gravity = args.gravity if args.gravity is not None else 9.81

rock_mass = args.mass
slope_angle = args.angle
friction_angle = args.friction
contact_area = args.area
required_fos = args.fos
# ============================================================================ #
# FUNCTIONS
# ============================================================================ #
# TODO: add doc strings for functions
def force(mass, grav):
    """Weight of the block in kN, from mass (kg) and gravity (m/s^2)."""
    weight_newtons = mass * grav
    return weight_newtons / 1000  # Force in kN
def normal(total_force, slope, bolt_force):
    """Normal force on the sliding plane: weight component plus bolt force (kN)."""
    weight_component = total_force * cos(radians(slope))
    return weight_component + bolt_force
def shear(total_force, slope):
    """Shear (driving) force along the sliding plane in kN."""
    return sin(radians(slope)) * total_force
def normal_stress(normal_force, area):
    """Normal stress on the contact area in kN/m^2."""
    stress = normal_force / area
    return stress
def shear_stress(shear_force, area):
    """Shear stress on the contact area in kN/m^2."""
    stress = shear_force / area
    return stress
def capacity(norm_stress, friction):
    """Shear capacity from normal stress and friction angle (Mohr-Coulomb, no cohesion)."""
    return tan(radians(friction)) * norm_stress
def fos(mass_capacity, mass_demand):
    """Factor of safety: capacity divided by demand."""
    ratio = mass_capacity / mass_demand
    return ratio
# Functions for determining bolt tension required for FOS:
def capacity_required(fos_req, mass_demand):
    """Shear capacity needed to reach the requested factor of safety."""
    return mass_demand * fos_req
def normal_stress_required(cap_req, friction):
    """Normal stress needed to mobilise the required capacity (inverse of capacity)."""
    return cap_req / tan(radians(friction))
def normal_required(normal_stress_req, area):
    """Normal force corresponding to the required normal stress over the area."""
    return area * normal_stress_req
def tension_required(normal_req, block_force, slope):
    """Bolt tension needed: required normal force minus the weight's normal component."""
    weight_component = block_force * cos(radians(slope))
    return normal_req - weight_component
# ============================================================================ #
# MAIN SCRIPT BLOCK
# ============================================================================ #
if __name__ == "__main__":
if not required_fos:
fos_value = fos(
capacity(
normal_stress(
normal(force(rock_mass, gravity), slope_angle, rock_bolt),
contact_area
),
friction_angle
),
shear_stress(
shear(force(rock_mass, gravity), slope_angle),
contact_area
)
)
print("{:0.3f}".format(fos_value))
else:
demand = shear_stress(
shear(force(rock_mass, gravity), slope_angle),
contact_area
)
tension_value = tension_required(
normal_required(
normal_stress_required(
capacity_required(required_fos, demand),
friction_angle
),
contact_area
),
force(rock_mass, gravity),
slope_angle
)
print("{:0.2f}kN".format(tension_value))
# TODO: add option for more verbose output, with individual calc results
|
13,328 | 7a00798935599b240f29e53e5b5e300bf74bf08b | """Pytorch Lightning Sphinx theme.
From https://github.com/shiftlab/lightning_sphinx_theme.
"""
from os import path
__version__ = "0.0.28"
__version_full__ = __version__
def get_html_theme_path():
    """Return the absolute path of the directory containing the theme package.

    Note: a single path string is returned (the parent of this package's
    directory), not a list.
    """
    cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
    return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
    """Sphinx extension hook: register this package directory as an HTML theme."""
    app.add_html_theme(
        "pt_lightning_sphinx_theme", path.abspath(path.dirname(__file__))
    )
|
13,329 | f9712a63277f6904c72b69c4c022ee4c9266a718 | from os.path import isabs, join
import sys
import subprocess
try:
    # Probe for numba in a subprocess so a broken installation cannot crash
    # this import.  Security/robustness fix: argv-list form instead of a
    # shell string built with format() (no shell involved at all).
    subprocess.check_output([sys.executable, '-c', 'import numba'])
    print('numba available, importing jit')
    from numba import jit
except Exception:
    print('cannot import numba, creating dummy jit definition')
    from functools import wraps

    def jit(function):
        """No-op stand-in for numba.jit: return a transparent wrapper.

        functools.wraps preserves the decorated function's metadata
        (__name__, __doc__), which the original dummy lost.
        """
        @wraps(function)
        def wrapper(*args, **kwargs):
            return function(*args, **kwargs)
        return wrapper
def compose_path(file_path, reference_path):
    """Resolve file_path against reference_path.

    Absolute paths, and any path when the reference is empty/None, are
    returned unchanged.
    """
    if not reference_path or isabs(file_path):
        return file_path
    return join(reference_path, file_path)
|
13,330 | 8599475bbcd08c1241b5bf208f47296b05796e5d | from random import shuffle
import time
from random import randint
import timeit
import matplotlib as mpl
import matplotlib.pyplot as plt
def desenhaGrafico(x,y,y2, xl = "Entradas", yl = "Saídas"):
    """Plot both timing series against the input sizes.

    x  -- input sizes; y -- times for shuffled input; y2 -- times for the
    reversed input.  Axis labels default to Portuguese ("inputs"/"outputs").
    """
    fig = plt.figure(figsize=(10, 8))
    ax = fig.add_subplot(111)
    ax.plot(x,y, label = "Tempo no caso qualquer")
    ax.plot(x,y2, label = "Tempo na lista invertida")
    # Anchor the legend to the figure corner so it does not cover the curves.
    ax.legend(bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)
    plt.ylabel(yl)
    plt.xlabel(xl)
    plt.show()
def geraLista(tam):
    """Return a random permutation of the integers 1..tam."""
    valores = list(range(1, tam + 1))
    shuffle(valores)
    return valores
def geraLista2(tam):
    """Return the descending list tam..1 (worst-case input for the benchmark).

    Bug fix: the original used range(tam-1, 0, -1), producing only tam-1
    elements, so the "reversed" benchmark input was one element shorter
    than the shuffled list produced by geraLista(tam).
    """
    return list(range(tam, 0, -1))
# Input sizes to benchmark, and accumulators for the elapsed times
# (y: shuffled input, y2: reversed input; z/z2 are reused as scratch timers).
x2 = [100000,200000,300000,400000,500000,1000000,2000000]
y = []
y2= []
z = []
z2 = []
def merge_iter(lst1, lst2):
    """Merge two sorted lists into a new sorted list.

    Performance fix: the original re-sliced an input list on every step
    (lst = lst[1:]), making the merge O(n^2); index pointers keep it O(n).
    Tie-breaking matches the original exactly (equal elements are taken
    from lst2 first).
    """
    merged = []
    i = j = 0
    while i < len(lst1) and j < len(lst2):
        if lst1[i] < lst2[j]:
            merged.append(lst1[i])
            i += 1
        else:
            merged.append(lst2[j])
            j += 1
    # At most one tail is non-empty; append whatever remains.
    merged.extend(lst1[i:])
    merged.extend(lst2[j:])
    return merged
def middle(seq):
    """Midpoint index of seq: floor of half its length."""
    half = len(seq) // 2
    return half
def mergesort_iter(seq):
    """Sort seq in place using an iterative merge sort with an explicit stack.

    The stack holds pairs of partitions awaiting a merge; `helper` closes
    over seq and writes merged spans back into it.  Returns seq (or a new
    empty list for empty input).
    """
    if not seq:
        return []
    if len(seq) == 1:
        return seq
    def helper():
        # Each stack entry is [[left_lo, left_hi, done], [right_lo, right_hi, done]]
        # describing two sibling partitions of seq awaiting a merge.
        partition_boundary_list = []
        partition_copy = seq
        # Seed the stack by repeatedly halving the leftmost partition.
        while len(partition_copy) > 1:
            partition_boundary_list += [[ [0, middle(partition_copy), False], [middle(partition_copy), len(partition_copy), False] ]]
            partition_copy = partition_copy[0:middle(partition_copy)]
        list_index = len(partition_boundary_list) - 1
        # Bounds of the most recently merged span; used to mark a parent's
        # half as already sorted when it matches.
        left_memoiz = -1
        right_memoiz = -1
        while partition_boundary_list:
            partition_boundary_element = partition_boundary_list[list_index]
            left_lower, left_upper, sorted_left = partition_boundary_element[0]
            right_lower, right_upper, sorted_right = partition_boundary_element[1]
            if left_lower == left_memoiz:
                partition_boundary_list[list_index][0][2] = True
            if right_upper == right_memoiz:
                partition_boundary_list[list_index][1][2] = True
            if left_upper - left_lower > 1 and (not partition_boundary_list[list_index][0][2]):
                # Descend into the still-unsorted left half.
                mid = (left_lower + left_upper) // 2
                partition_boundary_list += [[ [left_lower, mid, False], [mid, left_upper, False] ]]
                list_index += 1
            elif right_upper - right_lower > 1 and (not partition_boundary_list[list_index][1][2]):
                # Descend into the still-unsorted right half.
                mid = (right_lower + right_upper) // 2
                partition_boundary_list += [[ [right_lower, mid, False], [mid, right_upper, False] ]]
                list_index += 1
            else:
                # Both halves are sorted (or trivially small): merge them
                # and copy the result back into seq in place.
                left_memoiz = left_lower
                right_memoiz = right_upper
                ret_seq = merge_iter(seq[left_lower:left_upper], seq[right_lower:right_upper])
                for element in ret_seq:
                    seq[left_lower] = element
                    left_lower += 1
                partition_boundary_list.pop(list_index)
                list_index -= 1
    helper()
    return seq
# Time mergesort_iter on a shuffled and on a descending input of each size,
# recording formatted elapsed seconds, then plot both curves.
for a in range(len(x2)):
    array = (geraLista(x2[a]))
    inicio = timeit.default_timer()
    mergesort_iter(array)
    fim = timeit.default_timer()
    y.append('%f' %(fim - inicio))
    array2 = (geraLista2(x2[a]))
    z = timeit.default_timer()
    mergesort_iter(array2)
    w = timeit.default_timer()
    y2.append('%f' %(w-z))
print(y2)
print(y)
desenhaGrafico(x2,y,y2)
|
13,331 | 788c6f8bf153455c77e872584842d03d32c11e1a | import numpy as np
# array1 = np.random.randint(5, size=[2, 3])
# print(array1)
# # `axis` selects the direction of the sort:
# # 0 sorts down each column, 1 sorts along each row
# array1.sort(axis=1)
# print(array1)
array = np.random.randint(5, size=[4])
print(array)
indexArray = np.argsort(array)
# Indices that would sort the array.
print(indexArray)
# Fancy-indexing with those indices yields the sorted array.
print(array[indexArray])
13,332 | 3b5638ae0d57e37d813ad145efb5b385523ce42f | import sys
import math
maxSize = 10001
# generate a list of prime numbers
# Sieve of Eratosthenes over [0, maxSize).
aIsPrime = [True] * maxSize
for i in range(2, maxSize):
    if not aIsPrime[i]:
        continue
    for j in range(2*i, maxSize, i):
        aIsPrime[j] = False
# Candidate primes, excluding 2 and 5: both divide 10, so the order-of-10
# loop below would never terminate for them (their reciprocals terminate).
primeList = []
for i in range(3, maxSize):
    if aIsPrime[i] and i != 5:
        primeList.append(i)
# lengthList[p] caches the recurring-cycle length of 1/p (0 = not computed).
lengthList = [0] * maxSize
T = int(input().strip())
for a0 in range(T):
    N = int(input().strip())
    maxLength = 0
    maxPrime = 2
    for prime in primeList:
        if prime >= N:
            break
        if lengthList[prime] == 0:
            # Cycle length of 1/prime = multiplicative order of 10 mod prime:
            # smallest k with 10**k % prime == 1.
            length = 1
            divisor = 10
            while divisor % prime != 1:
                length += 1
                divisor = divisor * 10 % prime # mod prime is crucial, otherwise it will get time out.
            lengthList[prime] = length
        if lengthList[prime] > maxLength:
            maxLength = lengthList[prime]
            maxPrime = prime
    # Prime below N whose reciprocal has the longest recurring cycle.
    print(maxPrime)
13,333 | 03fce57cd06a930017a4f82fe101a6b59a4c6b88 | #!/usr/bin/env python
PACKAGE = 'amr_localization'
NODE = 'pose_likelihood_server'
import roslib
roslib.load_manifest(PACKAGE)
import rospy
import tf
import numpy as np
import math
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import PoseStamped, Pose2D
from amr_srvs.srv import GetMultiplePoseLikelihood, GetMultiplePoseLikelihoodResponse, GetNearestOccupiedPointOnBeam, GetNearestOccupiedPointOnBeamRequest, SwitchRanger
class PoseLikelihoodServerNode:
    """
    This is a port of the AMR Python PoseLikelihoodServerNode.

    Exposes the GetMultiplePoseLikelihood service: for each candidate robot
    pose it simulates all laser beams in a single call to the
    get_nearest_occupied_point_on_beam service and scores the pose by how
    well the predicted ranges match the most recent real scan.
    """
    def __init__(self):
        rospy.init_node(NODE)
        # Wait until SwitchRanger service (and hence stage node) becomes available.
        rospy.loginfo('Waiting for the /switch_ranger service to be advertised...')
        rospy.wait_for_service('/switch_ranger')
        try:
            switch_ranger = rospy.ServiceProxy('/switch_ranger', SwitchRanger)
            # Make sure that the hokuyo laser is available and enable them (aka switch on range scanner)
            switch_ranger('scan_front', True)
        except rospy.ServiceException as e:
            # Bug fix: rospy has no `logerror`; the logging call is `logerr`.
            # Also: `except X, e` is Python-2-only syntax; `as` works on 2.6+.
            rospy.logerr("Service call failed: %s" % e)
        self._tf = tf.TransformListener()
        rospy.loginfo('Started [pose_likelihood_server] node.')
        # Cache the latest scan in scan_cb; pose_cb reads it when scoring poses.
        self.scanner_subs = rospy.Subscriber("/scan_front", LaserScan, self.scan_cb, queue_size=50)
        s = rospy.Service('/pose_likelihood_server/get_pose_likelihood', GetMultiplePoseLikelihood, self.pose_cb)
        rospy.wait_for_service('/occupancy_query_server/get_nearest_occupied_point_on_beam')
        # Client used to simulate the poses and obtain predicted beam readings.
        self.p_client = rospy.ServiceProxy('/occupancy_query_server/get_nearest_occupied_point_on_beam', GetNearestOccupiedPointOnBeam)

    def pose_cb(self, req):
        """Service callback: return a likelihood for every requested pose.

        A pose's weight is the product of per-beam Gaussian weights for the
        beams that match the real scan closely; poses with 5 or more badly
        mismatching beams receive likelihood 0.
        """
        likelihood_list = []
        all_poses = req.poses
        for pose in all_poses:
            sigma = 0.3
            bad_matches = 0
            acceptable_matches = 0
            total_weight = 1
            # Express every beam pose in the laser frame for this candidate pose.
            beamposes = self.transform_poses(pose)
            service_request = GetNearestOccupiedPointOnBeamRequest()
            service_request.threshold = 4
            service_request.beams = beamposes
            # One service call simulates all beams at once (keeps latency low).
            distances = self.p_client(service_request)
            for i in range(len(distances.distances)):
                # Real reading from the cached scan.
                real_distance = self.real_ranges[i]
                # Predicted reading, clamped into the scanner's valid range.
                pred_range = self.clamp(distances.distances[i], self.range_min, self.range_max)
                # Score the beam only when prediction and reality are close.
                if (abs(real_distance - pred_range) <= 1.6*sigma):
                    individual_weight = np.exp(-(np.power((real_distance - pred_range),2.0) / (2.0*np.power(sigma,2))) \
                                               / (sigma * np.sqrt(2*np.pi)))
                    total_weight = total_weight * individual_weight
                    acceptable_matches = acceptable_matches + 1
                else:
                    bad_matches = bad_matches + 1
            # Keep the weight only when few beams disagree; otherwise the
            # pose is considered implausible.
            if bad_matches < 5:
                likelihood_list.append(total_weight)
            else:
                likelihood_list.append(0.0)
        likelihood_response = GetMultiplePoseLikelihoodResponse(likelihood_list)
        return likelihood_response

    def scan_cb(self, data):
        """Subscriber callback: cache the latest laser scan parameters."""
        self.number_of_beams = len(data.ranges)
        self.angle_min = data.angle_min
        self.angle_increment = data.angle_increment
        self.real_ranges = list(data.ranges)
        self.range_max = data.range_max
        self.range_min = data.range_min

    def clamp(self, distance, range_min, range_max):
        """Clamp a predicted distance into [range_min, range_max]."""
        if distance > range_max:
            return range_max
        elif distance < range_min:
            return range_min
        else:
            return distance

    def transform_poses(self, base_pose):
        """Return one Pose2D per beam, shifted from the robot base into the
        front-laser frame and fanned out over the scan angles."""
        transformed_beams = []
        time = self._tf.getLatestCommonTime("/base_link","/base_laser_front_link")
        position, quaternion = self._tf.lookupTransform("/base_link","/base_laser_front_link",time)
        yaw = tf.transformations.euler_from_quaternion(quaternion)[2]
        x, y, yaw = position[0], position[1], yaw
        for i in range(len(self.real_ranges)):
            t_pose = Pose2D()
            euler_form = tf.transformations.euler_from_quaternion((base_pose.pose.orientation.x, \
                base_pose.pose.orientation.y,base_pose.pose.orientation.z,base_pose.pose.orientation.w))
            # Shift the position by the base->laser offset.
            # NOTE(review): the offset is applied without rotating it by the
            # robot's yaw — confirm this matches the intended frame math.
            t_pose.x = base_pose.pose.position.x + x
            t_pose.y = base_pose.pose.position.y + y
            # Beam heading = robot orientation + laser mounting yaw + beam angle.
            t_pose.theta = self.angle_min + (i*self.angle_increment) + yaw + euler_form[2]
            transformed_beams.append(t_pose)
        return transformed_beams
if __name__ == '__main__':
    # Instantiate the node and hand control to the ROS event loop.
    w = PoseLikelihoodServerNode()
    rospy.spin()
|
13,334 | 490a90a7dac2e2bd9fc33f153cb70e43a207090a | from typing import Any, Optional
from abc import ABC
from fastapi import HTTPException
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from src.api.responses.error.error import BaseErrorResponse
from src.api.responses.error.r_404_not_found import BaseNotFound, UserNotFound
class MyBaseHttpException(HTTPException, ABC):
    """Base class for HTTP errors that carry a structured error model.

    Subclasses override the class-level defaults to customise the status
    code, response model, and headers.
    """

    default_status_code = 400
    default_response_model = BaseErrorResponse
    default_headers: dict[str, str] = dict()

    def __init__(self, answer: BaseErrorResponse = None,
                 status_code: int = None,
                 headers: Optional[dict[str, Any]] = None, **kwargs):
        status_code = status_code or self.default_status_code
        # Build the response model from kwargs when no explicit answer given.
        answer: BaseErrorResponse = answer or self.default_response_model(**kwargs)
        # Bug fix: `headers` defaults to None and `dict | None` raises
        # TypeError, so merge with an empty dict instead.
        super(MyBaseHttpException, self).__init__(status_code, detail=answer,
                                                  headers=self.default_headers | (headers or {}))
        self.detail: BaseErrorResponse = answer

    @property
    def response(self) -> JSONResponse:
        """Render the stored error model as a JSON response."""
        return JSONResponse(content=self.detail.dict(), status_code=self.status_code, headers=self.headers)

    def __repr__(self):
        # NOTE(review): assumes every value in detail.dict() exposes `.value`
        # (enum-like) — confirm against the error response models.
        model_ = ", ".join([f"{key}='{str(val.value)}'" for key, val in self.detail.dict().items()])
        return f'{self.__class__.__name__}(st_code={self.status_code}, answer=({model_}))'

    def __str__(self):
        return self.__repr__()
|
13,335 | 4c5d710705cba19d5566943f10fedfe61a857b8d |
from __future__ import unicode_literals
from wtforms import BooleanField, StringField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, Email
from fossir.util.i18n import _
from fossir.web.forms.base import fossirForm
from fossir.web.forms.validators import UsedIfChecked
from fossir.web.forms.widgets import SwitchWidget
class CephalopodForm(fossirForm):
    """Settings form for joining the community hub.

    The contact fields are validated only when 'joined' is checked
    (UsedIfChecked), and are then required.
    """
    # NOTE(review): field labels are plain strings while the descriptions use
    # _() — confirm whether the labels should be translatable as well.
    joined = BooleanField('Join the community', widget=SwitchWidget())
    contact_name = StringField('Contact Name', [UsedIfChecked('joined'), DataRequired()],
                               description=_('Name of the person responsible for your fossir server.'))
    contact_email = EmailField('Contact Email',
                               [UsedIfChecked('joined'), DataRequired(), Email()],
                               description=_('Email address of the person responsible for your fossir server.'))
|
13,336 | 7861c7790728c1c11c164aee2af089961f5b38ae | #!/usr/bin/env python
from flask import render_template
from utility import get_mongodb_connection
class Subscription():
    """An SMS subscription tying a phone number to a geographic location.

    Built either from an incoming message body (parsed for coordinates) or
    from explicit latitude/longitude values, and persisted in MongoDB.
    """

    def __init__(self, number, body, timestamp, latitude=None, longitude=None):
        self.number = number
        self.body = body
        self.timestamp = timestamp
        # Parse coordinates out of the body unless they were given directly.
        if latitude is None and body is not None:
            self.parse()
        else:
            self.latitude = latitude
            self.longitude = longitude

    def parse(self):
        """ Parse an incoming message in the form "SUBSCRIBE -1.932091 1.309280" """
        sub = self.body.split(' ')
        if len(sub) == 3:
            self.latitude = float(sub[1])
            self.longitude = float(sub[2])
        else:
            self.latitude = None
            self.longitude = None
            raise Exception("Invalid message")

    def handle(self):
        """Persist the subscription and return the rendered TwiML reply."""
        return self.save()

    def to_dictionary(self):
        """Return the MongoDB document representation of this subscription."""
        return {
            "number" : self.number,
            "timestamp" : self.timestamp,
            "latitude" : self.latitude,
            "longitude" : self.longitude
        }

    def save(self):
        """Insert this subscription into MongoDB; render success/failure TwiML."""
        conn = get_mongodb_connection()
        subscriptions = conn.potsandpans.subscriptions
        # NOTE(review): Collection.insert is deprecated in modern pymongo;
        # consider insert_one once the driver is upgraded.
        if subscriptions.insert(self.to_dictionary()):
            return render_template('subscription_stored.twiml')
        else:
            return render_template('subscription_failed.twiml')

    @staticmethod
    def find_in_area(number, min_lat, min_long, max_lat, max_long):
        """Find subscriptions of OTHER numbers inside the given bounding box."""
        conn = get_mongodb_connection()
        query = {
            "latitude": {
                "$gt": min_lat,
                "$lt": max_lat
            },
            "longitude": {
                "$gt": min_long,
                "$lt": max_long
            },
            "number": {
                "$ne": number
            }
        }
        # Bug fix: print statements were Python-2-only syntax; the function
        # form behaves identically on Python 2 here and also parses on 3.
        print(query)
        cursor = conn.potsandpans.subscriptions.find(query)
        print('\n\nfound %d subscriptions\n\n' % (cursor.count()))
        subscriptions = []
        for record in cursor:
            subscriptions.append(Subscription(record['number'], None, record['timestamp'], record['latitude'], record['longitude']))
        return subscriptions
|
13,337 | b412c9afd05294a72414c3eb1f93e5113a2f8606 | #! /usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from models.ResNetBlocks import *
class x_vector_model(nn.Module):
    """x-vector speaker embedding network: STFT front-end, five TDNN frame
    layers, statistics pooling, and two segment-level FC layers.

    forward() returns the 512-dim embedding after fc2 -- the classifier
    head fc3 is defined but commented out in forward().
    """

    def __init__(self, numSpkrs=5994, p_dropout=0.05):
        super(x_vector_model, self).__init__()
        # Frame-level TDNN stack. Input has 257 channels = one-sided bins
        # of a 512-point STFT (512//2 + 1).
        self.tdnn1 = nn.Conv1d(in_channels=257, out_channels=512, kernel_size=5, dilation=1)
        self.bn_tdnn1 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
        self.dropout_tdnn1 = nn.Dropout(p=p_dropout)
        self.tdnn2 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=5, dilation=2)
        self.bn_tdnn2 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
        self.dropout_tdnn2 = nn.Dropout(p=p_dropout)
        self.tdnn3 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=7, dilation=3)
        self.bn_tdnn3 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
        self.dropout_tdnn3 = nn.Dropout(p=p_dropout)
        self.tdnn4 = nn.Conv1d(in_channels=512, out_channels=512, kernel_size=1, dilation=1)
        self.bn_tdnn4 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
        self.dropout_tdnn4 = nn.Dropout(p=p_dropout)
        self.tdnn5 = nn.Conv1d(in_channels=512, out_channels=1500, kernel_size=1, dilation=1)
        self.bn_tdnn5 = nn.BatchNorm1d(1500, momentum=0.1, affine=False)
        self.dropout_tdnn5 = nn.Dropout(p=p_dropout)
        # Segment level: input 3000 = mean (1500) concatenated with std (1500).
        self.fc1 = nn.Linear(3000,512)
        self.bn_fc1 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
        self.dropout_fc1 = nn.Dropout(p=p_dropout)
        self.fc2 = nn.Linear(512,512)
        self.bn_fc2 = nn.BatchNorm1d(512, momentum=0.1, affine=False)
        self.dropout_fc2 = nn.Dropout(p=p_dropout)
        self.fc3 = nn.Linear(512,numSpkrs)
        # Per-bin normalization of the spectrogram before the TDNN stack.
        self.instancenorm = nn.InstanceNorm1d(257)

    def forward(self, x, eps=0.01):
        # Note: x must be (batch_size, feat_dim, chunk_len)
        # NOTE(review): the STFT below consumes x as a raw waveform batch
        # (16 kHz assumed from the 10ms hop / 25ms window) -- confirm the
        # shape comment above, it appears to describe the post-STFT tensor.
        # This call uses the pre-1.8 torch.stft layout (real/imag stacked
        # in a trailing dim, i.e. return_complex=False behaviour).
        stft = torch.stft(x, 512, hop_length=int(0.01*16000), win_length=int(0.025*16000), window=torch.hann_window(int(0.025*16000)), center=False, normalized=False, onesided=True)
        # Magnitude spectrogram: sqrt(real^2 + imag^2) -> (batch, 257, frames).
        stft = (stft[:,:,:,0].pow(2)+stft[:,:,:,1].pow(2)).pow(0.5)
        # detach(): no gradients flow through the feature extraction.
        x = self.instancenorm(stft).detach()
        #print(x.shape)
        x = self.dropout_tdnn1(self.bn_tdnn1(F.relu(self.tdnn1(x))))
        #print(x.shape)
        x = self.dropout_tdnn2(self.bn_tdnn2(F.relu(self.tdnn2(x))))
        #print(x.shape)
        x = self.dropout_tdnn3(self.bn_tdnn3(F.relu(self.tdnn3(x))))
        #print(x.shape)
        x = self.dropout_tdnn4(self.bn_tdnn4(F.relu(self.tdnn4(x))))
        #print(x.shape)
        x = self.dropout_tdnn5(self.bn_tdnn5(F.relu(self.tdnn5(x))))
        #print(x.shape)
        if self.training:
            # Additive Gaussian noise regularization, training only.
            shape=x.size()
            noise = torch.cuda.FloatTensor(shape) if torch.cuda.is_available() else torch.FloatTensor(shape)
            torch.randn(shape, out=noise)
            x += noise*eps
        # Statistics pooling over time: concatenate per-channel mean and std.
        stats = torch.cat((x.mean(dim=2), x.std(dim=2)), dim=1)
        #print(x.mean(dim=2).shape)
        #print(stats.shape)
        x = self.dropout_fc1(self.bn_fc1(F.relu(self.fc1(stats))))
        #print(x.shape)
        x = self.dropout_fc2(self.bn_fc2(F.relu(self.fc2(x))))
        #print(x.shape)
        #x = self.fc3(x)
        return x
def x_vector(nOut=512, **kwargs):
    """Factory returning an x-vector embedding model.

    NOTE(review): nOut and the extra keyword arguments are accepted for
    interface compatibility with sibling model factories but are currently
    ignored -- the model is always built with its default configuration.
    """
    return x_vector_model()
|
13,338 | 214979437405db8529eded3a47b4e3a2df3f1568 | from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter(name='lookup')
def lookup(d, key):
    """Template filter: index a mapping by a variable key (raises on miss)."""
    return d[key]
@register.filter(name='getContentFromJsonSolrResult')
def getContentFromHiglights(d, key):
    """Template filter: first highlighted content fragment for *key* in a
    Solr highlights dict, marked safe so the highlight markup renders as HTML.
    """
    # Single lookup -- the original fetched d[key] twice and left the first
    # result in an unused local.
    return mark_safe(d[key]["content"][0])
|
13,339 | 7062148e8e4b2237800420425a0d90fd3ef20a4e | # Generated by Django 2.0 on 2018-10-15 21:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the AssignmentFiles table."""

    dependencies = [
        ('writers', '0022_auto_20181015_1216'),
    ]

    operations = [
        migrations.CreateModel(
            name='AssignmentFiles',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('download_file', models.FileField(upload_to='download_assignment')),
                # NOTE: the FK is named "file_id" but references a Bids row,
                # so the DB column ends up as file_id_id.
                ('file_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='writers.Bids')),
            ],
        ),
    ]
|
13,340 | 1dcfcbecf3380b4f3f240647596f6eb3b7098ff6 | from .Spixel_single_layer import * |
13,341 | 2f1452384511490b4edf3129e88cebf1f1194be7 | import re
from ansiblelater.standard import StandardBase
from ansiblelater.utils import count_spaces
class CheckBracesSpaces(StandardBase):
    """ANSIBLE0004: enforce a consistent number of spaces inside {{ ... }}.

    The leading+trailing space count of every Jinja2 variable expression
    must lie within [2*min-spaces-inside, 2*max-spaces-inside] as configured
    under settings["ansible"]["double-braces"].
    """

    sid = "ANSIBLE0004"
    description = "YAML should use consistent number of spaces around variables"
    helptext = "no suitable numbers of spaces (min: {min} max: {max})"
    version = "0.1"
    types = ["playbook", "task", "handler", "rolevars", "hostvars", "groupvars", "meta"]

    def check(self, candidate, settings):
        yamllines, errors = self.get_normalized_yaml(candidate, settings)
        conf = settings["ansible"]["double-braces"]
        matches = []
        braces = re.compile("{{(.*?)}}")
        if not errors:
            # Collect every {{ ... }} expression (lines marked !unsafe are
            # exempt from the spacing rule).
            for i, line in yamllines:
                if "!unsafe" in line:
                    continue
                match = braces.findall(line)
                if match:
                    for item in match:
                        matches.append((i, item))
            for i, line in matches:
                [leading, trailing] = count_spaces(line)
                sum_spaces = leading + trailing
                if (
                    sum_spaces < conf["min-spaces-inside"] * 2
                    # FIX: the upper bound compared against min-spaces-inside,
                    # so any max larger than min could never take effect.
                    or sum_spaces > conf["max-spaces-inside"] * 2
                ):
                    errors.append(
                        self.Error(
                            i,
                            self.helptext.format(
                                min=conf["min-spaces-inside"], max=conf["max-spaces-inside"]
                            )
                        )
                    )
        return self.Result(candidate.path, errors)
|
13,342 | 2a6cf6b77d295b7715c1b101d85da83a6e66a54a | import os
from pathlib import Path
def get_last_modified(folder_path: Path):
    '''
    Finds the last modified file in a folder.

    :param folder_path: Path to the folder
    :return:
        newest_file: the file name of the most recently modified entry
        newest_file_date: the mtime of that entry, seconds since the epoch
    :raises ValueError: if the folder contains no entries (the original
        crashed with an opaque IndexError in this case)
    '''
    files = os.listdir(folder_path)
    if not files:
        raise ValueError("No files in {}".format(folder_path))
    # Single scan with max(); the original seeded from files[0] and then
    # re-stat'ed every entry (including files[0]) in a manual loop.
    # Ties keep the first entry in listdir order, same as the strict '>' loop.
    newest_file = max(files, key=lambda f: os.path.getmtime(Path(folder_path, f)))
    newest_file_date = os.path.getmtime(Path(folder_path, newest_file))
    return newest_file, newest_file_date
|
13,343 | 11b0cf636a9591f1501b16ddd05214861c1da1d9 | # -*- coding: utf-8 -*-
# Package metadata (consumed by packaging/setup tooling).
__author__ = 'Bernard Kuehlhorn'
__email__ = 'bkuehlhorn@acm.org'
__version__ = '0.0.1'
|
13,344 | fa96c5ce1dd135a16932f13023d852020fca2699 | import numpy as np
class LogReg:
    """Binary logistic regression trained by batch gradient descent."""

    def __init__(self, param_niter=1000, param_delta=0.5):
        # param_niter: number of gradient-descent iterations
        # param_delta: learning rate
        self.param_niter = param_niter
        self.param_delta = param_delta
        # Learned parameters, populated by logreg_train (the original
        # implementation computed them and then threw them away).
        self.w = None
        self.b = None

    def sigmoid_array(self, x):
        """Element-wise logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def logreg_train(self, X, Y_):
        """Fit on data X (N x D) with binary labels Y_ in {0, 1}.

        Returns (w, b) and stores them on the instance.
        """
        w = np.random.randn(len(X[0]))
        b = 0
        for i in range(0, self.param_niter):
            scores = np.dot(X, w) + b
            probs = self.sigmoid_array(scores)
            # FIX: proper binary cross-entropy. The old diagnostic summed
            # -log(p) for every sample regardless of its label, which is not
            # the logistic loss (the gradient below was already correct).
            loss = -np.sum(Y_ * np.log(probs) + (1 - Y_) * np.log(1 - probs))
            # dijagnostički ispis (diagnostic print)
            if i % 10 == 0:
                print("iteration {}: loss {}".format(i, loss))
            grad_W = (((probs - Y_) * X.T).T).sum(axis=0)
            grad_b = np.sum(probs - Y_)
            w += -self.param_delta * grad_W
            b += -self.param_delta * grad_b
        # FIX: keep the learned parameters instead of discarding them.
        self.w = w
        self.b = b
        return w, b

    def logreg_classify(self, X, w, b):
        """Return P(y=1 | x) for each row of X under parameters (w, b)."""
        scores = np.dot(X, w) + b
        probs = self.sigmoid_array(scores)
        return probs
|
13,345 | 136c32025878eac3df6e164d74ffdc2f376ec3e8 | # Generated by Django 3.0.8 on 2020-08-13 20:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration: introduces Unit and the
    IngredientAmount through-table linking recipes to ingredients."""

    dependencies = [
        ('recipes', '0004_auto_20200813_0848'),
    ]

    operations = [
        migrations.CreateModel(
            name='Unit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        # Direct M2M replaced by the explicit through-model below.
        migrations.RemoveField(
            model_name='ingredient',
            name='recipes',
        ),
        migrations.CreateModel(
            name='IngredientAmount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.DecimalField(decimal_places=2, max_digits=4)),
                ('ingredient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.Ingredient')),
                ('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.Recipe')),
                ('unit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recipes.Unit')),
            ],
        ),
        migrations.AddField(
            model_name='recipe',
            name='ingredient_amounts',
            field=models.ManyToManyField(through='recipes.IngredientAmount', to='recipes.Ingredient'),
        ),
    ]
|
13,346 | 86ccc7f2c5c24de9b217110911014d8993c65c72 | #-*- coding: utf-8 -*-
import sys
reload(sys)  # Python 2 only: re-import sys so setdefaultencoding is exposed again
sys.setdefaultencoding('utf-8')
import csv
# Write two space-delimited rows; '|' is the quote char, quoting only when needed.
with open('egg.csv', 'wb') as csvfile:
    spamwriter =csv.writer(csvfile, delimiter=' ', quotechar='|', quoting =csv.QUOTE_MINIMAL)
    spamwriter.writerow(['Spam']*5+['Baked Beans'])
    spamwriter.writerow(['Spam', 'Lovely Spam','wronderful spam'])
# Read the file back and print each row re-joined with commas.
# NOTE: this whole script is Python 2 ('wb'/'rb' csv modes, print statements).
with open ('egg.csv','rb') as csvfile:
    spamreader = csv.reader(csvfile, delimiter= ' ', quotechar='|')
    for row in spamreader:
        a= ','.join(row)
        print type(a)
        print a
13,347 | 7a120fa69a6b9119d6a1146c516a382959d4f9d2 | # -*- coding: utf-8 -*-
import ast, sys
from typing import Set
import astunparse
def ast_error(msg: str, ast_node):
    """Report a source-located compiler error on stderr and abort (exit 1)."""
    location = "({},{}): {}".format(ast_node.lineno, ast_node.col_offset, msg)
    print(location, file=sys.stderr)
    sys.exit(1)
class CallGraphNode:
    """Abstract base for every node in the call graph.

    Subclasses override name/type; is_device is flipped by the device-marking
    pass once a node is known to live on the GPU side.
    """

    def __init__(self):
        # Not device-resident until the marking pass says otherwise.
        self.is_device = False

    @property
    def name(self):
        """Identifier of this node (None for the abstract base)."""
        return None

    @property
    def type(self):
        """Type of this node; the generic unknown type by default."""
        return TypeNode()
class RootNode(CallGraphNode):
    """Root of the call graph: owns all top-level classes, functions and
    variables, plus bookkeeping for the device-marking pass."""

    def __init__(self):
        # Pre-register the built-in "library" classes so calls such as
        # random.getrandbits() or DeviceAllocator.new() resolve during the
        # AST visit. Order matters only for lookup priority.
        self.declared_classes: list[ClassNode] = []
        random_class: ClassNode = ClassNode("random", None)
        random_class.declared_functions.add(FunctionNode("getrandbits", "random", IntNode()))
        random_class.declared_functions.add(FunctionNode("uniform", "random", FloatNode()))
        random_class.declared_functions.add(FunctionNode("seed", "random", None))
        self.declared_classes.append(random_class)
        da_class: ClassNode = ClassNode("DeviceAllocator", None)
        da_class.declared_functions.add(FunctionNode("new", "DeviceAllocator", None))
        da_class.declared_functions.add(FunctionNode("destroy", "DeviceAllocator", None))
        da_class.declared_functions.add(FunctionNode("device_do", "DeviceAllocator", None))
        da_class.declared_functions.add(FunctionNode("parallel_do", "DeviceAllocator", None))
        da_class.declared_functions.add(FunctionNode("array", "DeviceAllocator", None))
        self.declared_classes.append(da_class)
        self.declared_functions: Set[FunctionNode] = set()
        # Functions called but never declared (assumed library functions).
        self.library_functions: Set[FunctionNode] = set()
        self.called_functions: Set[FunctionNode] = set()
        self.declared_variables: Set[VariableNode] = set()
        self.called_variables: Set[VariableNode] = set()
        # Names of classes that must be compiled for the device.
        self.device_class_names = []
        # Names of classes referenced from device fields/arguments.
        self.fields_class_names = set()
        self.has_device_data = False

    def get_ClassNode(self, class_name):
        """Return the declared class with this name, or None."""
        for class_node in self.declared_classes:
            if class_node.name == class_name:
                return class_node
        return None

    def get_FunctionNode(self, function_name, class_name):
        """Resolve a function: first inside declared classes, then among
        top-level declared functions, then among library functions."""
        for class_node in self.declared_classes:
            ret = class_node.get_FunctionNode(function_name, class_name)
            if ret is not None:
                return ret
        else:
            for function_node in self.declared_functions:
                if function_node.name == function_name:
                    return function_node
            for function_node in self.library_functions:
                if function_node.name == function_name:
                    return function_node
        return None

    def get_VariableNode(self, var_name):
        """Return the top-level variable with this name, or None."""
        for var_node in self.declared_variables:
            if var_node.name == var_name:
                return var_node
        return None
class ClassNode(CallGraphNode):
    """Call-graph node for a declared (or pre-registered) class."""

    def __init__(self, class_name, super_class, ast_node=None):
        super().__init__()
        self.class_name = class_name
        self.super_class = super_class
        self.ast_node = ast_node
        self.declared_functions: Set[FunctionNode] = set()
        self.declared_variables: Set[VariableNode] = set()
        self.declared_fields: list[VariableNode] = []
        # Field name -> flattened list of VariableNodes (refs expanded).
        self.expanded_fields: dict = {}
        # True once the class is seen seeding random (needs a curand state).
        self.has_random_state: bool = False

    @property
    def name(self):
        """The class identifier."""
        return self.class_name

    @property
    def type(self):
        """The instance type of this class."""
        return ClassTypeNode(self)

    def get_FunctionNode(self, function_name, class_name):
        """Find a declared function; class_name=None matches by name only,
        otherwise the host class must match as well."""
        if class_name is None:
            for candidate in self.declared_functions:
                if candidate.name == function_name:
                    return candidate
        for candidate in self.declared_functions:
            if candidate.name == function_name and candidate.host_name == class_name:
                return candidate
        return None

    def get_VariableNode(self, var_name):
        """Find a declared variable by name, or None."""
        for candidate in self.declared_variables:
            if var_name == candidate.name:
                return candidate
        return None
class FunctionNode(CallGraphNode):
    """Call-graph node for a function or method."""

    def __init__(self, function_name, host_name, return_type, owner_class=None, ast_node=None):
        super().__init__()
        self.function_name = function_name
        # Name of the class the function is declared in (None for free funcs).
        self.host_name = host_name
        self.owner_class = owner_class
        self.return_type = return_type
        self.ast_node = ast_node
        self.arguments: list[VariableNode] = []
        self.declared_functions: Set[FunctionNode] = set()
        self.called_functions: Set[FunctionNode] = set()
        self.declared_variables: Set[VariableNode] = set()
        self.called_variables: Set[VariableNode] = set()

    @property
    def name(self):
        """The function identifier."""
        return self.function_name

    def get_FunctionNode(self, function_name, class_name):
        """Find a nested function; class_name=None matches by name only,
        otherwise the host class must match as well."""
        if class_name is None:
            for candidate in self.declared_functions:
                if candidate.name == function_name:
                    return candidate
        for candidate in self.declared_functions:
            if candidate.name == function_name and candidate.host_name == class_name:
                return candidate
        return None

    def get_VariableNode(self, var_name):
        """Find a local variable by name, or None."""
        for candidate in self.declared_variables:
            if var_name == candidate.name:
                return candidate
        return None
class VariableNode(CallGraphNode):
    """Call-graph node for a variable, argument or class field."""

    def __init__(self, var_name, var_type, owner_class=None):
        super().__init__()
        self.var_name = var_name
        self.var_type = var_type
        # kSeed is a predefined device-side constant, so it starts marked.
        self.is_device = (var_name == "kSeed")
        # add fields for multiple inheritance
        self.owner_class = owner_class

    @property
    def name(self):
        """The variable identifier."""
        return self.var_name

    @property
    def type(self):
        """The variable's declared type node."""
        return self.var_type

    def is_ref(self):
        """True when the name carries the trailing "_ref" marker (the bare
        name "ref" also counts, matching the original split-based check)."""
        return self.var_name.rsplit("_", 1)[-1] == "ref"
""" types for nodes in the call graph """
class TypeNode():
    """Base type node: the unknown type, rendered as C++ ``auto``."""

    def __init__(self):
        pass

    @property
    def name(self):
        """No name for the unknown type."""
        return None

    @property
    def e_type(self):
        """Element type (only meaningful for container types)."""
        return None

    def to_cpp_type(self):
        """C++ spelling when used as a value/argument."""
        return "auto"

    def to_field_type(self):
        """C++ spelling when used as a struct field."""
        return "auto"

    def declared_functions(self):
        """Functions callable on this type (None: nothing known)."""
        return None
class IntNode(TypeNode):
    """Integer type, optionally a fixed-width unsigned variant."""

    def __init__(self, unsigned=False, size=None):
        super().__init__()
        self.unsigned = unsigned
        # Bit width; only consulted when unsigned is True.
        self.size = size

    @property
    def name(self):
        return "int"

    def to_cpp_type(self):
        if not self.unsigned:
            return "int"
        return "uint{}_t".format(self.size)

    def to_field_type(self):
        # Fields are always stored as plain int regardless of width.
        return "int"
class FloatNode(TypeNode):
    """Floating-point type, rendered as C++ ``float`` everywhere."""

    def __init__(self):
        super().__init__()

    @property
    def name(self):
        return "float"

    def to_cpp_type(self):
        return "float"

    def to_field_type(self):
        return "float"
class BoolNode(TypeNode):
    """Boolean type, rendered as C++ ``bool`` everywhere."""

    def __init__(self):
        super().__init__()

    @property
    def name(self):
        return "bool"

    def to_cpp_type(self):
        return "bool"

    def to_field_type(self):
        return "bool"
class CurandStateTypeNode(TypeNode):
    """CUDA curandState: passed by reference, stored by value in fields."""

    def __init__(self):
        super().__init__()

    @property
    def name(self):
        return "curandState"

    def to_cpp_type(self):
        return "curandState&"

    def to_field_type(self):
        return "curandState"
class ListTypeNode(TypeNode):
    """Type of ``list[T]``; ``size`` is filled in later when the visitor
    sees a ``[None] * N`` initializer."""

    size: int = None

    def __init__(self, element_type):
        super().__init__()
        self.element_type = element_type

    @property
    def name(self):
        return "list[{}]".format(self.element_type.name)

    def to_field_type(self):
        # Fixed-capacity device-side array of the element's field type.
        return "DeviceArray<{}, {}>".format(self.element_type.to_field_type(), str(self.size))
class ClassTypeNode(TypeNode):
    """Instance type of a user class; rendered as a pointer in C++."""

    def __init__(self, class_node):
        super().__init__()
        self.class_node = class_node

    @property
    def name(self):
        return self.class_node.name

    def to_cpp_type(self):
        return "{}*".format(self.name)

    def to_field_type(self):
        return "{}*".format(self.name)

    def declared_functions(self):
        """Methods declared on the underlying class."""
        return self.class_node.declared_functions
class RefTypeNode(TypeNode):
    """Reference wrapper around another type (fields named ``*_ref``);
    delegates naming/lookup to the wrapped type, rendered as a pointer."""

    def __init__(self, type_node):
        super().__init__()
        self.type_node = type_node

    @property
    def name(self):
        return self.type_node.name

    def to_cpp_type(self):
        return "{}*".format(self.name)

    def to_field_type(self):
        return "{}*".format(self.name)

    def declared_functions(self):
        return self.type_node.declared_functions()
""" visit AST and build call graph """
class CallGraphVisitor(ast.NodeVisitor):
    """First pass: walk the Python AST and build the call graph rooted at a
    RootNode, tracking declared/called classes, functions and variables."""

    def __init__(self):
        # Stack of enclosing graph nodes; stack[0] is always the RootNode.
        self.stack = [RootNode()]
        self.current_node = None

    @property
    def root(self):
        return self.stack[0]

    def visit(self, node):
        # Refresh the "current scope" pointer before dispatching.
        self.current_node = self.stack[-1]
        super(CallGraphVisitor, self).visit(node)

    def visit_Module(self, node):
        """Pre-declare every top-level class so later references resolve.

        FIX: the original defined visit_Module twice; the first definition
        (a bare generic_visit) was dead code shadowed by this one and has
        been removed.
        """
        for body in node.body:
            if type(body) is ast.ClassDef:
                class_name = body.name
                class_node = self.current_node.get_ClassNode(class_name)
                if class_node is not None:
                    ast_error("The class {} is already defined".format(class_name), node)
                super_class = None
                if len(body.bases) == 1:
                    super_class = self.current_node.get_ClassNode(body.bases[0].id)
                elif len(body.bases) > 1:
                    ast_error("Sanajeh does not yet support multiple inheritances", node)
                class_node = ClassNode(class_name, super_class, ast_node=body)
                self.current_node.declared_classes.append(class_node)
        self.generic_visit(node)

    def visit_Global(self, node):
        """Record use of module-level variables inside the current scope."""
        for global_variable in node.names:
            var_node = self.root.get_VariableNode(global_variable)
            if var_node is None:
                # FIX: ast_error requires the AST node for its location info;
                # the original call omitted it and would raise TypeError.
                ast_error("The global variable {} does not exist".format(global_variable), node)
            self.stack[-1].called_variables.add(var_node)

    def visit_ClassDef(self, node):
        if type(self.current_node) is not RootNode:
            ast_error("Sanajeh does not yet support nested classes", node)
        class_name = node.name
        # Classes decorated with @device are seeds for the device-marking pass.
        for decorator in node.decorator_list:
            if type(decorator) is ast.Name and decorator.id == "device":
                self.root.has_device_data = True
                if class_name not in self.root.device_class_names:
                    self.root.device_class_names.append(class_name)
        class_node = self.current_node.get_ClassNode(class_name)
        self.stack.append(class_node)
        self.generic_visit(node)
        self.stack.pop()

    def visit_FunctionDef(self, node):
        func_name = node.name
        func_node = self.current_node.get_FunctionNode(func_name, self.current_node.name)
        if func_node is not None:
            ast_error("Function {} is already defined".format(func_name), node)
        return_type = None
        if type(self.current_node) is ClassNode and func_name == "__init__":
            # Constructors "return" an instance of their class.
            return_type = self.current_node.type
        elif hasattr(node.returns, "id"):
            return_type = ast_to_call_graph_type(self.stack, node.returns)
        host = self.stack[-1] if type(self.stack[-1]) is ClassNode else None
        func_node = FunctionNode(func_name, self.current_node.name, return_type, owner_class=host, ast_node=node)
        self.current_node.declared_functions.add(func_node)
        self.stack.append(func_node)
        self.generic_visit(node)
        self.stack.pop()

    def visit_arguments(self, node):
        if type(self.current_node) is not FunctionNode:
            ast_error("Argument should be found in a function", node)
        for arg in node.args:
            var_type = ast_to_call_graph_type(self.stack, arg.annotation)
            var_node = VariableNode(arg.arg, var_type)
            self.current_node.arguments.append(var_node)

    def visit_Assign(self, node):
        # Untyped assignment: declare targets with the unknown type.
        for var in node.targets:
            var_name = None
            if type(var) is ast.Name:
                var_name = var.id
            if self.current_node.get_VariableNode(var_name) is None:
                var_node = VariableNode(var_name, TypeNode())
                self.current_node.declared_variables.add(var_node)
        self.generic_visit(node)

    def visit_AnnAssign(self, node):
        var = node.target
        if type(var) is ast.Attribute:
            # self.<field>: <annotation> inside __init__ declares a class field.
            var_name = var.attr
            var_type = ast_to_call_graph_type(self.stack, node.annotation, var_name=var_name)
            if type(var_type) is ListTypeNode:
                var_type.size = is_list_initialization(node.value)
            if hasattr(var.value, "id") and var.value.id == "self" \
                    and self.current_node.name == "__init__":
                host_class = self.stack[-2]
                field_node = VariableNode(var_name, var_type, host_class)
                host_class.declared_fields.append(field_node)
                if type(var_type) is ClassTypeNode:
                    host_class.expanded_fields[var_name] = self.expand_field(field_node)
                else:
                    host_class.expanded_fields[var_name] = [field_node]
        elif type(var) is ast.Name:
            var_name = var.id
            var_type = ast_to_call_graph_type(self.stack, node.annotation, var_name=var_name)
            if type(var_type) is ListTypeNode:
                var_type.size = is_list_initialization(node.value)
            if self.current_node.get_VariableNode(var_name) is None:
                var_node = VariableNode(var_name, var_type)
                self.current_node.declared_variables.add(var_node)
        self.generic_visit(node)

    def visit_Name(self, node):
        # A name not declared locally is looked up in enclosing scopes and
        # recorded as a called variable.
        if self.current_node.get_VariableNode(node.id) is None:
            var_node = None
            for ancestor_node in self.stack[-2::-1]:
                if ancestor_node.get_VariableNode(node.id) is not None:
                    var_node = ancestor_node.get_VariableNode(node.id)
                    if var_node is None:
                        ast_error("Cannot find variable " + node.id, node)
                    self.current_node.called_variables.add(var_node)
                    break
            else:
                # Not found anywhere: silently ignore (may be a builtin).
                return

    def visit_Call(self, node):
        func_name = None
        var_type = TypeNode()
        if type(node.func) is ast.Attribute:
            func_name = node.func.attr
            if type(node.func.value) is ast.Name:
                # random.seed(...) inside a class method: that class needs
                # its own curand state on the device.
                if node.func.value.id == "random" and func_name == "seed" and \
                        type(self.stack[-2]) is ClassNode:
                    self.stack[-2].has_random_state = True
                # allocator.parallel_new(Cls, ...) seeds Cls as a device class.
                if (node.func.value.id == "allocator" or node.func.value.id == "PyAllocator") and \
                        func_name == "parallel_new":
                    self.root.has_device_data = True
                    if node.args[0].id not in self.root.device_class_names:
                        self.root.device_class_names.append(node.args[0].id)
            elif type(node.func.value) is ast.Attribute:
                # self.<field>.<method>(...): resolve the receiver field type.
                if hasattr(node.func.value.value, "id") and node.func.value.value.id == "self" and \
                        type(self.stack[-2]) is ClassNode:
                    for var in self.stack[-2].declared_fields:
                        if var.name == node.func.value.attr:
                            var_type = var.type
        elif type(node.func) is ast.Name:
            func_name = node.func.id
        # Resolve the callee against enclosing scopes (skipping functions).
        call_node = None
        for parent_node in self.stack[::-1]:
            if type(parent_node) is FunctionNode:
                continue
            call_node = parent_node.get_FunctionNode(func_name, var_type.name)
            if call_node is not None:
                break
        if call_node is None:
            # Unknown callee: record as an external library function.
            # NOTE(review): var_type is passed as host_name here (a type node
            # rather than a string) -- preserved from the original; confirm
            # downstream consumers expect this.
            call_node = FunctionNode(func_name, var_type, None)
            self.root.library_functions.add(call_node)
        self.current_node.called_functions.add(call_node)
        self.generic_visit(node)

    def expand_field(self, field_node):
        """Flatten a *_ref class field into the fields of the referenced
        class (recursively); non-ref fields expand to themselves."""
        result = []
        if type(field_node.type) is ClassTypeNode and field_node.name.split("_")[-1] == "ref":
            for class_node in self.root.declared_classes:
                if check_equal_type(class_node.type, field_node.type):
                    for nested_field in class_node.declared_fields:
                        result.extend(self.expand_field(nested_field))
        else:
            result.append(field_node)
        return result
""" visit call graph and marks corresponding nodes as device nodes """
class MarkDeviceVisitor:
    """Second pass: starting from the seed device classes, transitively mark
    every class/function/variable reachable from them as device-resident."""

    def __init__(self):
        self.root = None

    def visit(self, node):
        # Entry point; only a RootNode is accepted.
        self.root = node
        if type(node) is RootNode:
            self.visit_RootNode(node)

    def visit_RootNode(self, node):
        # Mark the seed classes, then any class with a device ancestor.
        for class_node in node.declared_classes:
            if class_node.name in self.root.device_class_names \
                    and not class_node.is_device:
                self.visit_ClassNode(class_node)
        for class_node in node.declared_classes:
            if self.has_device_ancestor(class_node) \
                    and not class_node.is_device:
                self.visit_ClassNode(class_node)
        # A class that became a full device class no longer counts as a
        # mere field-referenced class.
        for device_class in self.root.device_class_names:
            if device_class in self.root.fields_class_names:
                self.root.fields_class_names.remove(device_class)

    def visit_ClassNode(self, node):
        # Mark the class itself, its superclass chain, and everything it owns.
        node.is_device = True
        if not node.name in self.root.device_class_names:
            self.root.device_class_names.append(node.name)
        if node.super_class is not None:
            if not node.super_class.is_device:
                self.visit_ClassNode(node.super_class)
        for field_node in node.declared_fields:
            self.visit_FieldNode(field_node)
        for func_node in node.declared_functions:
            self.visit_FunctionNode(func_node)
        for var_node in node.declared_variables:
            self.visit_VariableNode(var_node)

    def visit_FieldNode(self, node):
        # A device field drags any class it references onto the device.
        node.is_device = True
        if type(node.type) is ListTypeNode:
            elem_type = node.type.element_type
            if type(elem_type) is ClassTypeNode:
                if not elem_type.name in self.root.fields_class_names:
                    self.root.fields_class_names.add(elem_type.name)
                if not elem_type.class_node.is_device:
                    self.visit_ClassNode(elem_type.class_node)
        elif type(node.type) is RefTypeNode and type(node.type.type_node) is ClassTypeNode:
            ref_type = node.type.type_node
            if not ref_type.class_node.is_device:
                if not ref_type.name in self.root.fields_class_names:
                    self.root.fields_class_names.add(ref_type.name)
                self.visit_ClassNode(ref_type.class_node)
        node_type = node.type
        if type(node_type) is ClassTypeNode:
            if not node_type.class_node.name in self.root.fields_class_names:
                self.root.fields_class_names.add(node_type.class_node.name)

    def visit_FunctionNode(self, node):
        # Mark the function, its argument classes, its variables and callees.
        node.is_device = True
        for arg in node.arguments:
            if type(arg.type) is ClassTypeNode and not arg.type.class_node.name in self.root.fields_class_names:
                self.root.fields_class_names.add(arg.type.class_node.name)
        for var_node in node.called_variables:
            self.visit_VariableNode(var_node)
        for var_node in node.declared_variables:
            self.visit_VariableNode(var_node)
        for func_node in node.called_functions:
            if not func_node.is_device:
                self.visit_FunctionNode(func_node)

    def visit_VariableNode(self, node):
        node.is_device = True
        node_type = node.type
        if type(node_type) is ClassTypeNode:
            if not node_type.class_node.name in self.root.fields_class_names:
                self.root.fields_class_names.add(node_type.class_node.name)

    def has_device_ancestor(self, class_node):
        """True when any class in the superclass chain is a device class."""
        if class_node.super_class:
            if class_node.super_class.class_name in self.root.device_class_names:
                return True
            else:
                return self.has_device_ancestor(class_node.super_class)
        return False
def is_list_initialization(node):
    """Return the declared size N when *node* is the expression
    ``[None] * N``, otherwise False."""
    if type(node) is not ast.BinOp:
        return False
    if type(node.left) is ast.List and len(node.left.elts) == 1 \
            and type(node.left.elts[0]) is ast.NameConstant \
            and node.left.elts[0].value is None \
            and type(node.right) is ast.Num:
        return node.right.n
    return False
""" used to check equivalence between two typenodes """
def check_equal_type(ltype: TypeNode, rtype: TypeNode):
    """Structural equality between two call-graph type nodes.

    Only the kinds listed below can ever compare equal; anything else
    (e.g. a pair of CurandStateTypeNode) is deliberately never equal,
    matching the original pairwise checks.
    """
    if type(ltype) is not type(rtype):
        return False
    for scalar in (TypeNode, IntNode, FloatNode, BoolNode):
        if type(ltype) is scalar:
            return True
    if type(ltype) is ListTypeNode:
        return check_equal_type(ltype.element_type, rtype.element_type)
    if type(ltype) is ClassTypeNode:
        return ltype.name == rtype.name
    if type(ltype) is RefTypeNode:
        return check_equal_type(ltype.type_node, rtype.type_node)
    return False
""" convert AST nodes into types for call graph """
def ast_to_call_graph_type(stack, node, var_name=None):
    """Resolve an annotation/expression AST node to a call-graph TypeNode.

    stack: the visitor's scope stack (RootNode at index 0).
    var_name: name of the variable being typed; a trailing "_ref" turns a
    class annotation into a RefTypeNode. Falls back to the unknown TypeNode.
    """
    if type(node) is ast.Name:
        type_name = node.id
        if type_name == "self":
            # 'self' types as an instance of the enclosing class.
            if type(stack[-2]) is ClassNode:
                return ClassTypeNode(stack[-2])
        elif type_name == "int":
            return IntNode()
        elif type_name == "float":
            return FloatNode()
        elif type_name == "bool":
            return BoolNode()
        elif type_name == "uint32_t":
            return IntNode(True, 32)
        elif type_name == "uint8_t":
            return IntNode(True, 8)
        elif type_name == "curandState":
            return CurandStateTypeNode()
        else:
            # Walk the scope stack innermost-first.
            i = len(stack) - 1
            while i >= 0:
                current_node = stack[i]
                # check declared variable
                var_node = current_node.get_VariableNode(type_name)
                if var_node:
                    return var_node.type
                # check function parameter
                if type(current_node) is FunctionNode:
                    for arg_node in current_node.arguments:
                        if type_name == arg_node.name:
                            return arg_node.type
                i -= 1
            # check declared classes
            for class_node in stack[0].declared_classes:
                if class_node.name == type_name:
                    if var_name:
                        # "*_ref" variables hold references, not instances.
                        split_var_name = var_name.split("_")
                        if split_var_name[-1] == "ref":
                            return RefTypeNode(ClassTypeNode(class_node))
                    return ClassTypeNode(class_node)
    elif type(node) is ast.Attribute:
        # Special-case DeviceAllocator.RandomState.
        if type(node.value) is ast.Name and node.value.id == "DeviceAllocator" \
                and node.attr == "RandomState":
            return CurandStateTypeNode()
        # Otherwise type the receiver and look the attribute up as a field.
        receiver_type = ast_to_call_graph_type(stack, node.value, var_name)
        if type(receiver_type) is not TypeNode and type(receiver_type) is not ListTypeNode:
            if type(receiver_type) is RefTypeNode:
                receiver_type = receiver_type.type_node
            if type(receiver_type) is ClassTypeNode:
                for field in receiver_type.class_node.declared_fields:
                    if field.name == node.attr:
                        return field.type
    elif type(node) is ast.Call:
        # Method call: the type is the callee's declared return type.
        if type(node.func) is ast.Attribute:
            receiver_type = ast_to_call_graph_type(stack, node.func.value, var_name)
            if receiver_type.declared_functions():
                for func in receiver_type.declared_functions():
                    if func.name == node.func.attr:
                        return func.return_type
    elif type(node) is ast.Subscript:
        if type(node.value) is ast.Name and node.value.id == "list":
            # list[T] annotation.
            e_type = ast_to_call_graph_type(stack, node.slice.value, var_name)
            if e_type is None:
                ast_error("Requires element type for list", node)
            return ListTypeNode(e_type)
        else:
            # Indexing a list expression yields its element type.
            list_type = ast_to_call_graph_type(stack, node.value, var_name)
            slice_type = ast_to_call_graph_type(stack, node.slice.value, var_name)
            if type(slice_type) is IntNode:
                return list_type.element_type
    # Unknown: fall back to the untyped node.
    return TypeNode()
|
13,348 | 9e7898e3adcebc02e6d025b32e126e955d130895 | import boto3
# Python 2 demo script: connect to an S3-compatible endpoint and stream-read
# one object, printing progress. (Python 2 print statements throughout.)
session = boto3.session.Session()
#Endpoint information
# SECURITY(review): hard-coded access/secret keys committed to source --
# rotate these credentials and load them from environment variables or an
# AWS credentials profile instead.
endpoint_url = "http://objects007.scalcia.com"
access_key = "jOMpFFvMMw7un0UxBXRP2EhcVjkGza8n"
secret_key = "pzdBtAhr6Ak_o7IkfSavlxfhMSVMUUaL"
#Bucket, Objectname
bucket = "testbucket"
key = "employee_stats.txt"
#Connect to your Objects endpoint.
s3c = session.client(aws_access_key_id=access_key,
                     aws_secret_access_key=secret_key,
                     endpoint_url=endpoint_url,
                     service_name="s3")
#Check if bucket exists and create it.
# (head_bucket only checks existence; it raises ClientError when missing.)
s3c.head_bucket(Bucket=bucket)
print "Bucket exists : %s"%(bucket)
#Verify if file exists
print "Checking if %s exists."%(key)
res = s3c.head_object(Bucket=bucket, Key=key)
#Read the object.
res = s3c.get_object(Bucket=bucket, Key=key)
data_stream = res["Body"]
datalen = 0
while True:
    #Read 64K at a time from stream.
    data = data_stream.read(64*1024)
    if not data:
        break
    datalen += len(data)
    print "Data read so far : %s"%(datalen)
print "Object %s read successfully. Total size : %sbytes"%(key, datalen)
|
13,349 | c01d76f395310a27fef665e137f786cba58144c5 | from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound
from pyramid.security import authenticated_userid
from pyramid.compat import escape
from phoenix.twitcherclient import generate_access_token
from phoenix.esgf.slcsclient import ESGFSLCSClient
class Actions(object):
    def __init__(self, request):
        """Gather request-scoped handles shared by the profile action views."""
        self.request = request
        self.session = request.session
        # settings = request.registry.settings
        self.collection = self.request.db.users
        # Act on the user named in the URL, falling back to the logged-in user.
        self.userid = self.request.matchdict.get('userid', authenticated_userid(self.request))
@view_config(route_name='update_esgf_certs', permission='edit')
def update_esgf_certs(self):
client = ESGFSLCSClient(self.request)
if client.get_token():
try:
client.get_certificate()
except Exception as err:
self.session.flash('Could not update certificate: {}'.format(escape(err.message)), queue="danger")
else:
self.session.flash('ESGF certificate was updated.', queue="success")
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_certs'))
elif False: # TODO: update slcs token ... slcs does not work yet
auth_url = client.authorize()
return HTTPFound(location=auth_url)
else:
callback = self.request.route_path('profile', userid=self.userid, tab='esgf_certs')
return HTTPFound(location=self.request.route_path('esgflogon', _query=[('callback', callback)]))
@view_config(route_name='forget_esgf_certs', permission='edit')
def forget_esgf_certs(self):
user = self.collection.find_one({'identifier': self.userid})
user['credentials'] = None
user['cert_expires'] = None
self.collection.update({'identifier': self.userid}, user)
self.session.flash("ESGF credentials removed.", queue='info')
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_certs'))
@view_config(route_name='generate_twitcher_token', permission='submit')
def generate_twitcher_token(self):
try:
generate_access_token(self.request.registry, userid=self.userid)
except Exception as err:
self.session.flash('Could not refresh token: {}'.format(escape(err.message)), queue="danger")
else:
self.session.flash('Twitcher token was updated.', queue="success")
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='twitcher'))
@view_config(route_name='generate_esgf_slcs_token', permission='submit')
def generate_esgf_slcs_token(self):
"""
Update ESGF slcs token.
"""
client = ESGFSLCSClient(self.request)
if client.get_token():
try:
client.refresh_token()
except Exception as err:
self.session.flash('Could not refresh token: {}'.format(escape(err.message)), queue="danger")
else:
self.session.flash('ESGF token was updated.', queue="success")
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))
else:
try:
auth_url = client.authorize()
except Exception as err:
self.session.flash('Could not retrieve token: {}'.format(escape(err.message)), queue="danger")
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))
else:
return HTTPFound(location=auth_url)
@view_config(route_name='forget_esgf_slcs_token', permission='submit')
def forget_esgf_slcs_token(self):
"""
Forget ESGF slcs token.
"""
client = ESGFSLCSClient(self.request)
client.delete_token()
self.session.flash("ESGF token removed.", queue='info')
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))
@view_config(route_name='esgf_oauth_callback', permission='submit')
def esgf_oauth_callback(self):
"""
Convert an authorisation grant into an access token.
"""
client = ESGFSLCSClient(self.request)
if client.callback():
# Redirect to the token view
return HTTPFound(location=self.request.route_path('profile', userid=self.userid, tab='esgf_slcs'))
else:
# If we have not yet entered the OAuth flow, redirect to the start
return HTTPFound(location=self.request.route_path('generate_esgf_slcs_token'))
@view_config(route_name='delete_user', permission='admin')
def delete_user(self):
if self.userid:
self.collection.remove(dict(identifier=self.userid))
self.session.flash('User removed', queue="info")
return HTTPFound(location=self.request.route_path('people'))
def includeme(config):
    """ Pyramid includeme hook.

    Registers the account/credential management routes on the app.

    :param config: app config
    :type config: :class:`pyramid.config.Configurator`
    """
    # (route name, URL pattern) pairs, registered in declaration order.
    routes = (
        ('update_esgf_certs', 'people/update_esgf_certs'),
        ('forget_esgf_certs', 'people/forget_esgf_certs'),
        ('generate_twitcher_token', 'people/gentoken'),
        ('generate_esgf_slcs_token', 'people/generate_esgf_token'),
        ('forget_esgf_slcs_token', 'people/forget_esgf_token'),
        ('esgf_oauth_callback', 'account/oauth/esgf/callback'),
        ('delete_user', 'people/delete/{userid}'),
    )
    for route_name, pattern in routes:
        config.add_route(route_name, pattern)
|
13,350 | 38dc9e6ec8e94360a102c6c450f027b45b65fb92 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from anthony_bi.sql import Order_info, NewTable
from json import dumps as encodeJSON
import random
from datetime import datetime
import time
import pandas as pd
import numpy as np
from anthony_bi.planwork import PlanWork, MACHINE, GROUPS, WORK
# Create your views here.
# 产生随机订单
# Constants used to generate random demo orders.
# NOTE(review): time.mktime returns a float; Python 3's random.randint
# requires integers — confirm the runtime version used by add_order_date.
START_TIME = time.mktime((2017,1,1,0,0,0,0,0,0)) # start-of-range timestamp for generated orders
END_TIME = time.mktime((2017,12,31,23,59,59,0,0,0)) # end-of-range timestamp for generated orders
saler_name = ['Ami', 'Tom', 'Jonh']
customer_name = ['A公司', 'B公司', 'C公司', 'x公司']
state_list = ['new', 'process', 'finish']
category_list = ['裤子', '短袖', '长袖', '大衣']
# City and province names (Chinese) used as random order locations.
city_list = ["海门","鄂尔多斯","招远","舟山","齐齐哈尔","盐城","赤峰","青岛","乳山","金昌","泉州","莱西","日照","胶南","南通","拉萨","云浮","梅州","文登","上海","攀枝花","威海","承德","厦门","汕尾","潮州","丹东","太仓","曲靖","烟台","福州","瓦房店","即墨","抚顺","玉溪","张家口","阳泉","莱州","湖州","汕头","昆山","宁波","湛江","揭阳","荣成","连云港","葫芦岛","常熟","东莞","河源","淮安","泰州","南宁","营口","惠州","江阴","蓬莱","韶关","嘉峪关","广州","延安","太原","清远","中山","昆明","寿光","盘锦","长治","深圳","珠海","宿迁","咸阳","铜川","平度","佛山","海口","江门","章丘","肇庆","大连","临汾","吴江","石嘴山","沈阳","苏州","茂名","嘉兴","长春","胶州","银川","张家港","三门峡","锦州","南昌","柳州","三亚","自贡","吉林","阳江","泸州","西宁","宜宾","呼和浩特","成都","大同","镇江","桂林","张家界","宜兴","北海","西安","金坛","东营","牡丹江","遵义","绍兴","扬州","常州","潍坊","重庆","台州","南京","滨州","贵阳","无锡","本溪","克拉玛依","渭南","马鞍山","宝鸡","焦作","句容","北京","徐州","衡水","包头","绵阳","乌鲁木齐","枣庄","杭州","淄博","鞍山","溧阳","库尔勒","安阳","开封","济南","德阳","温州","九江","邯郸","临安","兰州","沧州","临沂","南充","天津","富阳","泰安","诸暨","郑州","哈尔滨","聊城","芜湖","唐山","平顶山","邢台","德州","济宁","荆州","宜昌","义乌","丽水","洛阳","秦皇岛","株洲","石家庄","莱芜","常德","保定","湘潭","金华","岳阳","长沙","衢州","廊坊","菏泽","合肥","武汉","大庆"]
provency = ["广东", "广西", "云南", "江西", "贵州", "海南", "湖北", "湖南", "黑龙江", "吉林", "辽宁","上海", "新疆", "西藏", "内蒙古", "甘肃", "青海", "四川", "山西", "福建", "台湾", "山东"]
def home(request):
    """Render the BI home page."""
    return render_template_stub if False else render(request, 'index.html')  # noqa  # NOTE(review): keep as original
# 添加数据
def add_order_date(request):
    """Insert 5 random Order_info rows and 5 random NewTable rows.

    Each pair shares a random timestamp between START_TIME and END_TIME.
    Returns a JSON {'state': 20, 'message': 'OK'} response.
    NOTE(review): despite the name, the 'city' field is filled from the
    `provency` (province) list, not `city_list`.
    """
    for i in range(0, 5):
        # Random creation time inside the configured 2017 window.
        timestamp=time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(random.randint(START_TIME,END_TIME)))
        order = Order_info(
            saler=saler_name[random.randint(0,2)],
            customer=customer_name[random.randint(0,3)],
            created=timestamp,
            updated=timestamp,
            state=state_list[random.randint(0,2)],
            price=random.randint(1000,5000)
        )
        order.save()
        c_order = NewTable(
            customer=customer_name[random.randint(0,3)],
            created=timestamp,
            updated=timestamp,
            city=provency[random.randint(0,len(provency)-1)],
            category=category_list[random.randint(0,3)],
            price=random.randint(1000,5000),
            qty=random.randint(100, 1000),
        )
        c_order.save()
    return HttpResponse(encodeJSON({'state': 20, 'message': 'OK'}), content_type="application/json")
# 展现DEMO数据
def show_demo(request):
    """Render the demo dashboard page."""
    return render(request, 'show_demo.html')
# 展现数据(1)
def show_order(request):
    """Render the first order dashboard, forwarding date-range filters
    (begin_date / end_date / frequence) from the query string."""
    content = {
        'begin_date': request.GET.get('begin_date'),
        'end_date': request.GET.get('end_date'),
        'frequence': request.GET.get('frequence') or 0,  # default refresh frequency
    }
    return render(request, 'order_demo.html', content)
# 展现数据(2)
def show_order_2(request):
    """Render the second order dashboard with optional date-range filters."""
    content = {
        'begin_date': request.GET.get('begin_date'),
        'end_date': request.GET.get('end_date'),
    }
    return render(request, 'order_demo_2.html', content)
# 拿取数据
def api_order_info(request):
    """JSON API: order table rows plus per-customer and total monthly
    price sums for 2017 (12 slots, one per month).

    Filtering by begin_date/end_date is delegated to Order_info.filter;
    the result is assumed to be a pandas DataFrame — TODO confirm.
    """
    # Date-range filtering
    if request.GET.get('begin_date') and request.GET.get('end_date'):
        orders = Order_info().filter(created_gte=request.GET.get('begin_date'),created_lte=request.GET.get('end_date'))
    else:
        if request.GET.get('end_date'):
            orders = Order_info().filter(created_lte=request.GET.get('end_date'))
        elif request.GET.get('begin_date'):
            orders = Order_info().filter(created_gte=request.GET.get('begin_date'))
        else:
            orders = Order_info().get_all()
    # Keep a datetime-indexed copy for resampling before the string conversion below.
    new_order = orders.set_index('created')
    orders['updated'] = orders['updated'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
    orders['created'] = orders['created'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
    # Raw table rows for the front-end grid
    content = {
        'table_data': orders.to_dict(orient='records')
    }
    content['companys'] = list(orders['customer'].value_counts().to_dict().keys())
    content['price'] = sum(orders['price'])
    # Per-customer monthly totals (restricted to 2017)
    new_order = new_order['2017']
    for company in content['companys']:
        # 12 monthly buckets, '0' placeholder for months with no sales.
        content[company] = ['0']*12
        price_by_month = new_order[new_order['customer'] == company].resample('M')['price'].sum().fillna(0).to_dict()
        #print(company, '---------')
        for key, value in price_by_month.items():
            #print(key)
            content[company][key.month-1] = value
    # Overall monthly totals
    content['total_price'] = ['0']*12
    tmp_by_month = new_order.resample('M')['price'].sum().fillna(0).to_dict()
    #print('------total------')
    for key, value in tmp_by_month.items():
        #print(key.month, value)
        content['total_price'][key.month-1] = value
    return HttpResponse(encodeJSON({'data': content, 'status': 0, 'message': 'OK'}),content_type='application/json')
def api_show_data(request):
    """JSON API: per-customer price totals, order counts and order
    states for the dashboard charts."""
    if request.GET.get('begin_date'):
        begin_date = request.GET.get('begin_date')
        orders = Order_info().filter(created_gte=begin_date)
    else:
        orders = Order_info().get_all()
    # orders['updated'] = orders['updated'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
    # orders['created'] = orders['created'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
    # Total price per customer
    cg = orders.groupby('customer')
    res = cg['price'].sum().to_dict()
    content = {
        'companys': list(res.keys()),
        'total_price': list(res.values()),
        'total': sum(res.values()),
    }
    # Order counts per customer
    res = orders['customer'].value_counts().to_dict()
    content['orders_by_companys'] = [{'name': k, 'value': v} for k, v in res.items()]
    content['total_order'] = sum(res.values())
    # Order states per customer
    # NOTE(review): `states` is computed but never used afterwards.
    states = list(orders['state'].value_counts().to_dict().keys())
    content['orders_by_states'] = list()
    for company in content['companys']:
        tmp_dict = orders[orders['customer'] == company]['state'].value_counts().to_dict()
        content['orders_by_states'] += [{'name': k, 'value': v} for k, v in tmp_dict.items()]
    # print(content)
    return HttpResponse(encodeJSON({'data': content, 'status': 0, 'message': 'OK'}),content_type='application/json')
def api_show_c_data(request):
    """JSON API: NewTable aggregates — price/quantity per category and
    per-category quantity broken down by city."""
    c_data = NewTable().get_all()
    # c_data.to_excel('test.xlsx')
    # c_data['created'] = c_data['created'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
    # c_data['updated'] = c_data['updated'].apply(lambda x: x.strftime('%Y-%m-%d %H:%M:%S'))
    cg = c_data.groupby('category')
    res = cg['price'].sum().to_dict()
    content = {
        'category': list(res.keys()),
        'price': [{'name':k, 'value': v} for k, v in res.items()],
        'total_price': sum(res.values()),
    }
    res = cg['qty'].sum().to_dict()
    content['qty'] = list(res.values())
    content['total_num'] = sum(res.values())
    # Quantity per (category, city) pair, grouped under each category key.
    cg = c_data.groupby(['category', 'city'])
    res = cg['qty'].sum().to_dict()
    for category in content['category']:
        content[category] = list()
    for key, value in res.items():
        content[key[0]].append({'name': key[1], 'value': value})
    return HttpResponse(encodeJSON({'data': content, 'status': 0, 'message': 'OK'}),content_type='application/json')
def api_show_plan(request):
    """JSON API: run the work-plan optimizer (PlanWork) over the WORK
    data for machine group 'B' and return the best result found.

    NOTE(review): the response key 'reslut' is a typo but is part of
    the public API contract — renaming it would break clients.
    """
    df = pd.read_json(encodeJSON(WORK))
    new_plan = PlanWork(df, MACHINE, GROUPS['B'], 500)
    res = new_plan.find_best_result()
    content = {
        'default_time':new_plan.default_res['max_time'],
        'default_variance':new_plan.default_res['variance'],
        'reslut': res
    }
    return HttpResponse(encodeJSON({'data': content, 'status': 0, 'message': 'OK'}),content_type='application/json')
|
13,351 | 6008458cba1bcb94f89353d94997724252c6220d | """
https://leetcode.com/problems/maximum-subarray/#/description
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example, given the array [-2 , 1, -3, 4, -1, 2, 1, -5, 4],
the contiguous subarray [4,-1,2,1] has the largest sum = 6.
explanation: you keep one value, max_ending_here: the cumulative sum up to current element starting from somewhere in the past.
Algo:
At each new element, you could either add the new element to the existing sum or start calculating the sum from the current element (wipe out previous results)
At each new element if the current element is greater than the cumulative sum from the past plus the current element, we know the current element is the max_ending_here; else max_ending_here will be the cumulative sum + current element.
Example 1: [-2 , 1]. when you add -2 + 1 the result is not greater than 1. So 1 becomes max_ending_here. Start calculating cumulative sum from 1.
Example 2: [-2 , 1, -3, 4]. When you add -2 + 1 + (-3) + 4, the result is not greater than 4 so 4 becomes max_ending_here. Start calculating cumulative sum from 4.
All the while keep updating the max_so_far using max(max_so_far, max_ending_here).
Finally return max_so_far
ref: https://en.wikipedia.org/wiki/Maximum_subarray_problem
"""
def max_subarray(nums):
    """Kadane's algorithm: largest sum of any contiguous subarray.

    Assumes ``nums`` is non-empty (an empty list raises IndexError).
    Runs in O(n) time, O(1) extra space.
    """
    best = current = nums[0]
    for value in nums[1:]:
        # Either extend the running subarray or restart at this element.
        current = max(value, current + value)
        best = max(best, current)
    return best
if __name__ == '__main__':
    # Demo run; expected output: 6 (for subarray [4, -1, 2, 1]).
    print(max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
'''
num = 1
max_ending_here = max(1, -2+1) = 1
max_so_far = max(-2, 1) = 1
num=-3
max_ending_here = max(-3, 1+-3) = -2
max_so_far = max(1, -2) = 1
num=4
max_ending_here = max(4, -2+4) = 4
max_so_far = max(1, 4) = 4
num = -1
max_ending_here = max(-1, 4+-1) = 3
max_so_far = max(4, 3) = 4
num = 2
max_ending_here = max(2, 3+2) = 5
max_so_far = max(4, 5) = 5
num = 1
max_ending_here = max(1, 5 + 1) = 6
max_so_far = max(5, 6) = 6
num = -5
max_ending_here = max(-5, 6 + -5) = 1
max_so_far = max(6, 1) = 6
num = 4
max_ending_here = max(4, 1 + 4) = 5
max_so_far = max(6, 5) = 6
return 6
'''
|
13,352 | e031d3a4be69022d6eab74478d0257e18017da2f | import os
# Absolute path of the directory containing this config module.
basedir = os.path.abspath(os.path.dirname(__file__))
# Database connection settings.
# SECURITY NOTE(review): credentials and the secret key are hardcoded in
# source control — move them to environment variables.
DB_NAME = "cuisina_db"
DB_HOST = "127.0.0.1"
DB_USERNAME = "root"
DB_PASSWORD = "abonitalla123"
SECRET_KEY = "cuisinaDB"
|
13,353 | 1a57798e4dc24856847a47e1c263de07b81f5e9b | from flask import Flask, request, render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import update
from datetime import datetime
import hashlib
app= Flask(__name__)
app.config.from_pyfile('config.py')
from models import db
from models import Productos, Pedidos, ItemsPedidos, Usuarios
@app.route('/')
def inicio():
    """Landing page (login form)."""
    return render_template('inicio.html')
@app.route('/pedido', methods= ['POST', 'GET'])
def Mpedido():
    """Order page; expects the waiter's DNI in the submitted form."""
    DNI=request.form['DNI']
    return render_template('pedido.html', DNI=DNI)
@app.route('/mozo', methods= ['POST', 'GET'])
def mozo():
    """Waiter menu page; expects DNI in the submitted form."""
    DNI=request.form['DNI']
    return render_template('mozo.html', DNI=DNI)
@app.route('/listadonocobrados', methods= ['POST', 'GET'])
def nocobrados():
    """List orders not yet charged; expects DNI in the submitted form."""
    items=ItemsPedidos.query.all()
    pedidos=Pedidos.query.all()
    productos=Productos.query.all()
    DNI=request.form['DNI']
    return render_template('nocobrados.html',DNI=DNI, pedidos=pedidos, items=items, productos=productos )
@app.route('/cartamozo', methods= ['POST', 'GET'])
def cartamozo():
    """Render the waiter's menu with product names and prices.

    Expects ``DNI`` and ``Numero de mesa`` in the submitted form.
    Product rows are indexed 0=pizza, 1=gaseosa, 2=cerveza, 3=lomo,
    matching the indexes used in pedidorealizado().
    """
    DNI = request.form['DNI']
    mesa = request.form['Numero de mesa']
    productos = Productos.query.all()
    name0 = productos[0].Nombre
    name1 = productos[1].Nombre
    name2 = productos[2].Nombre
    name3 = productos[3].Nombre
    preciopizza = '$' + productos[0].PrecioUnitario
    preciogaseosa = '$' + productos[1].PrecioUnitario
    preciocerveza = '$' + productos[2].PrecioUnitario
    # BUG FIX: this previously read productos[2] (the cerveza row);
    # lomo is productos[3] everywhere else in this module.
    preciolomo = '$' + productos[3].PrecioUnitario
    return render_template('cartamozo.html', DNI=DNI, mesa=mesa, preciolomo=preciolomo, preciocerveza=preciocerveza, preciopizza=preciopizza, preciogaseosa=preciogaseosa, name0=name0, name1=name1, name2=name2, name3=name3)
@app.route('/pedidofinalizado',methods= ['POST', 'GET'] )
def pedidorealizado():
    """Create the order items and the order header from the submitted form.

    For each non-empty product quantity field (Gaseosa/Cerveza/Pizza/Lomo)
    one ItemsPedidos row per unit is inserted with state 'Pendiente', then
    a Pedidos header row is created with the accumulated total.
    NOTE(review): NumItem/NumPedido are derived from the last row in the
    table, so concurrent requests can race and produce duplicate IDs.
    """
    Itemspedidos= ItemsPedidos.query.all()
    productos= Productos.query.all()
    DNI=request.form['DNI']
    Datospedidos= Pedidos.query.all()
    mesa=request.form['Numero de mesa']
    # Next order/item numbers, computed from the last existing rows.
    nump=int(Itemspedidos[len(Itemspedidos)-1].NumPedido)+1
    T=0
    num=int(Itemspedidos[len(Itemspedidos)-1].NumItem)+1
    if request.form['Gaseosa'] != '':
        for i in range(int(request.form['Gaseosa'])):
            numpro=productos[1].NumProducto
            pre=productos[1].PrecioUnitario
            estad='Pendiente'
            #print(f"nump {nump} num {num} numpro {numpro} precio {pre} {estad}")
            producto= ItemsPedidos(NumItem=str(num), NumPedido=nump, NumProducto=numpro, Precio=pre, Estado=estad)
            db.session.add(producto)
            db.session.commit()
            num=int(num)+1
        T=T+(int(pre)*int(request.form['Gaseosa']))
    if request.form['Cerveza'] != '':
        for i in range(int(request.form['Cerveza'])):
            numpro=productos[2].NumProducto
            pre=productos[2].PrecioUnitario
            estad='Pendiente'
            #print(f"nump {nump} num {num} numpro {numpro} precio {pre} {estad}")
            producto= ItemsPedidos(NumItem=str(num), NumPedido=nump, NumProducto=numpro, Precio=pre, Estado=estad)
            db.session.add(producto)
            db.session.commit()
            num=int(num)+1
        T=T+(int(pre)*int(request.form['Cerveza']))
    if request.form['Pizza Muzarrella'] != '':
        for i in range(int(request.form['Pizza Muzarrella'])):
            numpro=productos[0].NumProducto
            pre=productos[0].PrecioUnitario
            estad='Pendiente'
            #print(f"nump {nump} num {num} numpro {numpro} precio {pre} {estad}")
            producto= ItemsPedidos(NumItem=str(num), NumPedido=nump, NumProducto=numpro, Precio=pre, Estado=estad)
            db.session.add(producto)
            db.session.commit()
            num=int(num)+1
        T=T+(int(pre)*int(request.form['Pizza Muzarrella']))
    if request.form['Lomo'] != '':
        for i in range(int(request.form['Lomo'])):
            numpro=productos[3].NumProducto
            pre=productos[3].PrecioUnitario
            estad='Pendiente'
            #print(f"nump {nump} num {num} numpro {numpro} precio {pre} {estad}")
            producto= ItemsPedidos(NumItem=str(num), NumPedido=nump, NumProducto=numpro, Precio=pre, Estado=estad)
            db.session.add(producto)
            db.session.commit()
            num=int(num)+1
        T=T+(int(pre)*int(request.form['Lomo']))
    # Create the order header with the accumulated total T.
    numped=int(Datospedidos[len(Datospedidos)-1].NumPedido)+1
    pedido=Pedidos(NumPedido=numped, Fecha=datetime.now(), Total=T, Cobrado='False', Observacion= request.form['Observaciones'],DNIMozo=DNI, Mesa=mesa)
    db.session.add(pedido)
    db.session.commit()
    return render_template('pedidorealizado.html', DNI=DNI)
@app.route('/listapedidos', methods= ['POST', 'GET'])
def listapedidos():
    """Kitchen view: list the order numbers that still have at least one
    pending ('Pendiente') item, in first-seen order."""
    items = ItemsPedidos.query.all()
    pendientes = []
    for item in items:
        # Collect each pending order number once, preserving order.
        if item.Estado == 'Pendiente' and item.NumPedido not in pendientes:
            pendientes.append(item.NumPedido)
    return render_template('listapedidos.html', productos=Productos.query.all(), Lista=pendientes, items=items, pedidos=Pedidos.query.all())
@app.route('/cambio', methods= ['POST', 'GET'])
def cambio():
    """Mark the checked order items (form field 'Listo') as ready ('Listo')."""
    Num=request.form.getlist('Listo')
    for N in Num:
        stmt = (update(ItemsPedidos).where(ItemsPedidos.NumItem == N).values(Estado='Listo'))
        db.session.execute(stmt)
        db.session.commit()
    return render_template('cambio.html')
@app.route('/venta',methods= ['POST', 'GET'])
def venta():
    """Mark the checked orders (form field 'Listo') as charged (Cobrado='True').

    NOTE(review): both branches below do the same thing — one compares
    with the string form N, the other with the int A — the A == 2
    special case appears to be leftover debugging code.
    """
    DNI=request.form['DNI']
    Num=request.form.getlist('Listo')
    Valores=[]
    for N in Num:
        A=int(N)
        if A == 2:
            stmt = (update(Pedidos).where(Pedidos.NumPedido == N).values(Cobrado='True'))
            Valores.append(A)
        else:
            stmt = (update(Pedidos).where(Pedidos.NumPedido == A).values(Cobrado='True'))
            Valores.append(A)
        db.session.execute(stmt)
        db.session.commit()
    return render_template('venta.html', DNI=DNI, Valores=Valores, pedidos= Pedidos.query.all())
@app.route('/bienvenida', methods= ['POST', 'GET'])
def bienvenida():
    """Login handler: match DNI + MD5 of the password against Usuarios
    and route to the waiter or cook page.

    NOTE(review): MD5 is not a safe password hash; `band= True` after a
    return is unreachable; `while i <= len(usuarios)` overruns by one,
    and the resulting IndexError is swallowed by the broad except and
    rendered as 'usuario_mal'.
    """
    usuarios= Usuarios.query.all()
    DNI=request.form['DNI']
    if request.method == 'POST':
        if request.form['DNI'] and request.form['Clave']:
            band= False
            i=0
            try:
                while i <= len(usuarios) and band == False:
                    if request.form['DNI'] == usuarios[i].DNI:
                        contrasena= hashlib.md5(bytes(request.form['Clave'], encoding='utf-8'))
                        if contrasena.hexdigest() == usuarios[i].Clave:
                            if usuarios[i].Tipo == 'Mozo':
                                return render_template('mozo.html', DNI=DNI)
                            else:
                                return render_template('cocinero.html')
                            band= True
                        else:
                            return render_template('pass_mal.html')
                    else:
                        i+=1
            except:
                return render_template('usuario_mal.html')
        else:
            return render_template('login_incorrecto.html')
if __name__ == '__main__':
    # Create tables if missing, then start the development server.
    db.create_all()
    app.run(debug= False)
|
13,354 | a00b946f2e56fa13c13c0efc2534840aa4093eef | """
This file will contain functions that will fetch data from and write into xml
files from the "data" directory. So far the data is in the form of py modules
and the functions respect that.
In the future the data will be converted to xml and these functions should be
modified accordingly.
"""
import data.characters
def get(query):
    """Generic lookup: sorted races, sorted classes, or the ability-name
    list; returns None for any other query string."""
    dispatch = {
        "races": lambda: sorted(data.characters.races),
        "classes": lambda: sorted(data.characters.classes),
        "ability_names": lambda: data.characters.ability_names,
    }
    handler = dispatch.get(query)
    if handler is None:
        return None
    return handler()
def get_ability_names():
    """Return the ability-name list from the character data module."""
    return data.characters.ability_names
def get_ability_adjustment(race, ability):
    """Return the racial ability adjustment, or 0 when the race/ability
    pair has no entry in the data tables."""
    try:
        return data.characters.ability_adjustment[race][ability]
    except KeyError:
        return 0
def get_saves(id, lvl):
    '''
    Returns value of save from list according to level of character

    `lvl` is 1-based; index lvl-1 is used in the saves table.
    '''
    return data.characters.saves[id][lvl - 1]
def get_class_saves(key, ch_class):
    '''
    Returns code that identifies which list of saves applies to given character
    class (looked up under the class's 'SavesLvls' mapping by `key`)
    '''
    return data.characters.classes[ch_class]['SavesLvls'][key]
def get_base_attack(id, lvl):
    '''
    Returns value of base attack from list according to level of character
    (`lvl` is 1-based)
    '''
    return data.characters.AttackBonuses[id][lvl-1]
def get_class_base_attack(ch_class):
    '''
    Returns code that identifies which list of attack bonuses applies to given
    character class
    '''
    return data.characters.classes[ch_class]['AttackBonuses']
def get_abilities_order(ch_class):
    """Return the class's 'AbilitiesOrder' entry from the data tables."""
    return data.characters.classes[ch_class]['AbilitiesOrder']
def get_class_skills(ch_class):
    """Return the class's 'ClassSkills' entry from the data tables."""
    return data.characters.classes[ch_class]['ClassSkills']
def get_skillp_modifier(ch_class):
    """Return the class's 'SkillPointsModifier' entry from the data tables."""
    return data.characters.classes[ch_class]['SkillPointsModifier']
def get_hit_die(ch_class):
    """Return the class's 'HitDie' entry from the data tables."""
    return data.characters.classes[ch_class]['HitDie']
|
13,355 | 4d3ba1ca0e7a7f747e7b7f8ee359133ca12337dc | ../../../lib/lv_bindings/driver/esp32/ili9XXX.py |
13,356 | ae5337f39611bad656682452a094c890272a81d0 | import os
import csv
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
# Check for environment variable
if not os.getenv("DATABASE_URL"):
    raise RuntimeError("DATABASE_URL is not set")

# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))

# Import zips.csv row-by-row into the locations table.
progress = 0
# Use a context manager so the CSV file is always closed
# (the original also shadowed the builtin name `file`).
with open("zips.csv", newline="") as csv_file:
    reader = csv.reader(csv_file)
    next(reader)  # Skip known header line
    for zipcode, city, state, lat, lon, pop in reader:
        try:
            db.execute("INSERT INTO locations (zipcode, city, state, lat, long, pop) VALUES (:zipcode, :city, :state, :lat, :lon, :pop);",
                       { "zipcode": zipcode, "city": city, "state": state, "lat": lat, "lon": lon, "pop":pop })
        except Exception as err:
            # Chain the original exception so the root cause is not lost
            # (the bare `except:` previously discarded it).
            raise RuntimeError(f"Error inserting zipcode {zipcode} into the table.") from err
        progress = progress + 1
        if progress % 100 == 0:
            print(f"Inserted {progress} zip codes...")

try:
    print("Committing changes to database...")
    db.commit()
except Exception as err:
    raise RuntimeError("Failed to commit changes.") from err
print(f"Successfully imported {progress} zip codes.")
13,357 | ced2fdd8ae5bf1bbd66a1b957d77360deb006a26 | <?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta content="text/html; charset=UTF-8" http-equiv="content-type" />
<title>Lab 5-Shruti Charugulla</title>
</head>
<body>
<table cellpadding="1" cellspacing="1" border="1">
<thead>
<tr><td rowspan="1" colspan="3">Test Case 1</td></tr>
</thead>
<tbody>
<tr>
<td>open</td>
<td>http://www.austincc.edu/wtucker/
<datalist>
<option>http://www.austincc.edu/wtucker/</option>
</datalist>
</td>
<td></td>
</tr>
<tr>
<td>clickAt</td>
<td>link=ITSE 1392 - Special Topics - Automated Software Testing
<datalist>
<option>link=ITSE 1392 - Special Topics - Automated Software Testing</option>
<option>//a[contains(text(),'ITSE 1392 - Special Topics - Automated Software Testing')]</option>
<option>//a[contains(@href, 'ITSE1392/fallautotesting.htm')]</option>
<option>//a[2]</option>
</datalist>
</td>
<td>57,13</td>
</tr>
<tr>
<td>clickAt</td>
<td>link=Synonym 34859
<datalist>
<option>link=Synonym 34859</option>
<option>//a[contains(text(),'Synonym 34859')]</option>
<option>//a[contains(@href, 'ITSE1392fall17.pdf')]</option>
<option>//h4/a</option>
</datalist>
</td>
<td>39,8</td>
</tr>
</tbody></table>
<table cellpadding="1" cellspacing="1" border="1">
<thead>
<tr><td rowspan="1" colspan="3">Test Case 2</td></tr>
</thead>
<tbody>
<tr>
<td>open</td>
<td>http://www.austincc.edu/wtucker/
<datalist>
<option>http://www.austincc.edu/wtucker/</option>
</datalist>
</td>
<td></td>
</tr>
<tr>
<td>clickAt</td>
<td>link=ITSE 1391 - Special Topics - Fundamentals of Software Testing
<datalist>
<option>link=ITSE 1391 - Special Topics - Fundamentals of Software Testing</option>
<option>//a[contains(text(),'ITSE 1391 - Special Topics - Fundamentals of Software Testing')]</option>
<option>//a[contains(@href, 'ITSE1391/fallsoftwaretesting.htm')]</option>
<option>//td/a</option>
</datalist>
</td>
<td>90,8</td>
</tr>
<tr>
<td>clickAt</td>
<td>link=Synonym 34854
<datalist>
<option>link=Synonym 34854</option>
<option>//a[contains(text(),'Synonym 34854')]</option>
<option>//a[contains(@href, 'ITSE1391fall17.pdf')]</option>
<option>//h4/a</option>
</datalist>
</td>
<td>45,8</td>
</tr>
</tbody></table>
</body>
</html> |
13,358 | b8537e06f056680a409c10efb7dade229ac93a65 | """
tipsy.py -- This is a Flask-based to-do list
"""
from flask import Flask, render_template, request
import model
app = Flask(__name__)
@app.route("/")
def index():
    """Home page; user name is currently hardcoded."""
    return render_template("index.html", user_name="Angie")
@app.route("/tasks")
def list_tasks():
    """Render the task list page."""
    return render_template("list_tasks.html")
@app.route("/add-task")
def add_task():
    """Render the add-task form; user_id is a placeholder string."""
    return render_template("add_task.html", user_id="user_id")
@app.route("/add-task-create")
def add_task_create():
    """Create a new task from query-string arguments and confirm.

    NOTE(review): a GET endpoint performing a write is unconventional,
    and the returned `row` is unused.
    """
    model.connect_to_db()
    title = request.args.get("title")
    user_id = request.args.get("user_id")
    created_at = request.args.get("datestamp")
    row = model.new_task(title, created_at, user_id)
    html = render_template("added_task.html")
    return html
if __name__ == "__main__":
    # Development server with the interactive debugger enabled.
    app.run(debug=True)
13,359 | b460e4ca571177e3105b907114215f3be1929a66 | #coding:utf-8
import urllib,urllib2
# Python 2 script: POST a translation request (zh-CN -> en) to Google Translate.
url='http://translate.google.cn'
data={'sl':'zh-CN','tl':'en','js':'n','prev':'_t','hl':'zh-CN','ie':'UTF-8','text':'你好'}
data=urllib.urlencode(data)
# Minimal User-Agent so the request is not rejected as a script.
header={'User-Agent':'chrome/28.0'}
req=urllib2.Request(url,data,header)
response=urllib2.urlopen(req).read()
# NOTE(review): assumes the response body is GBK-encoded — confirm;
# re-encodes it as UTF-8 before printing.
response=unicode(response,'GBK').encode('UTF-8')
print response
|
13,360 | 04ac93f3c9d6d2b9aa9dbe1eecc5d36d5c57d6e6 | # 3 lines: For loop, built-in enumerate function, new style formatting
friends = ['john', 'pat', 'gary', 'michael']
# Print each friend with its position using enumerate and str.format.
for position, friend in enumerate(friends):
    print("iteration {iteration} is {name}".format(iteration=position, name=friend))
|
13,361 | fd8bae747c78cd62151152984122e38e51c90acd | import itertools
import math
def is_path(pathList):
    """Return True when pathList (moves 'R'/'D') traces a complete
    top-left-to-bottom-right walk across a square grid whose side is
    half the path length, never leaving the grid."""
    gridSize = len(pathList) // 2
    x = y = 0          # start at the top-left corner
    steps_taken = 0
    for move in pathList:
        steps_taken += 1
        if move == 'R':
            x += 1
        elif move == 'D':
            y += 1
        # Abandon the walk as soon as it leaves the grid.
        if x > gridSize or y > gridSize:
            break
    # Valid only if we consumed every step and landed exactly on the corner.
    return x == gridSize and y == gridSize and steps_taken == 2 * gridSize
print(list(itertools.product('RD', repeat=4)))
# Brute-force enumeration (commented out below) is way too slow for 20x20.
# The count is the number of ways to choose which half of the 2n steps are
# 'R': C(2n, n) = (2n)! / (n! * n!), i.e. 40 choose 20 for a 20x20 grid.
# BUG FIX: use floor division so the answer stays an exact integer —
# Python 3 `/` returns a float (prints 137846528820.0) and can lose
# precision when the operands are huge factorials.
answer = math.factorial(40) // (math.factorial(20)**2)
print("answer is " + str(answer))
# p = is_path(('R','R','D','D'))
# print(str(p))
#
# gridSize = 20
# count = 0
# for path in itertools.product('RD', repeat=2*gridSize):
#     p = is_path(path)
#     if p:
#         count += 1
#     print(str(path) + " is path " + str(count))
#
# print("There are " + str(count) + " paths for a grid of size " + str(gridSize))
|
13,362 | a3812e02c921b4ae707b4789491c24438d538d48 |
class Averages:
    """Container for average exchange rates from several providers
    (LocalBitcoins, Yadio, and DolarToday in USD and BTC)."""
    def __init__(self, localbitcoin, yadio, dolartoday_usd, dolartoday_btc):
        self.localbitcoin = localbitcoin
        self.yadio = yadio
        self.dolartoday_usd = dolartoday_usd
        self.dolartoday_btc = dolartoday_btc

    def __repr__(self):
        # Debug-friendly representation listing every stored rate.
        return ("Averages(localbitcoin=%r, yadio=%r, dolartoday_usd=%r, "
                "dolartoday_btc=%r)" % (self.localbitcoin, self.yadio,
                                        self.dolartoday_usd, self.dolartoday_btc))
|
13,363 | b8962adcc645768ffedc39742c4720e5e00c5f84 | import os
import threading
import yaml
import json
import hashlib
import datetime
import shutil
import argparse
import socket
import BaseHTTPServer
import SimpleHTTPServer
import httplib
import logging, logging.handlers
import socket
from threading import Thread, Event
from time import sleep
from flask import (Flask, request, redirect, url_for, jsonify,
render_template, send_from_directory)
from werkzeug import (secure_filename, SharedDataMiddleware)
from flask.ext.socketio import SocketIO, emit
from common import *
from core import *
from dispatcher import *
from compileLauncher import *
from mesosutils import *
import db
# Flask application and Socket.IO layer for the K3 web service.
webapp = Flask(__name__, static_url_path='')
socketio = SocketIO(webapp)
# Root logger; handlers are attached in initWeb().
logger = logging.getLogger("")
# Mesos dispatcher state (set up elsewhere) and shutdown coordination events.
dispatcher = None
driverDispatch = None
dispatcherTerminate = Event()
webserverTerminate = Event()
haltFlag = Event()
# driver = None
# driver_t = None
# driverInitilize = Event()
# Compilation-service state; compile_tasks maps ids to running tasks.
compileService = None
driverCompiler = None
lastCompile = None
compile_tasks = {}
index_message = 'Welcome to K3'
class SocketIOHandler(logging.Handler):
    """Logging handler that forwards log records to websocket clients
    on the /compile namespace."""
    def emit(self, record):
        # Broadcast the formatted-free raw message text.
        socketio.emit('my response', record.getMessage(), namespace='/compile')
#===============================================================================
# General Web Service Functions
#===============================================================================
def initWeb(port, **kwargs):
    """
    Peforms web service initialization

    Sets up logging (console + rotating file), the local directory
    layout (apps/jobs/archive/build/log), Flask config values, and —
    unless compilation is disabled — the compiler service logging and
    executor scripts.

    :param port: HTTP port the service will listen on
    :param kwargs: local, server, host, master, compile (see comments below)
    """
    # Configure logging
    # logging.Formatter(fmt='[%(asctime)s %(levelname)-5s %(name)s] %(message)s',datefmt='%H:%M:%S')
    log_fmt = ServiceFormatter('[%(asctime)s %(levelname)6s] %(message)s')
    log_console = logging.StreamHandler()
    log_console.setFormatter(log_fmt)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(log_console)
    logger.debug("Setting up directory structure")
    # LOCAL DIR : Local path for storing all K3 Applications, job files, executor, output, etc.
    LOCAL_DIR = kwargs.get('local', '/k3/web')
    # SERVER URL : URL for serving static content. Defaults to using Flask as static handler via /fs/ endpoint
    SERVER_URL = kwargs.get('server', '/fs/')
    host = kwargs.get('host', socket.gethostname())
    master = kwargs.get('master', None)
    JOBS_TARGET = 'jobs'
    APPS_TARGET = 'apps'
    ARCHIVE_TARGET = 'archive'
    BUILD_TARGET = 'build'
    LOG_TARGET = 'log'
    # TODO: Either do away with this Flask_request (python vers limitation)
    # or simplify this
    APPS_DEST = os.path.join(LOCAL_DIR, APPS_TARGET)
    APPS_URL = os.path.join(SERVER_URL, APPS_TARGET)
    JOBS_DEST = os.path.join(LOCAL_DIR, JOBS_TARGET)
    JOBS_URL = os.path.join(SERVER_URL, JOBS_TARGET)
    ARCHIVE_DEST = os.path.join(LOCAL_DIR, ARCHIVE_TARGET)
    ARCHIVE_URL = os.path.join(SERVER_URL, ARCHIVE_TARGET)
    BUILD_DEST = os.path.join(LOCAL_DIR, BUILD_TARGET)
    BUILD_URL = os.path.join(SERVER_URL, BUILD_TARGET)
    LOG_DEST = os.path.join(LOCAL_DIR, LOG_TARGET)
    # Store dir structures in web context
    webapp.config['DIR'] = LOCAL_DIR
    webapp.config['PORT'] = port
    webapp.config['HOST'] = host
    webapp.config['ADDR'] = 'http://%s:%d' % (host, port)
    webapp.config['MESOS'] = master
    webapp.config['UPLOADED_APPS_DEST'] = APPS_DEST
    webapp.config['UPLOADED_APPS_URL'] = APPS_URL
    webapp.config['UPLOADED_JOBS_DEST'] = JOBS_DEST
    webapp.config['UPLOADED_JOBS_URL'] = JOBS_URL
    webapp.config['UPLOADED_ARCHIVE_DEST'] = ARCHIVE_DEST
    webapp.config['UPLOADED_ARCHIVE_URL'] = ARCHIVE_URL
    webapp.config['UPLOADED_BUILD_DEST'] = BUILD_DEST
    webapp.config['UPLOADED_BUILD_URL'] = BUILD_URL
    webapp.config['LOG_DEST'] = LOG_DEST
    webapp.config['COMPILE_OFF'] = not(kwargs.get('compile', False))
    # Create dirs, if necessary
    for p in [LOCAL_DIR, JOBS_TARGET, APPS_TARGET, ARCHIVE_TARGET, BUILD_TARGET, LOG_TARGET]:
        path = os.path.join(LOCAL_DIR, p)
        if not os.path.exists(path):
            os.mkdir(path)
    # Configure rotating log file
    logfile = os.path.join(LOG_DEST, 'web.log')
    webapp.config['LOGFILE'] = logfile
    log_file = logging.handlers.RotatingFileHandler(logfile, maxBytes=2*1024*1024, backupCount=5, mode='w')
    log_file.setFormatter(log_fmt)
    logger.addHandler(log_file)
    logger.info("\n\n\n\n\n==================== <<<<< K3 >>>>> ===================================")
    logger.info("FLASK WEB Initializing:\n" +
          " Host : %s\n" % host +
          " Port : %d\n" % port +
          " Master: %s\n" % master +
          " Local : %s\n" % LOCAL_DIR +
          " Server: %s\n" % SERVER_URL +
          " Port : %d\n" % port)
    # Configure Compilation Service
    if not webapp.config['COMPILE_OFF']:
        # Dedicated compiler logger: websocket broadcast + rotating file.
        compileLogger = logging.getLogger("compiler")
        sio_fmt = ServiceFormatter('\n' + u'[%(asctime)s] %(message)s\n'.encode("utf8", errors='ignore'))
        sio = SocketIOHandler()
        sio.setFormatter(sio_fmt)
        compileLogger.addHandler(sio)
        compileFile = os.path.join(LOG_DEST, 'compiler.log')
        webapp.config['COMPILELOG'] = compileFile
        compile_fmt = ServiceFormatter(u'[%(asctime)s] %(message)s'.encode("utf8", errors='ignore'))
        compile_file = logging.handlers.RotatingFileHandler(compileFile, maxBytes=5*1024*1024, backupCount=20, mode='w')
        compile_file.setFormatter(compile_fmt)
        compileLogger.addHandler(compile_file)
        compileLogger.info("\n\n\n\n\n==================== <<<<< Compiler Service Initiated >>>>> ===================================")
    # Check for executor(s), copy if necessary
    compiler_nm = 'CompileExecutor.py'
    compiler_exec = os.path.join(LOCAL_DIR, compiler_nm)
    if not os.path.exists(compiler_exec):
        logger.info("Compiler executor not found. Copying to web root dir.")
        shutil.copyfile(compiler_nm, compiler_exec)
    launcher_nm = 'k3executor.py'
    launcher_exec = os.path.join(LOCAL_DIR, launcher_nm)
    if not os.path.exists(launcher_exec):
        logger.info("K3 Executor not found. Copying to web root dir.")
        shutil.copyfile(launcher_nm, launcher_exec)
def returnError(msg, errcode):
    """
    Format and return an error response.

    JSON clients receive the raw message with the given HTTP status code;
    browser clients receive the rendered error template.
    """
    logger.warning("[FLASKWEB] Returning error code %d, `%s`" % (errcode, msg))
    wants_json = request.headers['Accept'] == 'application/json'
    if wants_json:
        return msg, errcode
    return render_template("error.html", message=msg, code=errcode)
def shutdown_server():
    """Stop the Flask/werkzeug web server and signal cooperating threads."""
    global dispatcher
    logging.warning ("[FLASKWEB] Attempting to kill the server")
    # Werkzeug exposes its shutdown hook via the WSGI environ; it is only
    # present when running under the werkzeug development server.
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError("Not running the server")
    func()
    # Flag threads that poll these events/attributes to wind down.
    webserverTerminate.set()
    dispatcher.terminate = True
def shutdown_dispatcher():
    """Kill all outstanding compile tasks and signal dispatcher termination."""
    # driver.stop()
    # driver_t.join()
    # Kill every tracked compile task (values expose a kill() method).
    for k, v in compile_tasks.items():
        v.kill()
    logging.info ("[FLASKWEB] Detached from Mesos")
    dispatcherTerminate.set()
#===============================================================================
# General Web Service End Points
#===============================================================================
@webapp.route('/')
def root():
    """
    #------------------------------------------------------------------------------
    #  / - Home (welcome msg for JSON clients, redirect to index page otherwise)
    #------------------------------------------------------------------------------
    """
    wants_json = request.headers['Accept'] == 'application/json'
    if wants_json:
        return "Welcome\n\n", 200
    return redirect(url_for('index'))
@webapp.route('/index')
def index():
    """Render the home page; JSON clients get a plain welcome message."""
    if request.headers['Accept'] != 'application/json':
        return render_template('index.html')
    return "Welcome\n\n", 200
@webapp.route('/about')
def about():
    """
    #------------------------------------------------------------------------------
    #  /about - Display about page (JSON clients are sent the REST reference)
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /about]')
    wants_json = request.headers['Accept'] == 'application/json'
    if wants_json:
        return redirect(url_for('staticFile', filename="rest.txt"))
    return render_template('about.html')
@webapp.route('/restapi')
def restapi():
    """
    #------------------------------------------------------------------------------
    #  /restapi - Display complete list of API EndPoints
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /restapi] API Reference request')
    if request.headers['Accept'] == 'application/json':
        return redirect(url_for('staticFile', filename="rest.txt"))
    with open('static/rest.txt') as fh:
        api_text = fh.read()
    return render_template('rest.html', api=api_text)
@webapp.route('/log')
def getLog():
    """
    #------------------------------------------------------------------------------
    #  /log - Displays the current web-server log file
    #------------------------------------------------------------------------------
    """
    with open(webapp.config['LOGFILE'], 'r') as fh:
        contents = fh.read()
    if request.headers['Accept'] == 'application/json':
        return contents, 200
    return render_template("output.html", output=contents)
@webapp.route('/yamlinstructions')
def getYamlInstructions():
    """
    #------------------------------------------------------------------------------
    #  /yamlinstructions - Displays the YAML role-file template/instructions
    #------------------------------------------------------------------------------
    """
    with open('role_file_template.yaml', 'r') as fh:
        contents = fh.read()
    if request.headers['Accept'] == 'application/json':
        return contents, 200
    return render_template("output.html", output=contents)
@webapp.route('/trace')
def trace():
    """
    #------------------------------------------------------------------------------
    #  /trace - Debugging endpoint: echoes the client's HTTP request data as JSON
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /trace] Trace debug request')
    details = dict(
        args=request.args,
        form=request.form,
        method=request.method,
        url=request.url,
        client_ip=request.remote_addr,
        headers={key: str(val) for key, val in request.headers.items()},
    )
    return jsonify(details), 200
@webapp.route('/restart')
def restart():
    """
    #------------------------------------------------------------------------------
    #  /restart - Restart the K3 Dispatch Service (kills/cleans all running tasks)
    #    Only the dispatcher is cycled; the web server itself stays up.
    #------------------------------------------------------------------------------
    """
    logging.warning ("[FLASKWEB] Shutting down K3 Dispatcher....")
    shutdown_dispatcher()
    return 'Dispatcher is restarting.....Give me a millisec'
@webapp.route('/kill')
def shutdown():
    """
    #------------------------------------------------------------------------------
    #  /kill - Kill the server (TODO: Clean this up)
    #    Shutdown order matters: web server first, then dispatcher, then the
    #    process-level halt flag that unblocks the main thread.
    #------------------------------------------------------------------------------
    """
    logging.warning ("[FLASKWEB] Shutting down Flask Web Server....")
    shutdown_server()
    logging.warning ("[FLASKWEB] Shutting down K3 Dispatcher....")
    shutdown_dispatcher()
    haltFlag.set()
    return 'Server is going down...'
#===============================================================================
# Static Content Service
#===============================================================================
@webapp.route('/fs/<path:path>/')
def staticFile(path):
    """
    #------------------------------------------------------------------------------
    #  /fs - File System Exposure for the local webroot folder
    #        Note: Direct file access via curl should include a trailing slash (/)
    #          Otherwise, you will get a 302 redirect to the actual file
    #------------------------------------------------------------------------------
    """
    logger.info('[FLASKWEB /fs] Static File Request for `%s`' % path)
    local = os.path.join(webapp.config['DIR'], path)
    if not os.path.exists(local):
        return returnError("File not found: %s" % path, 404)
    if os.path.isdir(local):
        contents = sorted(os.listdir(local))
        for i, f in enumerate(contents):
            # BUG FIX: listdir() returns bare names; the original tested
            # os.path.isdir(f) relative to the process CWD, so subdirectories
            # were never tagged with a trailing '/'. Join against `local`.
            if os.path.isdir(os.path.join(local, f)):
                contents[i] += '/'
        if request.headers['Accept'] == 'application/json':
            return jsonify(dict(cwd=local, contents=contents)), 200
        else:
            return render_template('listing.html', cwd=path, listing=contents), 200
    else:
        # Render small text-like files inline; anything else is served raw.
        if 'stdout' in local or 'output' in local or local.split('.')[-1] in ['txt', 'yaml', 'yml', 'json', 'log']:
            with open(local, 'r') as file:
                # output = unicode(file.read(), 'utf-8')
                output = file.read()
            if request.headers['Accept'] == 'application/json':
                return output, 200
            else:
                return render_template("output.html", output=output)
        return send_from_directory(webapp.config['DIR'], path)
#===============================================================================
# Application End Points
#===============================================================================
@webapp.route('/app', methods=['GET', 'POST'])
def uploadAppRedir():
    """Alias endpoint: /app behaves exactly like /apps."""
    logger.debug('[FLASKWEB /app] Redirect to /apps')
    return uploadApp()
@webapp.route('/apps', methods=['GET', 'POST'])
def uploadApp():
    """
    #------------------------------------------------------------------------------
    #  /apps, /app - Application Level interface
    #         POST   Upload new application
    #           curl -i -X POST -H "Accept: application/json"
    #              -F file=@<filename> http://<host>:<port>/apps
    #
    #         GET    Display both list of loaded apps and form to upload new ones
    #
    #         /app will redirect to /apps
    #
    #  NOTE: a POST without a file falls through to the GET listing below.
    #------------------------------------------------------------------------------
    """
    if request.method == 'POST':
        logger.debug("[FLASKWEB /apps] POST request to upload new application")
        file = request.files['file']
        if file:
            # Sanitize the client-supplied filename before touching the disk.
            name = secure_filename(file.filename)
            path = os.path.join(webapp.config['UPLOADED_APPS_DEST'], name)
            if not os.path.exists(path):
                os.mkdir(path)
            # Honor a caller-supplied uid if present/non-empty, else generate one.
            uid = getUID() if 'uid' not in request.form or not request.form['uid'] else request.form['uid']
            path = os.path.join(path, uid)
            if not os.path.exists(path):
                os.mkdir(path)
            fullpath = os.path.join(path, name)
            file.save(fullpath)
            # Ensure the per-app job directory exists for later job submissions.
            jobdir = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], name)
            if not os.path.exists(jobdir):
                os.mkdir(jobdir)
            # hash = hashlib.md5(open(fullpath).read()).hexdigest()
            # The optional 'tag' form field is stored in the app's `hash` column.
            hash = request.form['tag'] if 'tag' in request.form else ''
            # if (db.checkHash(hash)):
            db.insertApp(dict(uid=uid, name=name, hash=hash))
            logger.info("[FLASKWEB] Added new application: `%s`, uid=`%s`", name, uid)
            #TODO: Complete with paths for archive & app
            if request.headers['Accept'] == 'application/json':
                output = dict(name=name, uid=uid, status='SUCCESS', greeting='Thank You!')
                return jsonify(output), 200
            else:
                return redirect(url_for('uploadApp'))
    # GET request (or a POST that carried no file): list known apps.
    logger.debug('[FLASKWEB /apps] GET request for list of apps')
    applist = db.getAllApps()
    versions = {a['name']: db.getVersions(a['name'], limit=5) for a in applist}
    # TODO: Add 2nd link on AppList: 1 to launch latest, 1 to show all versions
    if request.headers['Accept'] == 'application/json':
        return jsonify(dict(apps=applist)), 200
    else:
        return render_template('apps.html', applist=applist, versions=versions)
@webapp.route('/app/<appName>')
def getAppRedir(appName):
    """Alias endpoint: /app/<appName> behaves exactly like /apps/<appName>."""
    logger.debug('[FLASKWEB /app/<appName>] Redirect to /apps/%s' % appName)
    return getApp(appName)
@webapp.route('/apps/<appName>')
def getApp(appName):
    """
    #------------------------------------------------------------------------------
    #  /apps/<appName> - Specific Application Level interface
    #         GET    Display all versions for the given application
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /apps/<appName>] GET request for app, `%s`' % appName)
    known_apps = [entry['name'] for entry in db.getAllApps()]
    if appName not in known_apps:
        return returnError("Application %s does not exist" % appName, 404)
    versionList = db.getVersions(appName)
    if request.headers['Accept'] == 'application/json':
        return jsonify(dict(name=appName, versions=versionList)), 200
    return render_template("apps.html", name=appName, versionList=versionList)
@webapp.route('/app/<appName>/<appUID>', methods=['GET', 'POST'])
def archiveAppRedir(appName, appUID):
    """
    Redirect to archiveApp (/apps/<appName>/<appUID>).
    """
    # FIX: corrected typo in the log message ("Redirec" -> "Redirect").
    logger.debug('[FLASKWEB /app/<appName>/<appUID>] Redirect to /apps/%s/%s'
        % (appName, appUID))
    return archiveApp(appName, appUID)
@webapp.route('/apps/<appName>/<appUID>', methods=['GET', 'POST'])
def archiveApp(appName, appUID):
    """
    #------------------------------------------------------------------------------
    #  /apps/<appName>/<appUID> - Specific Application Level interface
    #         POST   (Upload archive data (C++, Source, etc....)
    #           curl -i -X POST -H "Accept: application/json"
    #              -F "file=<filename>" http://qp1:5000/apps/<addName>/<addUID>
    #
    #         GET    (TODO) Display archived files... NotImplemented
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /app/<appName>/<appUID>] %s Request for App Archive `%s`, UID=`%s`' % (request.method, appName, appUID))
    # NOTE(review): applist is currently unused since the existence guard
    # below is commented out.
    applist = [a['name'] for a in db.getAllApps()]
    uname = AppID.getAppId(appName, appUID)
    # if appName not in applist:
    #   logger.warning("Archive request for app that does not exist: %s", appName)
    #   return returnError("Application %s does not exist" % appName, 404)
    if request.method == 'POST':
        file = request.files['file']
        if file:
            filename = secure_filename(file.filename)
            # Path is encoded to bytes here — presumably to sidestep non-ASCII
            # filesystem issues; TODO confirm this is still needed.
            path = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname).encode(encoding='utf8', errors='ignore')
            logger.debug("Archiving file, %s, to %s" % (filename, path))
            if not os.path.exists(path):
                os.mkdir(path)
            file.save(os.path.join(path, filename))
            return "File Uploaded & archived\n", 202
        else:
            logger.warning("Archive request, but no file provided.")
            return "No file received\n", 400
    elif request.method == 'GET':
        # GET simply redirects to the static build directory for this app/uid.
        path = os.path.join(webapp.config['UPLOADED_BUILD_URL'], uname)
        return redirect(path, 302)
@webapp.route('/delete/app/<appName>', methods=['POST'])
def deleteApp(appName):
    """
    #------------------------------------------------------------------------------
    #  /delete/app/<appName>
    #         POST     Deletes an app from the web server
    #           NOTE: Data files will remain in webroot on the server, but
    #             the app will be inaccessible through the interface
    #             (metadata is removed from the internal db)
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /delete/app/<appName>] Request to delete App `%s`', appName)
    applist = [a['name'] for a in db.getAllApps()]
    if appName not in applist:
        return returnError("Application %s does not exist" % appName, 404)
    # BUG FIX: the original log call had a `%s` placeholder but never passed
    # appName, so the literal "%s" was logged.
    logger.info("[FLASKWEB] DELETING all versions of app, `%s`", appName)
    db.deleteAllApps(appName)
    if request.headers['Accept'] == 'application/json':
        return jsonify(dict(app=appName, status='DELETED, files remain on server')), 200
    else:
        applist = db.getAllApps()
        versions = {a['name']: db.getVersions(a['name'], limit=5) for a in applist}
        return render_template('apps.html', applist=applist, versions=versions)
#===============================================================================
# Job End Points
#===============================================================================
@webapp.route('/job')
def listJobsRedir():
    """Alias endpoint: /job behaves exactly like /jobs."""
    logger.debug('[FLASKWEB /job] Redirecting to /jobs')
    return listJobs()
@webapp.route('/jobs')
def listJobs():
    """
    #------------------------------------------------------------------------------
    #  /jobs - Current runtime & completed job Interface
    #         GET    Display currently executing & recently completed jobs
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /jobs] Request for job listing')

    def _to_iso(ts):
        # Convert a DB timestamp string (db.TS_FMT) to an ISO-8601 UTC string.
        return datetime.datetime.strptime(ts, db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()

    jobs = db.getJobs(numdays=2)
    for job in jobs:
        job['time'] = _to_iso(job['time'])
        if job['complete']:
            job['complete'] = _to_iso(job['complete'])
    compiles = db.getCompiles()
    for entry in compiles:
        if entry['submit']:
            entry['submit'] = _to_iso(entry['submit'])
        if entry['complete']:
            entry['complete'] = _to_iso(entry['complete'])
    if request.headers['Accept'] == 'application/json':
        return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200
    return render_template("jobs.html", joblist=jobs, compilelist=compiles)
@webapp.route('/jobs/<appName>', methods=['GET', 'POST'])
def createJobLatest(appName):
    """Dispatch createJob against the most recently uploaded version of appName."""
    logger.debug('[FLASKWEB /jobs/<appName>] Redirect to current version of /jobs/%s' % appName)
    app = db.getApp(appName)
    if not app:
        return returnError("Application %s does not exist" % appName, 404)
    return createJob(appName, app['uid'])
@webapp.route('/jobs/<appName>/<appUID>', methods=['GET', 'POST'])
def createJob(appName, appUID):
    """
    #------------------------------------------------------------------------------
    #  /jobs/<appName>
    #  /jobs/<appName>/<appUID - Launch a new K3 Job
    #         POST    Create new K3 Job
    #          curl -i -X POST -H "Accept: application/json"
    #             -F "file=@<rolefile>"
    #             -F logging=[True | False]
    #             -F jsonlog=[True | False]
    #             -F jsonfinal=[True | False]
    #             -F perf_profile=[True | False]
    #             -F perf_frequency=[n]
    #             -F cmd_prefix='str'
    #             -F cmd_infix='str'
    #             -F cmd_suffix='str'
    #             -F http://<host>:<port>/jobs/<appName>/<appUID>
    #          NOTE: if appUID is omitted, job will be submitted to latest version of this app
    #
    #         GET    Display job list for this application
    #------------------------------------------------------------------------------
    """
    logger.debug('[FLASKWEB /jobs/<appName>/<appUID>] Job Request for %s' % appName)
    global dispatcher
    applist = [a['name'] for a in db.getAllApps()]
    if appName in applist:
        if request.method == 'POST':
            logger.debug("POST Request for a new job")
            # TODO: Get user
            # The role environment may arrive as an uploaded file or inline text.
            file = request.files['file']
            text = request.form['text'] if 'text' in request.form else None
            # Boolean flags are presence-based: any value in the form enables them.
            k3logging = True if 'logging' in request.form else False
            jsonlog = True if 'jsonlog' in request.form else False
            jsonfinal = True if 'jsonfinal' in request.form else False
            perf_profile = True if 'perf_profile' in request.form else False
            perf_frequency = request.form['perf_frequency'] if 'perf_frequency' in request.form else ''
            cmd_prefix = request.form['cmd_prefix'] if 'cmd_prefix' in request.form else None
            cmd_infix = request.form['cmd_infix'] if 'cmd_infix' in request.form else None
            cmd_suffix = request.form['cmd_suffix'] if 'cmd_suffix' in request.form else None
            core_dump = True if 'core_dump' in request.form else False
            stdout = request.form['stdout'] if 'stdout' in request.form else False
            user = request.form['user'] if 'user' in request.form else 'anonymous'
            tag = request.form['tag'] if 'tag' in request.form else ''
            # User handling: jsonfinal is a qualifier flag for the json logging flag
            if jsonfinal and not jsonlog:
                jsonlog = True
            logger.debug("K3 LOGGING is : %s" % ("ON" if k3logging else "OFF"))
            logger.debug("JSON LOGGING is : %s" % ("ON" if jsonlog else "OFF"))
            logger.debug("JSON FINAL LOGGING is : %s" % ("ON" if jsonfinal else "OFF"))
            logger.debug("PERF PROFILING is : %s" % ("ON" if perf_profile else "OFF"))
            logger.debug("CORE DUMPS are : %s" % ("ON" if core_dump else "OFF"))
            # trials = int(request.form['trials']) if 'trials' in request.form else 1
            # Check for valid submission
            if not file and not text:
                logger.error('Error. Cannot create job: No input file and no YAML for enviornment configuration provided. ')
                return render_template("error.html", code=404, message="Invalid job request")
            # Post new job request, get job ID & submit time
            thisjob = dict(appName=appName, appUID=appUID, user=user, tag=tag)
            jobId, time = db.insertJob(thisjob)
            thisjob = dict(jobId=jobId, time=time)
            # Save yaml to file (either from file or text input)
            path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName)
            if not os.path.exists(path):
                os.mkdir(path)
            path = os.path.join(path, str(jobId))
            filename = 'role.yaml'
            if not os.path.exists(path):
                os.mkdir(path)
            if file:
                file.save(os.path.join(path, filename))
            else:
                with open(os.path.join(path, filename), 'w') as file:
                    file.write(text)
            # Create new Mesos K3 Job; the binary is fetched via the app's URL.
            apploc = webapp.config['ADDR']+os.path.join(webapp.config['UPLOADED_APPS_URL'], appName, appUID, appName)
            newJob = Job(binary=apploc, appName=appName, jobId=jobId,
                rolefile=os.path.join(path, filename), logging=k3logging,
                jsonlog=jsonlog, jsonfinal=jsonfinal,
                perf_profile=perf_profile, perf_frequency=perf_frequency, core_dump=core_dump,
                cmd_prefix=cmd_prefix, cmd_infix=cmd_infix, cmd_suffix=cmd_suffix)
            # Submit to Mesos
            dispatcher.submit(newJob)
            thisjob = dict(thisjob, url=dispatcher.getSandboxURL(jobId), status='SUBMITTED')
            if 'application/json' in request.headers['Accept']:
                return jsonify(thisjob), 202
            else:
                return render_template('last.html', appName=appName, lastjob=thisjob)
        elif request.method == 'GET':
            jobs = db.getJobs(appName=appName)
            if 'application/json' in request.headers['Accept']:
                return jsonify(dict(jobs=jobs))
            else:
                # Browser clients get the new-job form, pre-seeded with a YAML
                # sample chosen by the `preload` query arg.
                preload = request.args['preload'] if 'preload' in request.args else 'Sample'
                logger.debug("YAML file preload = %s" % preload)
                if preload == 'Instructions':
                    yamlFile = 'role_file_template.yaml'
                elif preload == 'Last':
                    # Re-use the role file from this app's most recent job, if present.
                    lastJobId = max([ d['jobId'] for d in jobs])
                    path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, "%d" % lastJobId, 'role.yaml')
                    logger.debug(" YAML PATH= %s" % path)
                    if os.path.exists(path):
                        yamlFile = path
                    else:
                        yamlFile = None
                else:
                    yamlFile = 'sample.yaml'
                if yamlFile:
                    with open(yamlFile, 'r') as f:
                        sample = f.read()
                else:
                    sample = "(No YAML file to display)"
                return render_template("newjob.html", name=appName, uid=appUID, sample=sample)
    else:
        return returnError("There is no application, %s" % appName, 404)
@webapp.route('/job/<jobId>')
def getJobRedir(jobId):
    """Resolve jobId to its application and forward to getJob."""
    matches = db.getJobs(jobId=jobId)
    if not matches:
        return returnError("Job ID, %s, does not exist" % jobId, 404)
    return getJob(matches[0]['appName'], jobId)
@webapp.route('/jobs/<appName>/<jobId>/status')
def getJob(appName, jobId):
    """
    #------------------------------------------------------------------------------
    #  /jobs/<appName>/<jobId>/status - Detailed Job info
    #         GET     Display detailed job info  (default for all methods)
    #------------------------------------------------------------------------------
    """
    jobs = db.getJobs(jobId=jobId)
    job = None if len(jobs) == 0 else jobs[0]
    # The dispatcher only knows about active jobs; k3job may be None.
    k3job = dispatcher.getJob(int(jobId))
    if job == None:
        return returnError("Job ID, %s, does not exist" % jobId, 404)
    thisjob = dict(job, url=dispatcher.getSandboxURL(jobId))
    if k3job != None:
        thisjob['master'] = k3job.master
    # Paths are encoded to bytes — presumably for non-ASCII safety; TODO confirm.
    local = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId)).encode(encoding='utf8', errors='ignore')
    path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId),'role.yaml').encode(encoding='utf8', errors='ignore')
    if os.path.exists(local) and os.path.exists(path):
        with open(path, 'r') as role:
            thisjob['roles'] = role.read()
    else:
        return returnError("Job Data no longer exists", 400)
    thisjob['sandbox'] = sorted (os.listdir(local))
    if 'application/json' in request.headers['Accept']:
        return jsonify(thisjob)
    else:
        return render_template("last.html", appName=appName, lastjob=thisjob)
@webapp.route('/job/<jobId>/replay')
def replayJobRedir(jobId):
    """Resolve jobId to its application and forward to replayJob."""
    matches = db.getJobs(jobId=jobId)
    if not matches:
        return returnError("Job ID, %s, does not exist" % jobId, 404)
    appName = matches[0]['appName']
    logging.info ("[FLASKWEB] REPLAYING JOB # %s" % jobId)
    return replayJob(appName, jobId)
@webapp.route('/jobs/<appName>/<jobId>/replay', methods=['GET', 'POST'])
def replayJob(appName, jobId):
    """
    #------------------------------------------------------------------------------
    #  /jobs/<appName>/<appUID/replay - Replay a previous K3 Job
    #         POST    Create new K3 Job
    #          curl -i -X POST -H "Accept: application/json" http://<host>:<port>/jobs/<appName>/<appUID>/replay
    #------------------------------------------------------------------------------
    """
    global dispatcher
    joblist = db.getJobs(jobId=jobId)
    oldjob = None if len(joblist) == 0 else joblist[0]
    if not oldjob:
        return returnError("There is no Job, %s\n" % jobId, 404)
    logger.info("[FLASKWEB] REPLAYING %s" % jobId)
    # Post new job request, get job ID & submit time
    thisjob = dict(appName=oldjob['appName'],
        appUID=oldjob['hash'],
        user=oldjob['user'],
        tag='REPLAY: %s' % oldjob['tag'])
    new_jobId, time = db.insertJob(thisjob)
    thisjob = dict(jobId=new_jobId, time=time)
    logging.info ("[FLASKWEB] new Replay JOBID: %s" % new_jobId)
    # Copy the original job's role file into the new job's directory
    role_src = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId), 'role.yaml').encode('utf8', 'ignore')
    path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(new_jobId)).encode('utf8', 'ignore')
    # ROBUSTNESS FIX: unguarded os.mkdir raised if the directory already existed.
    if not os.path.exists(path):
        os.mkdir(path)
    role_copy = os.path.join(path, 'role.yaml')
    # BUG FIX: role_copy is already a full path; the original joined it onto
    # `path` a second time, producing a malformed destination for the copy.
    shutil.copyfile(role_src, role_copy)
    # Create new Mesos K3 Job
    try:
        newJob = Job(binary=webapp.config['ADDR']+os.path.join(webapp.config['UPLOADED_APPS_URL'], appName, oldjob['hash'], appName),
            appName=appName, jobId=new_jobId, rolefile=role_copy)
    except K3JobError as err:
        # BUG FIX: on failure, delete the job record we just created
        # (new_jobId), not the original job being replayed (jobId).
        db.deleteJob(new_jobId)
        logger.error("JOB ERROR: %s" % err)
        return returnError(err.value, 400)
    logging.info ("[FLASKWEB] NEW JOB ID: %s" % newJob.jobId)
    # Submit to Mesos
    dispatcher.submit(newJob)
    thisjob = dict(thisjob, url=dispatcher.getSandboxURL(new_jobId), status='SUBMITTED')
    if 'application/json' in request.headers['Accept']:
        return jsonify(thisjob), 202
    else:
        return render_template('last.html', appName=appName, lastjob=thisjob)
@webapp.route('/job/<jobId>/archive', methods=['GET', 'POST'])
def archiveJobRedir(jobId):
    """
    Redirect to archiveJob (/jobs/<appName>/<jobId>/archive).
    """
    jobs = db.getJobs(jobId=jobId)
    job = None if len(jobs) == 0 else jobs[0]
    # BUG FIX: the original read job['appName'] BEFORE the None check, so an
    # unknown jobId raised TypeError instead of returning a clean 404.
    if job == None:
        return returnError("Job ID, %s, does not exist" % jobId, 404)
    appName = job['appName']
    return archiveJob(appName, jobId)
@webapp.route('/jobs/<appName>/<jobId>/archive', methods=['GET', 'POST'])
def archiveJob(appName, jobId):
    """
    #------------------------------------------------------------------------------
    #  /jobs/<appName>/<jobId>/archive - Endpoint to receive & archive files
    #         GET     returns curl command
    #         POST    Accept files for archiving here
    #           curl -i -X POST -H "Accept: application/json"
    #              -F file=@<filename> http://<host>:<post>/<appName>/<jobId>/archive
    #------------------------------------------------------------------------------
    """
    # Strip any file-extension suffix from the job id (e.g. "42.tar" -> "42").
    # NOTE(review): encode() assumes a str/bytes model where find/split still
    # work on the result — consistent with the rest of this module.
    job_id = str(jobId).encode('utf8', 'ignore')
    if job_id.find('.') > 0:
        job_id = job_id.split('.')[0]
    jobs = db.getJobs(jobId=job_id)
    job = None if len(jobs) == 0 else jobs[0]
    if job == None:
        return returnError ("Job ID, %s, does not exist" % job_id, 404)
    if request.method == 'POST':
        file = request.files['file']
        if file:
            filename = secure_filename(file.filename)
            path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, job_id, filename).encode(encoding='utf8', errors='ignore')
            file.save(path)
            return "File Uploaded & archived", 202
        else:
            return "No file received", 400
    elif request.method == 'GET':
        # GET is informational only: tell the caller how to upload.
        return '''
Upload your file using the following CURL command:\n\n
   curl -i -X POST -H "Accept: application/json" -F file=@<filename> http://<server>:<port>/<appName>/<jobId>/archive
''', 200
@webapp.route('/job/<jobId>/kill', methods=['GET'])
def killRedir(jobId):
    """
    Redirect to killJob (/jobs/<appName>/<jobId>/kill).
    """
    jobs = db.getJobs(jobId=jobId)
    job = None if len(jobs) == 0 else jobs[0]
    # BUG FIX: the original read job['appName'] BEFORE the None check, so an
    # unknown jobId raised TypeError instead of returning a clean 404.
    if job == None:
        return returnError("Job ID, %s, does not exist" % jobId, 404)
    appName = job['appName']
    return killJob(appName, jobId)
@webapp.route('/jobs/<appName>/<jobId>/kill', methods=['GET'])
def killJob(appName, jobId):
    """
    #------------------------------------------------------------------------------
    #  /jobs/<appName>/<jobId>/kill - Job Interface to cancel a job
    #         GET     Kills a Job (if orphaned, updates status to killed)
    #           curl -i -H "Accept: application/json" http://qp1:5000/jobs/<appName>/<jobId>/kill
    #------------------------------------------------------------------------------
    """
    jobs = db.getJobs(jobId=jobId)
    job = None if len(jobs) == 0 else jobs[0]
    if job == None:
        return returnError ("Job ID, %s, does not exist" % jobId, 404)
    logging.info ("[FLASKWEB] Asked to KILL job #%s. Current Job status is %s" % (jobId, job['status']))
    # Separate check to kill orphaned jobs in Db
    # TODO: Merge Job with experiments to post updates to correct table
    if job['status'] == 'RUNNING' or job['status'] == 'SUBMITTED':
        db.updateJob(jobId, status='KILLED')
    if int(jobId) in dispatcher.getActiveJobs():
        status = 'KILLED'
        logging.debug('[FLASKWEB] Job %s is active. Signaling to kill in mesos.' % jobId)
        # NOTE(review): driverDispatch is a module-level object defined outside
        # this view — confirm it is the active Mesos driver handle.
        dispatcher.cancelJob(int(jobId), driverDispatch)
    else:
        # Job exists in the DB but not in the dispatcher's active set.
        status = 'ORPHANED and CLEANED'
        logging.debug('[FLASKWEB] Job # %s is ORPHANED and does not exist in current state. Cleaning up.' % jobId)
    ts = db.getTS_est() #datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    thisjob = dict(jobId=jobId, time=ts, url=dispatcher.getSandboxURL(jobId), status=status)
    if 'application/json' in request.headers['Accept']:
        return jsonify(thisjob)
    else:
        return render_template("last.html", appName=appName, lastjob=thisjob)
@webapp.route('/delete/jobs', methods=['POST'])
def deleteJobs():
    """
    #------------------------------------------------------------------------------
    #  /delete/jobs
    #         POST    Deletes list of K3 jobs (form field: delete_job, repeated)
    #------------------------------------------------------------------------------
    """
    deleteList = request.form.getlist("delete_job")
    for jobId in deleteList:
        jobs = db.getJobs(jobId=jobId)
        # ROBUSTNESS FIX: the original indexed [0] unconditionally and raised
        # IndexError for an id that no longer exists; skip those instead.
        if not jobs:
            logger.warning("[FLASKWEB] Cannot delete job %s: not found", jobId)
            continue
        path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], jobs[0]['appName'], jobId)
        shutil.rmtree(path, ignore_errors=True)
        db.deleteJob(jobId)
    return redirect(url_for('listJobs')), 302
#------------------------------------------------------------------------------
# /jobs/<appName>/<jobId>/stdout - Job Interface for specific job
# GET TODO: Consolidate STDOUT for current job (from all tasks)
# POST TODO: Accept STDOUT & append here (if desired....)
#------------------------------------------------------------------------------
# @webapp.route('/jobs/<appName>/<jobId>/stdout', methods=['GET'])
# def stdout(appName, jobId):
# jobs = db.getJobs(appName=appName)
# link = resolve(MASTER)
# print link
# sandbox = dispatcher.getSandboxURL(jobId)
# if sandbox:if
# print sandbox
# return '<a href="%s">%s</a>' % (sandbox, sandbox)
# else:
# return 'test'
#===============================================================================
# Compile End Points
#===============================================================================
@webapp.route('/compileservice')
def compService():
    """
    #------------------------------------------------------------------------------
    #  /compileservice
    #         GET     Return compile service status
    #------------------------------------------------------------------------------
    """
    status = compileService.getItems()
    return jsonify(status)
@webapp.route('/compileservice/check')
def compServiceCheck():
    """
    #------------------------------------------------------------------------------
    #  /compileservice/check
    #         GET     Quick check for status (plain-text state name)
    #------------------------------------------------------------------------------
    """
    current_state = compileService.state
    return current_state.name
@webapp.route('/compileservice/up', methods=['GET', 'POST'])
def compServiceUp():
    """
    #------------------------------------------------------------------------------
    #  /compileservice/up
    #         POST    Starts compile service
    #          curl -i -X POST -H "Accept: application/json"
    #             -F numworkers=<numworkers>
    #             -F gitpull=[True|False]
    #             -F branch=<k3_branch>
    #             -F cabalbuild=[True|False]
    #             -F m_workerthread=<#master_service_threads>
    #             -F w_workerthread=<#worker_service_threads>
    #             -F heartbeat=<heartbeat_interval_in_secs>
    #             http://<host>:<port>/compile
    #
    #           Default vals: numworkers=(max workers), gitpull=True,
    #             branch=development, cabalbuild=False,
    #             m_workerthread=1, w_workerthread=1,
    #             heartbeat=300, cppthread=12
    #
    #         GET    Redirect to Compile Form (html) or compile service setting (json)
    #------------------------------------------------------------------------------
    """
    global compileService
    # Only a POST against a *down* service changes anything; otherwise this
    # falls through to the common status response below.
    if request.method == 'POST' and compileService.isDown():
        settings = dict(webaddr=webapp.config['ADDR'])
        settings['branch'] = request.form.get('branch', 'development')
        # Form values may arrive as strings ("True"/"False") or already-bools.
        gitpull = request.form.get('gitpull', True)
        settings['gitpull'] = gitpull if isinstance(gitpull, bool) else (gitpull.upper() == 'TRUE')
        cabalbuild = request.form.get('cabalbuild', False)
        settings['cabalbuild'] = cabalbuild if isinstance(cabalbuild, bool) else (cabalbuild.upper() == 'TRUE')
        if 'm_workerthread' in request.form:
            settings['m_workerthread'] = request.form['m_workerthread']
        if 'w_workerthread' in request.form:
            settings['w_workerthread'] = request.form['w_workerthread']
        if 'heartbeat' in request.form:
            settings['heartbeat'] = request.form['heartbeat']
        # if 'cppthread' in request.form:
        #   settings['cppthread'] = request.form['cppthread']
        logger.debug ("[FLASKWEB] User GIT PULL Request (?): %s" % str(settings['gitpull']))
        logger.debug ("[FLASKWEB] User CABAL BUILD Request (?): %s" % str(settings['cabalbuild']))
        # TODO: Set Worker Nodes here (dynamic allocation of workers)
        # if 'numworkers' not in request.form or request.form['numworkers'] == '':
        #   settings['numworkers'] = len(workerNodes)
        # else:
        #   settings['numworkers'] = int(request.form['numworkers'])
        # Assign each known host the role(s) ticked in the form (role_<host>).
        nodelist = {n : [] for n in ['master', 'worker', 'client', 'none']}
        for host in compileService.getAllNodes()['all']:
            # if host in request.form:
            #   logger.debug("  -->Recevied host val as: " + request.form[host])
            #   nodelist[request.form[host].lower()].append(host)
            #   logger.debug('  %s added as %s ' % (host, request.form[host].lower()))
            # compileService.setNodes(nodelist)
            test = request.form.getlist("role_" + host)
            logger.debug("Compiler Host Requested: %s", str(test))
            for role in request.form.getlist("role_" + host):
                logger.debug("   Compiler: adding `%s` as `%s`", host, role)
                nodelist[role.lower()].append(host)
        compileService.setNodes(nodelist)
        compileService.update(settings)
        compileService.goUp()
    if request.headers['Accept'] == 'application/json':
        return jsonify(compileService.getItems()), 200
    else:
        return render_template("compile.html", status=compileService.state.name, hostlist=compileService.getAllNodes())
@webapp.route('/compileservice/stop')
def compServiceStop():
    """
    #------------------------------------------------------------------------------
    #  /compileservice/stop
    #         GET     Stop compile service Immediately
    #
    #  NOTE: Be careful, it kills all jobs  (There is no confirmation check)
    #------------------------------------------------------------------------------
    """
    global compileService
    # Hard stop: terminates the service and every job it is running
    compileService.goDown()
    wants_json = request.headers['Accept'] == 'application/json'
    if wants_json:
        return jsonify(compileService.getItems()), 200
    return render_template("compile.html", status=compileService.state.name,
                           hostlist=compileService.getAllNodes())
@webapp.route('/compileservice/down')
def compServiceDown():
    """
    #------------------------------------------------------------------------------
    #  /compileservice/down
    #         GET     Shuts down compile service gracefully
    #
    #  NOTE: Clears all pendings tasks
    #------------------------------------------------------------------------------
    """
    global compileService
    # Graceful stop: lets running work wind down, pending tasks are cleared
    compileService.goDownGracefully()
    wants_json = request.headers['Accept'] == 'application/json'
    if wants_json:
        return jsonify(compileService.getItems()), 200
    return render_template("compile.html", status=compileService.state.name,
                           hostlist=compileService.getAllNodes())
#===============================================================================
# Compile End Points
#===============================================================================
@webapp.route('/compile', methods=['GET', 'POST'])
def compile():
    """
    #------------------------------------------------------------------------------
    #  /compile
    #         POST    Submit new K3 Compile task
    #            curl -i -X POST -H "Accept: application/json"
    #                 -F name=<appName>
    #                 -F file=@<sourceFile>
    #                 -F blocksize=<blocksize>
    #                 -F compilestage=['both'|'cpp'|'bin']
    #                 -F compileargs=<compile_args>
    #                 -F mem=<mem_in_GB>
    #                 -F cpu=<#cores>
    #                 -F workload=['balanced'|'moderate'|'moderate2'|'extreme']
    #                 -F user=<userName> http://<host>:<port>/compile
    #
    #         NOTE: -user & compileargs are optional.
    #               -If name is omitted, it is inferred from filename
    #            Default vals: blocksize=8, compilestage='both', workload='balanced'
    #
    #         GET     Form for compiling new K3 Executable OR status of compiling tasks
    #------------------------------------------------------------------------------
    """
    global lastCompile
    if webapp.config['COMPILE_OFF']:
        logger.warning("Compilation requested, but denied (Feature not turned on)")
        return returnError("Compilation Features are not available", 400)
    logging.debug("[FLASKWEB /compile] REQUEST ")
    if request.method == 'POST':
        # Reject submissions when the compiler service cannot accept work
        if not compileService.isUp():
            logger.warning("Compilation requested, but denied (Compiler Service not ready)")
            return returnError("Compiler Service is not running. Ensure it is up before initiating a compilation task.", 400)
        if compileService.gracefulHalt:
            logger.warning("Compilation requested, but denied (Compiler Service is flagged to shut down, gracefully)")
            return returnError("Sorry. Your compilation request is denied because the Compiler Service is flagged to shut down, gracefully.", 400)
        # BUGFIX: use .get() -- a text-only submission has no 'file' part, and
        # request.files['file'] would abort with a BadRequest before the text
        # branch below ever runs
        file = request.files.get('file')
        text = request.form.get('text', None)
        name = request.form.get('name', None)
        # Create a unique ID
        uid = getUID()
        # Set default settings for a Compile Job
        # TODO: Update Settings
        settings = compileService.getItems()
        # update settings & error check where necessary
        settings['compileargs'] = request.form.get('compileargs', settings['compileargs'])
        settings['user'] = request.form.get('user', settings['user'])
        settings['tag'] = request.form.get('tag', settings['tag'])
        workload = request.form.get('workload', '').lower()
        if settings['compileargs'] == '':
            # NOTE(review): assumes workloadOptions has an entry for every
            # submitted workload value (including '') -- confirm, else KeyError
            settings['compileargs'] = workloadOptions[workload]
        mem = int(request.form['mem']) if 'mem' in request.form else None
        cpu = int(request.form['cpu']) if 'cpu' in request.form else None
        stage = request.form.get('compilestage', settings['compilestage'])
        if stage not in ['both', 'cpp', 'bin']:
            return returnError("Invalid Input on key `compilestage`. Valid entries are ['both', 'cpp', 'bin']", 400)
        else:
            settings['compilestage'] = stage  #getCompileStage(stage).value
        # blocksize may arrive as an int (JSON) or a digit string (form field)
        blocksize = request.form.get('blocksize', settings['blocksize'])
        if isinstance(blocksize, int):
            settings['blocksize'] = blocksize
        elif blocksize.isdigit():
            settings['blocksize'] = int(blocksize)
        # Determine application name (for pass-thru naming)
        if not name:
            if file:
                srcfile = secure_filename(file.filename)
                name = srcfile.split('.')[0]
            else:
                return returnError("No name provided for K3 program", 400)
        app = AppID(name, uid)
        uname = '%s-%s' % (name, uid)
        path = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname).encode(encoding='utf8', errors='ignore')
        # Save K3 source to file (either from file or text input)
        settings['source'] = ('%s.k3' % name)
        settings['webaddr'] = webapp.config['ADDR']
        if not os.path.exists(path):
            os.mkdir(path)
        if file:
            file.save(os.path.join(path, settings['source']))
        else:
            file = open(os.path.join(path, settings['source']).encode(encoding='utf8', errors='ignore'), 'w')
            file.write(text)
            file.close()
        # Create Symlink for easy access to latest compiled task
        link = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], name).encode(encoding='utf8', errors='ignore')
        if os.path.exists(link):
            os.remove(link)
        os.symlink(uname, link)
        url = os.path.join(webapp.config['UPLOADED_BUILD_URL'], uname).encode(encoding='utf8', errors='ignore')
        job = CompileJob(name, uid, path, settings, mem=mem, cpu=cpu)
        lastCompile = job.getItems()
        logger.info("[FLASKWEB] Submitting Compilation job for " + name)
        compileService.submit(job)
        # Prepare results to send back to the user
        outputurl = "/compile/%s" % uname
        cppsrc = os.path.join(webapp.config['UPLOADED_BUILD_URL'], uname, settings['source'])
        thiscompile = dict(lastCompile, url=dispatcher.getSandboxURL(uname),
                           status='SUBMITTED', outputurl=outputurl, cppsrc=cppsrc, uname=uname)
        # Return feedback to user
        if request.headers['Accept'] == 'application/json':
            return jsonify(thiscompile), 200
        else:
            return render_template("last.html", appName=name, lastcompile=thiscompile, status=compileService.state.name)
    else:
        # TODO: Return list of Active/Completed Compiling Tasks
        if request.headers['Accept'] == 'application/json':
            return ('TO SUBMIT A NEW COMPILATION:\n\n\tcurl -i -X POST -H "Accept: application/json" \
    -F name=<appName> -F file=@<sourceFile> -F blocksize=<blocksize> -F compilestage=<compilestage> \
    http://<host>:<port>/compile' if compileService.isUp()
                    else 'START THE SERVICE: curl -i -H "Accept: application/json" http://<host>:<port>/compileservice/up')
        else:
            return render_template("compile.html", status=compileService.state.name, hostlist=compileService.getAllNodes())
def getCompilerOutput(uname):
    """
    Retrieves the compiler output from local file
    """
    fname = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname, 'output').encode('utf8')
    if not os.path.exists(fname):
        return returnError("Output not available for " + uname, 404)
    # Read and decode the consolidated compiler output
    with open(fname, 'r') as stdout_file:
        return unicode(stdout_file.read(), 'utf-8')
@webapp.route('/compile/<uid>', methods=['GET'])
def getCompile(uid):
    """
    #------------------------------------------------------------------------------
    #  /compile/<uid>
    #         GET     displays STDOUT & STDERR consolidated output for compile task
    #------------------------------------------------------------------------------
    """
    if webapp.config['COMPILE_OFF']:
        return returnError("Compilation Features are not available", 400)
    logger.debug("[FLASKWEB] Retrieving last compilation status")
    # Accept either a bare uid or an appName-uid identifier
    result = db.getCompiles(uid=uid)
    if not result:
        result = db.getCompiles(uid=AppID.getUID(uid))
    if not result:
        return returnError("No output found for compilation, %s\n\n" % uid, 400)
    output = result[0]
    output['uname'] = AppID.getAppId(output['name'], output['uid'])
    # Attach the sandbox file listing for this build
    local = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], output['uname'])
    output['sandbox'] = sorted(os.listdir(local))
    if request.headers['Accept'] == 'application/json':
        return jsonify(output), 200
    return render_template("last.html", lastcompile=output)
@webapp.route('/compilestatus')
def getCompileStatus():
    """
    #------------------------------------------------------------------------------
    #  /compilestatus  - Short list of active jobs & current statuses
    #------------------------------------------------------------------------------
    """
    logger.debug("[FLASKWEB] Retrieving current active compilation status")
    jobs = compileService.getActiveState()
    if jobs:
        title = "Active Compiling Tasks"
    else:
        title = "NO Active Compiling Jobs"
    if request.headers['Accept'] == 'application/json':
        return jsonify(jobs), 200
    return render_template("keyvalue.html", title=title, store=jobs)
@webapp.route('/compilelog')
def getCompileLog():
    """
    #------------------------------------------------------------------------------
    #  /compilelog  - Connects User to compile log websocket
    #------------------------------------------------------------------------------
    """
    if webapp.config['COMPILE_OFF']:
        return returnError("Compilation Features are not available", 400)
    logger.debug("[FLASKWEB] Connecting user to Compile Log WebSocket")
    # Only serve the log tail written since the most recent service start marker
    with open(webapp.config['COMPILELOG'], 'r') as logfile:
        tail = logfile.read().split('<<<<< Compiler Service Initiated >>>>>')[-1]
    if request.headers['Accept'] == 'application/json':
        return tail, 200
    return render_template("socket.html", namespace='/compile', prefetch=tail)
@webapp.route('/compile/<uid>/kill', methods=['GET'])
def killCompile(uid):
    """
    #------------------------------------------------------------------------------
    #  /compile/<uid>/kill
    #         GET     Kills an active compiling tasks (or removes an orphaned one from DB)
    #------------------------------------------------------------------------------
    """
    if webapp.config['COMPILE_OFF']:
        return returnError("Compilation Features are not available", 400)
    # Accept either a bare uid or an appName-uid identifier
    complist = db.getCompiles(uid=uid)
    if not complist:
        complist = db.getCompiles(uid=AppID.getUID(uid))
    if not complist:
        return returnError("Not currently tracking the compile task %s" % uid, 400)
    c = complist[0]
    logging.info("[FLASKWEB] Asked to KILL Compile UID #%s. Current status is %s" % (c['uid'], c['status']))
    # Only kill jobs that have not already reached a terminal state
    if c['status'] not in compileTerminatedStates:
        logging.info("[FLASKWEB] KILLING Compile UID #%s. " % (c['uid']))
        c['status'] = CompileState.KILLED.name
        db.updateCompile(c['uid'], status=c['status'], done=True)
        svid = AppID.getAppId(c['name'], c['uid'])
        compileService.killJob(svid)
    if request.headers['Accept'] == 'application/json':
        return jsonify(c), 200
    return redirect(url_for('listJobs')), 302
@webapp.route('/delete/compiles', methods=['POST'])
def deleteCompiles():
    """
    #------------------------------------------------------------------------------
    #  /delete/compiles
    #         POST    Deletes list of inactive compile jobs
    #------------------------------------------------------------------------------
    """
    if webapp.config['COMPILE_OFF']:
        return returnError("Compilation Features are not available", 400)
    deleteList = request.form.getlist("delete_compile")
    for uid in deleteList:
        logger.info("[FLASKWEB /delete/compiles] DELETING compile job uid=" + uid)
        jobs = db.getCompiles(uid=uid)
        # BUGFIX: an unknown/stale uid used to raise IndexError on [0] and
        # abort the rest of the batch; skip it and keep deleting the others
        if not jobs:
            logger.warning("[FLASKWEB /delete/compiles] No compile job found for uid=" + uid)
            continue
        db.deleteCompile(jobs[0]['uid'])
    return redirect(url_for('listJobs')), 302
@socketio.on('connect', namespace='/compile')
def test_connect():
    # SocketIO handshake handler for the /compile namespace:
    # logs the connection and greets the client on the 'my response' event
    logger.info('[FLASKWEB] Client is connected to /connect stream')
    emit('my response', 'Connected to Compile Log Stream')
@socketio.on('message', namespace='/log')
def test_message(message):
    # SocketIO message handler for the /log namespace; the incoming
    # `message` payload is ignored and a fixed greeting is emitted
    emit('my response', "Hello User!")
if __name__ == '__main__':
    # Parse Args
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port', help='Flaskweb Server Port', default=5000, required=False)
    parser.add_argument('-m', '--master', help='URL for the Mesos Master (e.g. zk://localhost:2181/mesos', default='zk://localhost:2181/mesos', required=False)
    parser.add_argument('-d', '--dir', help='Local directory for hosting application and output files', default='/k3/web/', required=False)
    parser.add_argument('-c', '--compile', help='Enable Compilation Features (NOTE: will require a capable image)', action='store_true', required=False)
    parser.add_argument('--wipedb', help='Wipe the Database clean before running', action='store_true', required=False)
    parser.add_argument('--ip', help='Public accessible IP for connecting to flask', required=False)
    args = parser.parse_args()
    # TODO: Move to a Common module
    #
    # console.setFormatter(formatter)
    # log.addHandler(console)
    # logging.basicConfig(format='[%(asctime)s %(levelname)-5s %(name)s] %(message)s', level=logging.DEBUG, datefmt='%H:%M:%S')
    logger.info("K3 Flask Web Service is initiating.....")
    # Program Initialization
    webapp.debug = True
    # --wipedb is a one-shot maintenance mode: drop everything and exit
    if args.wipedb:
        logger.info("Wiping database and exiting")
        db.dropTables()
        sys.exit(0)
    db.createTables()
    master = args.master
    port = int(args.port)
    # NOTE(review): the trailing comma makes `host` a 1-tuple -- host[0] is
    # indexed below, so this looks deliberate, but confirm initWeb() really
    # expects a tuple before "fixing" it
    host = socket.gethostname() if not args.ip else args.ip,
    initWeb(
        host=host,
        port=port,
        master=master,
        local=args.dir,
        compile=args.compile
    )
    # Create long running framework, dispatcher, & driver
    frameworkDispatch = mesos_pb2.FrameworkInfo()
    frameworkDispatch.user = ""  # Have Mesos fill in the current user.
    frameworkDispatch.name = "[DEV] Dispatcher"
    # Note: Each compile job runs as as a separate framework
    frameworkCompiler = mesos_pb2.FrameworkInfo()
    frameworkCompiler.user = ""
    frameworkCompiler.name = "[DEV] Compiler"
    # Start mesos schedulers & flask web service
    try:
        # Create Job Dispatcher (runs on its own Mesos scheduler thread)
        logging.debug("[FLASKWEB] Dispatch Driver is initializing")
        dispatcher = Dispatcher(master, webapp.config['ADDR'], daemon=True)
        if dispatcher == None:
            logger.error("[FLASKWEB] Failed to create dispatcher. Aborting")
            sys.exit(1)
        driverDispatch = mesos.native.MesosSchedulerDriver(dispatcher, frameworkDispatch, master)
        threadDispatch = threading.Thread(target=driverDispatch.run)
        threadDispatch.start()
        # Create Compiler Service (second scheduler thread)
        logging.debug("[FLASKWEB] Compiler Service is initializing")
        compileService = CompileServiceManager(webapp.config['LOG_DEST'], webapp.config['ADDR'])
        if compileService == None:
            logger.error("[FLASKWEB] Failed to create compiler service. Aborting")
            sys.exit(1)
        driverCompiler = mesos.native.MesosSchedulerDriver(compileService, frameworkCompiler, master)
        threadCompiler = threading.Thread(target=driverCompiler.run)
        threadCompiler.start()
        logger.info("[FLASKWEB] Starting FlaskWeb Server on IP %s", host[0])
        # Blocks here while the web server runs
        socketio.run(webapp, host=host[0], port=port, use_reloader=False)
        webserverTerminate.clear()
        initSocketIO = False
        # Block until flagged to halt, then tear both schedulers down in order
        haltFlag.wait()
        compileService.kill()
        logger.info("[FLASKWEB] Server is terminating")
        driverDispatch.stop()
        threadDispatch.join()
        logging.debug("[FLASKWEB] Driver thread complete")
        driverCompiler.stop()
        threadCompiler.join()
        logging.debug("[FLASKWEB] Compiler thread complete")
    except socket.error as e:
        # Typically the port is already bound; shut both schedulers down
        logger.error("[FLASKWEB] Flask web cannot start: Port not available.")
        logger.error('ERROR: %s', str(e))
        compileService.kill()
        driverDispatch.stop()
        threadDispatch.join()
        driverCompiler.stop()
        threadCompiler.join()
    except KeyboardInterrupt:
        logger.warning("[FLASKWEB] KEYBOARD INTERRUPT -- Shutting Down")
        compileService.kill()
        driverDispatch.stop()
        threadDispatch.join()
        driverCompiler.stop()
        threadCompiler.join()
|
13,364 | 98d60104d98fcf64d319e9bc086866add13c53d9 | import tensorflow as tf
import tensorflow.contrib.layers as tcl
# subpixel CNN layer (proposed in subpixel: A subpixel convolutional neural netowrk)
def PhaseShift(x, r):  # x: input tensor, r: magnification value
    """
    Subpixel (pixel-shuffle) upscale: rearranges a (bsize, h, w, ch) tensor
    into (bsize, h*r, w*r, ch / r^2) by moving channel blocks into space.
    """
    bsize, h, w, ch = x.shape.as_list()
    assert ch % (r**2) == 0, 'input channel should be multiples of r^2'
    x = tf.reshape(x, (bsize, h, w, r, r, -1))
    x = tf.transpose(x, (0, 1, 2, 4, 3, 5))  # shape(bsize, h, w, r, r, new_ch)
    x = tf.split(x, h, axis=1)  # len(x):h, each shape(bsize, 1, w, r, r, new_ch)
    # BUGFIX: squeeze ONLY the split axis -- tf.squeeze with no axis removes
    # every size-1 dimension, so a batch of size 1 would lose its batch dim
    x = tf.concat([tf.squeeze(x_, axis=1) for x_ in x], axis=2)  # shape(bsize, w, h*r, r, new_ch)
    x = tf.split(x, w, axis=1)  # len(x):w, each shape(bsize, 1, h*r, r, new_ch)
    x = tf.concat([tf.squeeze(x_, axis=1) for x_ in x], axis=2)  # shape(bsize, h*r, w*r, new_ch)
    return x
def PhaseShift_withConv(x, r, filters, kernel_size = (3, 3), stride = (1, 1)):
    """
    Conv to filters*r^2 channels, then subpixel-shuffle up by r.
    Output shape: (batch, r*x_h, r*x_w, filters).
    """
    expanded = tcl.conv2d(x,
                          num_outputs = filters * r**2,
                          kernel_size = kernel_size,
                          stride = stride,
                          padding = 'SAME')
    return PhaseShift(expanded, r)
|
13,365 | 4cc6420cfe25c656663578b27e464bc5da58c106 | import boto3
import botocore.exceptions
import json
import re
import datetime
from decimal import Decimal
from boto3.dynamodb.conditions import Key
def exception(e):
    # Build a 400 API-Gateway error response; the body carries str(e)
    payload = {'errorMessage': str(e)}
    return {
        'statusCode': 400,
        'body': json.dumps(payload)
    }
def response(data):
    # Build a 200 API-Gateway success response with `data` JSON-encoded in the body
    return {'statusCode': 200, 'body': json.dumps(data)}
def logError(**kwargs):
    # Publish every non-None keyword detail as a JSON error report to SNS
    errorMessage = {key: value for key, value in kwargs.items() if value is not None}
    snsClient.publish(TargetArn=SNS_ARN,
                      Message=json.dumps({'default': json.dumps(errorMessage)}),
                      MessageStructure='json')
# AWS handles created once at import time so warm Lambda invocations reuse them
dynamodb = boto3.resource('dynamodb')
personTable = dynamodb.Table('Person')
snsClient = boto3.client('sns')
# SNS topic that receives structured error reports (see logError)
SNS_ARN = "arn:aws:sns:us-east-1:819527464446:NYA_Errors"
# Update Person record
def lambda_handler(event, context):
    """
    Update a Person record in DynamoDB.

    Expects:
      - event['queryStringParameters']['id']: id of the Person to update
      - event['body']: JSON object with the fields to change

    If 'personName' is present it is split into givenName/familyName;
    otherwise those two fields (and the other optional fields) are taken
    verbatim when present. Returns an API-Gateway style dict built by
    response() on success or exception() (400) on any failure.
    """
    # Extract Query parameters & Validate
    if 'queryStringParameters' not in event:
        return exception('No query parameters in event - check API Gateway configuration')
    queryParams = event["queryStringParameters"]
    if 'id' not in queryParams:
        return exception('Invalid Parameters. Person ID not specified')
    if 'body' not in event:
        return exception('Invalid Data. No person data specified in body')
    # Pre-bind names used by the error path: previously a failure before these
    # were assigned raised a NameError inside the except block, masking the
    # real error
    personRecord = {}
    updatedPersonRecord = {}
    try:
        personQuery = personTable.get_item(Key={'id': queryParams['id']})
        # Ensure Person record exists
        if 'Item' not in personQuery:
            return exception('No person record found: ' + queryParams['id'])
        personRecord = personQuery['Item']
        updatedPersonRecord = json.loads(event['body'])
        # Dump body for audit purposes
        print('Data for person: ' + queryParams['id'])
        print(updatedPersonRecord)
        # The update expression always stamps updatedOn and grows below
        updateExpression = "SET updatedOn = :u"
        expressionAttributeValues = {':u': datetime.datetime.now().isoformat()}
        expressionAttributeNames = {}
        # Basic Data
        # If a personName has been specified, deconstruct givenName and familyName
        if 'personName' in updatedPersonRecord:
            nameArray = updatedPersonRecord["personName"].split(' ')
            if len(nameArray) == 2:
                updateExpression += ", givenName= :gn"
                expressionAttributeValues[":gn"] = nameArray[0]
                updateExpression += ", familyName= :fn"
                expressionAttributeValues[":fn"] = nameArray[1]
            elif len(nameArray) == 1:
                # Single token: treat it as the family name
                updateExpression += ", familyName= :fn"
                expressionAttributeValues[":fn"] = nameArray[0]
            elif len(nameArray) > 2:
                # Last token is the family name; the rest joins into givenName
                updateExpression += ", familyName= :fn"
                expressionAttributeValues[":fn"] = nameArray[-1]
                updateExpression += ", givenName= :gn"
                expressionAttributeValues[":gn"] = ' '.join(nameArray[:-1]).strip()
        else:
            if 'familyName' in updatedPersonRecord:
                updateExpression += ", familyName= :fn"
                expressionAttributeValues[":fn"] = updatedPersonRecord["familyName"]
            if 'givenName' in updatedPersonRecord:
                updateExpression += ", givenName= :gn"
                expressionAttributeValues[":gn"] = updatedPersonRecord["givenName"]
        # Optional pass-through fields: (record key, expression placeholder)
        for fieldName, placeholder in (('photoURL', ':pu'),
                                       ('bio', ':bio'),
                                       ('linkedIn', ':li'),
                                       ('experience', ':exp'),
                                       ('interests1', ':int1'),
                                       ('interests2', ':int2')):
            if fieldName in updatedPersonRecord:
                updateExpression += ", %s= %s" % (fieldName, placeholder)
                expressionAttributeValues[placeholder] = updatedPersonRecord[fieldName]
        # Make sure syntax for update is correct (DynamoDB rejects an empty
        # ExpressionAttributeNames dict, so only pass it when populated --
        # currently nothing fills it, but keep the branch for reserved words)
        if len(expressionAttributeNames) > 0:
            returnData = personTable.update_item(Key={'id': queryParams['id']},
                                                 UpdateExpression=updateExpression,
                                                 ExpressionAttributeValues=expressionAttributeValues,
                                                 ExpressionAttributeNames=expressionAttributeNames,
                                                 ReturnValues="UPDATED_NEW")
        else:
            returnData = personTable.update_item(Key={'id': queryParams['id']},
                                                 UpdateExpression=updateExpression,
                                                 ExpressionAttributeValues=expressionAttributeValues,
                                                 ReturnValues="UPDATED_NEW")
        # Build response body
        return response({'success': True})
    except Exception as e:
        # Report the failure to SNS, then surface a 400 to the caller
        errorParams = {
            'activity': "CTA Update Member",
            'key': queryParams['id'],
            'who': personRecord.get('familyName'),
            'error': str(e),
            'data': updatedPersonRecord,
        }
        logError(**errorParams)
        print('ERROR: Unable to update Person record: ' + str(e))
        return exception('Unable to update Person record: ' + str(e))
13,366 | 82a6589efc6802b11f26c847b863b41fab209116 | #!/usr/bin/python3
#
# Copyright (C) 2022 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# This script generates a weekly report of kickstart-test runs.
# It downloads the daily json summary reports and combines them into a single text
# report.
#
# It requires a github token in GITHUB_TOKEN environmental variable. This token
# needs to have access to the kickstart-tests artifacts via the github API.
import argparse
from datetime import datetime, timedelta
from glob import glob
import io
import json
import os
import shutil
import sys
from subprocess import check_call
import tempfile
import time
from urllib.request import Request, urlopen
import zipfile
import pycurl
# GitHub API endpoint listing workflow artifacts (100 per page is the API max)
URL = "https://api.github.com/repos/rhinstaller/kickstart-tests/actions/artifacts?per_page=100"

# Defaults list of artifacts
# For now use the full log artifacts, eventually this will be the json summaries
ARTIFACT_NAMES = ["logs-daily-iso", "logs-rhel9", "logs-rhel8"]

# We should ignore SIGPIPE when using pycurl.NOSIGNAL - see
# the libcurl tutorial for more info.
try:
    import signal
    from signal import SIGPIPE, SIG_IGN
except ImportError:
    # Platform without SIGPIPE (e.g. Windows): nothing to ignore
    pass
else:
    signal.signal(SIGPIPE, SIG_IGN)
def get_artifacts(token, artifact_names, start, end):
    """
    get_artifacts retrieves a list of artifacts from the selected date range
    that are listed in artifact_names

    It returns a list of artifact dicts, sorted by "updated_at", like this:
        {
          "id": 187278866,
          "node_id": "MDg6QXJ0aWZhY3QxODcyNzg4NjY=",
          "name": "logs-daily-iso",
          "size_in_bytes": 1047408950,
          "url": "https://api.github.com/repos/rhinstaller/kickstart-tests/actions/artifacts/187278866",
          "archive_download_url": "https://api.github.com/repos/rhinstaller/kickstart-tests/actions/artifacts/187278866/zip",
          "expired": false,
          "created_at": "2022-03-17T03:30:12Z",
          "updated_at": "2022-03-17T03:30:13Z",
          "expires_at": "2022-03-25T03:29:06Z"
        },
    """
    artifacts = []
    page = 1
    retry_limit = 3
    while True:
        req = Request(URL + f"&page={page}")
        req.add_header("Accept", "application/vnd.github.v3+json")
        req.add_header("Authorization", f"token {token}")
        with urlopen(req) as r:
            # Handle hitting the GitHub rate limit
            # If the reset time is < 90s in the future, wait for it (trying 3 times)
            # Otherwise raise an error
            # NOTE(review): urlopen raises HTTPError for non-2xx responses, so
            # these status branches may be unreachable -- confirm before relying
            # on the retry behavior
            if r.status == 403:
                try:
                    reset = int(r.headers.get("X-RateLimit-Reset"))
                except (TypeError, ValueError):
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit; only catch a missing (None)
                    # or malformed header value
                    raise RuntimeError("Hit GitHub rate limit. Reset header missing.")
                if retry_limit == 0 or time.time() > reset or reset - time.time() > 90:
                    raise RuntimeError("Hit GitHub rate limit. Reset is at %s" % time.ctime(reset))
                # Try waiting until after the reset time
                time.sleep(10 + (reset - time.time()))
                retry_limit = retry_limit - 1
                continue
            if r.status != 200:
                raise RuntimeError("Error (%d) with API request: %s" % (r.status, str(r)))
            data = json.load(r)
            # Only include the artifacts within the date range and names
            for a in data["artifacts"]:
                if a["name"] not in artifact_names:
                    continue
                # Strip the trailing 'Z' -- fromisoformat can't parse it here
                updated_at = datetime.fromisoformat(a["updated_at"][:-1])
                if start <= updated_at <= end:
                    artifacts.append(a)
            if len(data["artifacts"]) < 100:
                break
            # There are more results, get the next page
            page = page + 1
            # Avoid hitting per-second rate limits
            time.sleep(2)
    return sorted(artifacts, key=lambda x: x["updated_at"])
def run_curl(token, url, filename):
    """
    run_curl downloads an artifact file from `url` into `filename`

    Returns a 2-tuple (ok, error):
      - (True, None) when the HTTP status was 200
      - (False, None) for any other HTTP status
      - (False, exception) when pycurl raised; the error is also printed
        to stderr

    NOTE(review): callers must unpack the tuple -- the tuple object itself
    is always truthy, so `if not run_curl(...)` never fires.
    """
    with open(filename, "wb") as f:
        c = pycurl.Curl()
        headers = [
            "Accept: application/vnd.github.v3+json",
            f"Authorization: token {token}",
        ]
        # Follow redirects (artifact downloads redirect to blob storage),
        # use generous timeouts, and NOSIGNAL per the SIGPIPE note at the top
        options = {
            pycurl.FOLLOWLOCATION: 1,
            pycurl.MAXREDIRS: 5,
            pycurl.CONNECTTIMEOUT: 30,
            pycurl.TIMEOUT: 300,
            pycurl.NOSIGNAL: 1,
            pycurl.URL: url,
            pycurl.HTTPHEADER: headers,
            pycurl.WRITEDATA: f
        }
        for k, v in options.items():
            c.setopt(k, v)
        try:
            c.perform()
            status = c.getinfo(pycurl.HTTP_CODE)
            ok = (status == 200, None)
        except Exception as e:
            print(f"ERROR: {e}", file=sys.stderr)
            sys.stderr.flush()
            ok = (False, e)
        c.close()
        return ok
def download_artifacts(token, artifacts):
    """
    download_artifacts downloads the artifacts as uniquely named files

    If there is a problem downloading an artifact it is skipped and not
    added to the returned list.

    It returns a list of tuples containing the artifact name, the
    artifact name with the updated date appended (eg. logs-rhel8-2022-04-01),
    and the filename of the zipfile.
    """
    zipfiles = []
    for a in artifacts:
        updated_at = datetime.fromisoformat(a["updated_at"][:-1])
        datename = a["name"] + updated_at.strftime("-%Y-%m-%d")
        filename = datename + ".zip"
        # Already downloaded on a previous run; reuse it
        if os.path.exists(filename):
            zipfiles.append((a["name"], datename, filename))
            print(f"{filename} skipped, already downloaded")
            continue
        print(f"Fetching {filename}")
        # BUGFIX: run_curl returns an (ok, error) tuple; the original tested
        # the tuple itself, which is always truthy, so failed downloads were
        # never skipped
        ok, _err = run_curl(token, a["archive_download_url"], filename)
        if not ok:
            continue
        zipfiles.append((a["name"], datename, filename))
    return zipfiles
def extract_logs(f):
    """
    Unzip the archive `f` into a fresh temporary directory under /var/tmp/
    and return the TemporaryDirectory object. The directory (and its
    contents) is removed when the returned object goes out of scope.
    """
    logdir = tempfile.TemporaryDirectory(prefix="kstest-log-", dir="/var/tmp/")
    with zipfile.ZipFile(f) as archive:
        archive.extractall(logdir.name)
    # Hand back the object itself so the caller controls the lifetime
    return logdir
def generate_test_list(tdir):
    """
    Build kstest-list if it is missing.

    This is the list of expected tests, used later to detect results that
    are missing entirely. It is parsed from the line in kstest.log that
    looks like:

        Running tests: ./bridge-httpks.sh ./packages-instlangs-1.sh ...

    and written to kstest-list, one bare test name per line.
    """
    list_path = os.path.join(tdir.name, "kstest-list")
    # Skip this if it already exists
    if os.path.exists(list_path):
        return
    with open(os.path.join(tdir.name, "kstest.log")) as logf:
        for line in logf:
            if not line.startswith("Running tests: "):
                continue
            # Strip the "./" prefix and ".sh" suffix from each script path
            names = [os.path.basename(os.path.splitext(p)[0]) for p in line[15:].split()]
            with open(list_path, "wt") as out:
                out.write("".join(n + "\n" for n in names))
            break
def rebuild_logs(tdir):
    """
    Recreate kstest.log with timestamps.

    The top-level kstest.log has no timestamps; the per-test kstest.log
    files do, so concatenate all of them into a fresh top-level log.
    """
    combined = os.path.join(tdir.name, "kstest.log")
    # Remove the old log with no timestamps
    os.unlink(combined)
    with open(combined, "w") as out:
        for log in glob(os.path.join(tdir.name, "*", "kstest.log")):
            with open(log) as src:
                # Stream in 1 MiB chunks to keep memory use bounded
                shutil.copyfileobj(src, out, 1024**2)
def check_tests(tests):
    """
    Classify test results as success, missing, failed, or flaky
    (flaky = a success recorded alongside one or more failures).

    Returns a tuple of:
    - sorted list of the names of successful tests
    - sorted list of the names of the missing tests
    - dict of failed tests, each entry a list of failure detail dicts
    - dict of flaky tests, each entry a list of flaky detail dicts
    """
    passed = []
    absent = []
    failing = {}
    flaky = {}
    for entry in tests:
        name = entry["name"]
        if entry["success"]:
            # Tests should never have more than one success
            if name in passed:
                raise RuntimeError(f"{name} already passed, should only be 1 per test")
            passed.append(name)
            # Earlier failures become flakes once a success shows up
            if name in failing:
                flaky[name] = failing.pop(name)
        elif "MISSING" in entry["result"]:
            # Test is completely missing, don't count it as failed
            absent.append(name)
        elif name in passed:
            # Failure after a recorded success: file it with the flakes
            flaky.setdefault(name, []).append(entry)
        else:
            failing.setdefault(name, []).append(entry)
    return sorted(passed), sorted(absent), failing, flaky
def print_test_details(scenario, days, test_name, buf):
"""Add the details of the named tests (failed-tests or flaky-tests) to the buffer
"""
for d in days:
if scenario not in days[d]:
continue
for n in days[d][scenario][test_name]:
print(f"\n{n}:", file=buf)
for test in days[d][scenario][test_name][n]:
if "start_time" not in test:
start_time = ""
else:
start_time = datetime.fromtimestamp(test["start_time"]).strftime("%m/%d/%Y %H:%M:%S")
if "elapsed_time" not in test:
elapsed_time = 0
else:
elapsed_time = test["elapsed_time"]
# Get the result message
msg = test["result"].rsplit("FAILED:")[-1]
print(f' {start_time} ({elapsed_time}s): {msg}', file=buf)
def kstest_logdir(tmpdir, test):
    """
    Return the directory holding the logs for this test.

    The test's logfile path should contain a "kstest-*" component
    (usually under /var/tmp/); any leading path elements are ignored
    and the component is re-rooted under tmpdir.
    """
    logfile = test["logfile"]
    match = next((part for part in logfile.split(os.path.sep)
                  if part.startswith("kstest-")), None)
    if match is None:
        raise RuntimeError(f"No kstest-* directory found in {logfile}")
    return os.path.join(tmpdir, match)
def archive_test_logs(days, archive_path, all_logs):
    """
    Store the logs from the failed/flaky tests for archiving
    and later examination. Paths follow the pattern:
    MM-DD-YYYY / scenario / failed|flakes / TESTNAME / [1...] /
    and each directory contains the logs for that run of the test.
    Currently this is the kstest.log and virt-install.log files

    days         -- per-day dict: days[day][scenario] holds "failed-tests"
                    and "flaky-tests" maps of test name -> list of runs
    archive_path -- root directory the archive tree is created under
    all_logs     -- map of "<scenario>-<YYYY-MM-DD>" -> tempdir object whose
                    .name attribute is the extracted log directory
    Raises RuntimeError when a tempdir entry or its directory is missing.
    """
    for day in days.keys():
        # Archive dirs use MM-DD-YYYY while the data keys use YYYYMMDD
        daydir = datetime.strptime(day, "%Y%m%d").strftime("%m-%d-%Y")
        for scenario in days[day].keys():
            # temporary log directories are stored by scenario + date
            datename = scenario + "-" + datetime.strptime(day, "%Y%m%d").strftime("%Y-%m-%d")
            if datename not in all_logs:
                raise RuntimeError(f"Missing all_log entry for {datename}")
            if not os.path.exists(all_logs[datename].name):
                raise RuntimeError(f"Missing log directory for {datename}")
            tmpdir = all_logs[datename].name
            failed = days[day][scenario]["failed-tests"]
            flakes = days[day][scenario]["flaky-tests"]
            scenario_archive = os.path.join(archive_path, daydir, scenario)
            os.makedirs(os.path.join(scenario_archive, "failed"))
            os.makedirs(os.path.join(scenario_archive, "flakes"))
            # data is organized by test names as keys with lists of tests
            for name in failed:
                i = 1
                # Number repeated runs of the same test 1..N in start_time order
                for t in sorted(failed[name], key=lambda x: x["start_time"]):
                    try:
                        logdir = kstest_logdir(tmpdir, t)
                        if not os.path.exists(logdir):
                            raise RuntimeError(f"Missing logdir - {logdir}")
                    except RuntimeError:
                        # Skip runs whose logs are gone; keep archiving the rest
                        continue
                    dst = os.path.join(scenario_archive, "failed", name, str(i))
                    shutil.copytree(logdir, dst)
                    i += 1
            for name in flakes:
                i = 1
                for t in sorted(flakes[name], key=lambda x: x["start_time"]):
                    try:
                        logdir = kstest_logdir(tmpdir, t)
                        if not logdir or not os.path.exists(logdir):
                            raise RuntimeError(f"Missing logdir - {logdir}")
                    except RuntimeError:
                        continue
                    dst = os.path.join(scenario_archive, "flakes", name, str(i))
                    shutil.copytree(logdir, dst)
                    i += 1
def process_logs(logs):
    """
    Process the logfiles into a data structure

    Returns a dictionary with each log's data that looks similar to this:
    {"20220401": {"logs-daily-iso": [{test data dict}, ...], ...}}, ...}

    So every day has an entry for the scenarios that were run on that day.
    Note that sometimes a scenario can be missing if there was a problem
    running it at all.
    """
    organized = {}
    for path in logs:
        with open(path) as fp:
            entries = json.load(fp)
        scenario = entries[0].get("scenario")
        if scenario is None:
            # Without a scenario name there is no way to organize the data
            continue
        # The log's date is the run identifier; filenames are assumed to be
        # SCENARIO-YYYY-MM-DD.json. NOTE: this may not match the GitHub
        # Action run dates when tests take a very long time.
        date_part = path[1 + len(scenario):-5]
        day = datetime.strptime(date_part, "%Y-%m-%d").strftime("%Y%m%d")
        # Group by scenario, assuming one file per scenario per day
        organized.setdefault(day, {})[scenario] = entries
    return organized
def summary(args, json_logs, all_logs):
    """
    summary generates a summary of all the tests run in the selected date range
    It returns a string with the summary text

    args      -- parsed CLI arguments; uses start, end, debug, flake_details,
                 archive_logs
    json_logs -- iterable of per-scenario json log filenames
    all_logs  -- map of "<scenario>-<YYYY-MM-DD>" -> extracted log tempdirs,
                 used only when args.archive_logs is set
    """
    all_data = process_logs(json_logs)
    if args.debug:
        print(json.dumps(all_data))
    buf = io.StringIO()
    start = args.start.strftime("%m/%d/%Y %H:%M:%S")
    end = args.end.strftime("%m/%d/%Y %H:%M:%S")
    print(f"Test Summary Report: {start} -> {end}\n", file=buf)
    # Calculate test failures per day/scenario
    all_days = {}    # dict of per-scenario counts
    days = {}        # dict of per-day -> per-scenario counts and test names
    top_failed = {}  # dict of per-test failure counts
    top_flakes = {}  # dict of per-test flake counts
    for day in sorted(all_data.keys()):
        days[day] = {}
        for scenario in sorted(all_data[day].keys()):
            if scenario not in all_days:
                all_days[scenario] = {"success": 0, "missing": 0, "failed": 0, "flakes": 0}
            # Figure out how many were successful, failed, or were flakes
            success, missing, failed, flakes = check_tests(all_data[day][scenario])
            days[day][scenario] = {
                "success": len(success),
                "missing": len(missing),
                "failed": len(failed),
                "flakes": len(flakes),
                "missing-tests": missing,
                "failed-tests": failed,
                "flaky-tests": flakes}
            all_days[scenario]["success"] += len(success)
            all_days[scenario]["missing"] += len(missing)
            all_days[scenario]["failed"] += len(failed)
            all_days[scenario]["flakes"] += len(flakes)
            # Tally per-test failure/flake counts across all days
            for n in failed:
                top_failed[n] = top_failed.get(n, 0) + 1
            for n in flakes:
                top_flakes[n] = top_flakes.get(n, 0) + 1
    # Summary of tests per scenario
    print("Weekly summary", file=buf)
    print("==============", file=buf)
    for scenario in sorted(all_days.keys()):
        success = all_days[scenario]["success"]
        missing = all_days[scenario]["missing"]
        failed = all_days[scenario]["failed"]
        flakes = all_days[scenario]["flakes"]
        print(f"{scenario}: Ran {success+failed+missing} tests. {success} passed, {failed} failed, {missing} missing, {flakes} flakes.", file=buf)
    print("\n", file=buf)
    print("Top 5 failed tests for the week", file=buf)
    for n in sorted((n for n in top_failed), key=lambda x: top_failed[x], reverse=True)[:5]:
        print(f"    {n} - {top_failed[n]}", file=buf)
    print("\n", file=buf)
    print("Top 5 flaky tests for the week", file=buf)
    for n in sorted((n for n in top_flakes), key=lambda x: top_flakes[x], reverse=True)[:5]:
        print(f"    {n} - {top_flakes[n]}", file=buf)
    print("\n", file=buf)
    # Print daily stats
    for day in sorted(days.keys()):
        print(datetime.strptime(day, "%Y%m%d").strftime("%m/%d/%Y"), file=buf)
        for scenario in sorted(days[day].keys()):
            s = days[day][scenario]
            success = s["success"]
            missing = s["missing"]
            failed = s["failed"]
            total = success + failed + missing
            flakes = s["flakes"]
            print(f"  {scenario} (Ran {total}, {success} passed, {failed} failed, {missing} missing. {flakes} flakes) :", file=buf)
            if s["missing-tests"]:
                print("    Missing:", file=buf)
                for n in s["missing-tests"]:
                    print(f"      {n}", file=buf)
            if s["failed-tests"]:
                print("    Failed:", file=buf)
                for n in sorted(s["failed-tests"].keys()):
                    print(f"      {n}", file=buf)
            if s["flaky-tests"]:
                print("    Flakes:", file=buf)
                for n in sorted(s["flaky-tests"].keys()):
                    print(f"      {n}", file=buf)
    print("\n", file=buf)
    # Print the failure details for each scenario, on each day.
    for scenario in sorted(all_days.keys()):
        success = all_days[scenario]["success"]
        failed = all_days[scenario]["failed"]
        flakes = all_days[scenario]["flakes"]
        # NOTE(review): unlike the weekly summary above, this total omits the
        # missing count -- confirm whether that is intentional.
        msg = f"{scenario}: Ran {success+failed} tests. {success} passed, {failed} failed, {flakes} flakes."
        print("=" * len(msg), file=buf)
        print(msg, file=buf)
        print("=" * len(msg), file=buf)
        if args.flake_details:
            print("Failed test details", file=buf)
            print("-------------------", file=buf)
            print_test_details(scenario, days, "failed-tests", buf)
        if args.flake_details:
            print("\nFlake test details", file=buf)
            print("-------------------", file=buf)
            print_test_details(scenario, days, "flaky-tests", buf)
        print("\n", file=buf)
    # Save the logs for the failures and flakes if a path is specified
    try:
        if args.archive_logs:
            archive_test_logs(days, args.archive_logs, all_logs)
    except RuntimeError as e:
        # Archiving problems are reported in the summary instead of aborting
        print(f"\nERROR: Problem archiving test logs - {e}", file=buf)
    return buf.getvalue()
def test_summary(args, path):
    """
    Test the weekly summary on a single directory

    Instead of pulling artifacts from github use a single directory with
    kstest.log.json and the log directories.

    Raises RuntimeError when the json log has no scenario name.
    """
    log = os.path.join(path, "kstest.log.json")
    with open(log) as f:
        data = json.load(f)
    scenario = data[0].get("scenario", None)
    if not scenario:
        raise RuntimeError("No scenario found in %s" % log)
    # The json log filename needs to be in the form of <scenario>-<YYYY-MM-DD>.json
    # (the date itself is a dummy value in test mode)
    datename = f"{scenario}-1990-01-01"
    shutil.copy(log, datename+".json")
    datenames = [datename]
    # NOTE(review): summary()'s archive path expects all_logs values with a
    # .name attribute (tempdir objects); a plain path string is stored here,
    # so --archive-logs presumably fails in test mode -- confirm.
    all_logs = {datename: path}
    report = summary(args, (d+".json" for d in datenames), all_logs)
    if args.output:
        with open(args.output, "w") as f:
            f.write(report)
    else:
        print(report)
def main(args, token):
    """Download test artifacts from GitHub, convert their kstest.log files to
    json summaries, and print or save the combined summary report.

    args  -- parsed CLI arguments
    token -- GitHub API token with access to the artifact API
    """
    artifacts = get_artifacts(token, args.artifact_names, args.start, args.end)
    if args.verbose:
        print(json.dumps(artifacts))
    zipfiles = download_artifacts(token, artifacts)
    if args.debug:
        print(f"zipfiles = {zipfiles}")
    datenames = []  # List of valid logfile names
    all_logs = {}   # Keep track of all the log tempdirs
    for name, datename, f in zipfiles:
        # Only extract/convert when rebuilding or no cached json summary exists
        if args.rebuild or not os.path.exists(datename+".json"):
            try:
                all_logs[datename] = extract_logs(f)
            except zipfile.BadZipFile:
                # GitHub can respond with a 200 and a json error instead of a zip
                # so if it isn't a valid zip, just skip it.
                os.unlink(f)
                continue
            # This is needed for logs without timestamps
            if args.rebuild:
                generate_test_list(all_logs[datename])
                rebuild_logs(all_logs[datename])
            # Run summary on kstest.log
            cmd = ["log2json",
                   "--scenario", name,
                   "--output", datename+".json",
                   os.path.join(all_logs[datename].name, "kstest.log")
                   ]
            if args.debug:
                print(cmd)
            check_call(cmd)
        # If the summary exists, add it to the list
        if os.path.exists(datename+".json"):
            datenames.append(datename)
    report = summary(args, (d+".json" for d in datenames), all_logs)
    if args.output:
        with open(args.output, "w") as f:
            f.write(report)
    else:
        print(report)
if __name__ == '__main__':
    # Command-line entry point: parse options, check for the GitHub token,
    # then either run the single-directory test mode or the full pipeline.
    parser = argparse.ArgumentParser(description="Generate a weekly summary of test results")
    parser.add_argument("--artifacts",
                        type=lambda x: x.split(","),
                        default=ARTIFACT_NAMES,
                        dest="artifact_names",
                        help="Comma separated list of artifact names to summarize")
    parser.add_argument("--start", type=datetime.fromisoformat,
                        default=(datetime.now() - timedelta(days=7)),
                        help="Start time. eg. 2022-03-03T03:46:11 (default is -7 days)")
    parser.add_argument("--end", type=datetime.fromisoformat,
                        default=datetime.now(),
                        help="end time. eg. 2022-03-03T03:46:11 (default is now)")
    parser.add_argument("--rebuild",
                        default=False, action="store_true",
                        help="Rebuild logs with timestamps")
    parser.add_argument("--flake-details",
                        default=False, action="store_true",
                        help="Include details about flaky tests in summary")
    parser.add_argument("--archive-logs",
                        help="Optionally collect the failed/flake logs in an archive directory tree")
    parser.add_argument("--output",
                        help="Path and filename to write summary report to")
    # Hidden developer option: run against a local directory instead of GitHub
    parser.add_argument("--test", help=argparse.SUPPRESS)
    parser.add_argument("--debug", default=False, action="store_true")
    parser.add_argument("--verbose", default=False, action="store_true")
    args = parser.parse_args()
    if "GITHUB_TOKEN" not in os.environ:
        print("Set GITHUB_TOKEN environmental variable to github token with access to the artifact api.")
        sys.exit(1)
    if args.debug:
        print(f"args = {args}")
    if args.test:
        test_summary(args, args.test)
    else:
        main(args, os.environ["GITHUB_TOKEN"])
|
13,367 | 8f5ede68f728f6fc160dd058ea87cb6b4e50794e | """
给定一个数组,将数组中的元素向右移动 k 个位置,其中 k 是非负数。
进阶:
尽可能想出更多的解决方案,至少有三种不同的方法可以解决这个问题。
你可以使用空间复杂度为 O(1) 的 原地 算法解决这个问题吗?
示例 1:
输入: nums = [1,2,3,4,5,6,7], k = 3
输出: [5,6,7,1,2,3,4]
解释:
向右旋转 1 步: [7,1,2,3,4,5,6]
向右旋转 2 步: [6,7,1,2,3,4,5]
向右旋转 3 步: [5,6,7,1,2,3,4]
链接:https://leetcode-cn.com/leetbook/read/top-interview-questions-easy/x2skh7/
"""
# 用python自带的切片功能,要考虑到 K 超过nums本身的长度
class Solution:
    """LeetCode 189: rotate an array right by k positions, in place."""

    def rotate(self, nums: List[int], k: int) -> None:
        """
        Do not return anything, modify nums in-place instead.

        Uses slicing: the last k elements move to the front. k is reduced
        modulo len(nums) since rotating by the length is a no-op.
        """
        n = len(nums)
        if n == 0:
            # Nothing to rotate; also avoids a modulo-by-zero below.
            return
        # BUG FIX: the old adjustment `k -= l % k` was wrong for k >= 2*l
        # (e.g. len 3, k=10 produced no rotation instead of rotating by 1).
        k %= n
        nums[:] = nums[n - k:] + nums[:n - k]
|
13,368 | b7846e90e59770abe009251d7232f44247a7dfa8 | from django import forms
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from models import *
class TitleInput(forms.TextInput):
    '''
    A text input paired with a <select> of common honorifics, rendered inside
    a .combo-container div. Needs supporting CSS/JS to behave as a combo box:

    .combo-container { position: relative; height: 18px; }
    .combo-container input {
        position: absolute; top: 0; left: 0; z-index: 999;
        padding: 0; margin: 0; width: 420px;
    }
    .combo-select {
        position: absolute; top: 0; left: 0;
        padding: 0; margin: 0; width: 438px;
    }

    $('.combo-select').change(function() {
        $(this).siblings('input').val($(this).children("option").filter(":selected").text());
    });
    '''
    # Honorifics offered in the dropdown half of the combo widget
    default_options = ["Mrs.","Mr.","Ms.","Dr.","Prof.",]

    def render(self, name, value, attrs=None):
        """Render the underlying text input plus the options <select>."""
        base = super(TitleInput, self).render(name, value, attrs)
        options = u''.join(u'<option>%s</option>' % o for o in self.default_options)
        selecter = u'<select class="combo-select"><option></option>%s</select>' % options
        return mark_safe(u'<div class="combo-container">%s%s</div>' % (base, selecter))
class NominatorForm(forms.ModelForm):
    """ModelForm for a Nominator; widened inputs, bookkeeping fields hidden."""
    class Meta:
        model = Nominator
        # These fields are managed by the application, not entered by the user
        exclude = ('verified','web_key','created_date',)
        widgets = {
            'name': forms.TextInput(attrs={'size':'70'}),
            'affiliation': forms.TextInput(attrs={'size':'70'}),
            'email': forms.TextInput(attrs={'size':'70'}),
            'phone': forms.TextInput(attrs={'size':'70'}),
            'award_text': forms.Textarea(attrs={'rows':20, 'cols':70}),
        }
class CandidateForm(forms.ModelForm):
    """ModelForm for a Candidate filled in by the candidate themselves."""
    class Meta:
        model = Candidate
        # Relations and bookkeeping are set by the view, not the user
        exclude = ('award','requested','developers','nominator','created_date',)
        widgets = {
            'name': forms.TextInput(attrs={'size':'70'}),
            'affiliation': forms.TextInput(attrs={'size':'70'}),
            'email': forms.TextInput(attrs={'size':'70'}),
            'phone': forms.TextInput(attrs={'size':'70'}),
            'statement': forms.Textarea(attrs={'rows':20, 'cols':70}),
        }
class AwardCandidateForm(forms.ModelForm):
    """Candidate form variant used in the award flow: exposes the developers
    field but hides the candidate's contact details."""
    class Meta:
        model = Candidate
        exclude = ('award','requested','nominator','affiliation','created_date','email','phone')
        widgets = {
            'name': forms.TextInput(attrs={'size':'70'}),
            'statement': forms.Textarea(attrs={'rows':20, 'cols':70}),
            'developers': forms.Textarea(attrs={'rows':20, 'cols':70}),
            # NOTE(review): 'affiliation' is excluded above, so this widget
            # entry appears to be dead configuration -- confirm.
            'affiliation': forms.TextInput(attrs={'size':'70'}),
        }
class SupporterForm(forms.ModelForm):
    """ModelForm for a Supporter; the title field uses the combo TitleInput."""
    class Meta:
        model = Supporter
        exclude = ('statement','requested','candidate','web_key','created_date',)
        widgets = {
            'title': TitleInput(),
            'name': forms.TextInput(),
            'email': forms.TextInput(),
#            'title': TitleInput(attrs={'size':'70'}),
#            'name': forms.TextInput(attrs={'size':'70'}),
#            'email': forms.TextInput(attrs={'size':'70'}),
        }
class AwardForm(forms.ModelForm):
    """ModelForm exposing all Award fields with widened inputs."""
    class Meta:
        model = Award
        widgets = {
            'name': forms.TextInput(attrs={'size':'70'}),
            'link': forms.TextInput(attrs={'size':'70'}),
            'email_title': forms.TextInput(attrs={'size':'70'}),
            'email': forms.TextInput(attrs={'size':'70'}),
            'award_text': forms.Textarea(attrs={'rows':20, 'cols':70}),
        }
class PendingCommitteeMemberForm(forms.ModelForm):
    """ModelForm for inviting a committee member; award/web_key are assigned
    by the application rather than entered here."""
    class Meta:
        model = PendingCommitteeMember
        exclude = ('award','web_key',)
class ActivateCommitteeMemberForm(forms.Form):
    """Account-activation form for a pending committee member.

    Collects name/email plus the desired credentials, and validates that the
    username is space-free and not already taken, and that both password
    fields were filled in and match.
    """
    first_name = forms.CharField(max_length=100)
    last_name = forms.CharField(max_length=100)
    email = forms.EmailField()
    username = forms.CharField(max_length=30, min_length=4)
    password = forms.CharField(max_length=16, min_length=6,widget=forms.PasswordInput(render_value=False))
    password_again = forms.CharField(max_length=16, min_length=6,widget=forms.PasswordInput(render_value=False))

    def clean_username(self):
        """Reject usernames that contain spaces or already exist."""
        username = self.cleaned_data['username']
        if ' ' in username:
            raise forms.ValidationError(u'Username must not contain spaces')
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(u'%s already exists' % username )

    def clean_password_again(self):
        """Ensure the confirmation password is present and matches."""
        password1 = self.cleaned_data.get('password')
        password2 = self.cleaned_data.get('password_again')
        # SECURITY FIX: removed a debug print that wrote both plaintext
        # passwords to stdout (it also used Python-2-only print syntax).
        if not password2:
            raise forms.ValidationError("You must confirm your password")
        if password1 != password2:
            raise forms.ValidationError("Your passwords do not match")
        return password2
class SupportStatementForm(forms.Form):
    """Free-text statement of support entered by a supporter."""
    statement = forms.CharField( widget=forms.Textarea(attrs={'rows':20, 'cols':70}))
#class SoftwareAwardAdditionalForm(forms.ModelForm):
# class Meta:
# model = SoftwareAwardAdditional
# exclude = ('candidate',)
#
# widgets = {
# 'software_title': forms.TextInput(),
# 'developers': forms.Textarea(attrs={'rows':20, 'cols':70}),
## 'title': TitleInput(attrs={'size':'70'}),
## 'name': forms.TextInput(attrs={'size':'70'}),
## 'email': forms.TextInput(attrs={'size':'70'}),
# }
|
13,369 | c0e5a617eda5eae9b47516fb2c0d65543655965a | # pyinstaller filename
import speaker.cli
# Thin launcher so PyInstaller has a single top-level script to bundle;
# all real work is delegated to the package's CLI entry point.
speaker.cli.main()
|
13,370 | b6ae65588abfd7e38c568136d34a6793c4778bc5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Tom.Lee
# @File : bootstrap_app.py
# @Docs : main
from flaskapp import app
if __name__ == "__main__":
    # Start the Flask application with its default settings; use the
    # commented variant below to override port/debug.
    app.start()
    # app.start(port=5258, debug=False)
|
13,371 | 021dc45e12010355b1d1111aba3b102015361b82 | from infi.clickhouse_orm import migrations
from ee.clickhouse.sql.person import KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL, PERSONS_DISTINCT_ID_TABLE_MV_SQL
from posthog.settings import CLICKHOUSE_CLUSTER
# Rebuild the Kafka-fed person_distinct_id pipeline around a new column:
operations = [
    # Drop the materialized view first so it stops consuming from Kafka.
    migrations.RunSQL(f"DROP TABLE person_distinct_id_mv ON CLUSTER {CLICKHOUSE_CLUSTER}"),
    # Drop the Kafka engine table that fed the view.
    migrations.RunSQL(f"DROP TABLE kafka_person_distinct_id ON CLUSTER {CLICKHOUSE_CLUSTER}"),
    # Add the soft-delete flag to the underlying table (IF NOT EXISTS makes it idempotent).
    migrations.RunSQL(
        f"ALTER TABLE person_distinct_id ON CLUSTER {CLICKHOUSE_CLUSTER} ADD COLUMN IF NOT EXISTS is_deleted Boolean DEFAULT 0"
    ),
    # Recreate the Kafka table and the materialized view against the new schema.
    migrations.RunSQL(KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL),
    migrations.RunSQL(PERSONS_DISTINCT_ID_TABLE_MV_SQL),
]
|
13,372 | e986e68026879a41a6dbab2be1dfd650a1c3a3e0 | import speech_recognition
import tensorflow as tf
import keras |
13,373 | 05199c5bbd403ee17f137c3307423f2a76560daa | # Project Name: COVID-19 Care: Face Mask and Social Distancing Detection using Deep Learning
# Author List: Nikhil Joshi
# Filename: views.py
# Functions: homepage_view(request), login_view(request), logout_view(request), register_view(request), about_us_view(request), contact_us_view(request), account_view(request)
# Global Variables: NA
from django.shortcuts import render, redirect
from account.forms import *
from django.contrib.auth import logout, login, authenticate
from django.http import HttpResponse
# Function Name: homepage_view
# Input: HTTP request
# Output: Returns the template of the homepage
# Logic: Return and render the template
# Example Call: Given by Django framework
def homepage_view(request):
    """Render the static homepage template."""
    return render(request, 'account/home.html')
# Function Name: login_view
# Input: HTTP request
# Output: User login
# Logic: Get the user currently associated, get the email and password and login the user
# Example Call: Given by Django framework
def login_view(request):
    """Render the login page and authenticate submitted credentials.

    Already-authenticated visitors are sent straight to the homepage. On a
    valid POST the user is authenticated by email/password and logged in;
    otherwise the bound (or fresh) form is re-rendered with the page.
    """
    if request.user.is_authenticated:
        return redirect("homepage")

    context = {}
    if request.POST:
        form = AccountAuthenticationForm(request.POST)
        if form.is_valid():
            user = authenticate(email=request.POST['email'],
                                password=request.POST['password'])
            if user:
                login(request, user)
                return redirect("homepage")
    else:
        form = AccountAuthenticationForm()
    context['login_form'] = form
    return render(request, "account/login.html", context)
# Function Name: logout_view
# Input: HTTP request
# Output: Logs out the user
# Logic: Use logout() function from django library
# Example Call: Given by Django framework
def logout_view(request):
    """Log out the current session (no-op for anonymous users) and go home."""
    logout(request)
    return redirect('homepage')
# Function Name: register_view
# Input: HTTP request
# Output: Regsiters the new user
# Logic: Once the registration form is displayed, get the user email id, and password and login the user
# while storing the details in the database for future logins
def register_view(request):
    """Register a new account and immediately log the new user in.

    A GET renders an empty RegistrationForm. A valid POST saves the account,
    logs it in and redirects to the homepage; an invalid POST re-renders the
    bound form so its validation errors are shown.

    Cleanup: removed dead locals (`valuenext`, `email`, `password`) that were
    computed from the request/form but never used.
    """
    if request.POST:
        form = RegistrationForm(request.POST)
        if form.is_valid():
            account = form.save()
            login(request, account)
            return redirect('homepage')
        # Invalid submission: show the form again with its errors
        return render(request, 'account/register.html', context={'registration_form': form})
    # Plain GET: empty form
    form = RegistrationForm()
    return render(request, 'account/register.html', context={'registration_form': form})
# Function Name: about_us_view
# Input: HTTP request
# Output: Returns the template of the about us page
# Logic: Return and render the template
# Example Call: Given by Django framework
def about_us_view(request):
    """Render the static About Us template."""
    return render(request, 'account/about.html')
# Function Name: contact_us_view
# Input: HTTP request
# Output: Returns the template of the contact us page
# Logic: Return and render the template
# Example Call: Given by Django framework
def contact_us_view(request):
    """Render the static Contact Us template."""
    return render(request, 'account/contact.html')
# Function Name: account_view
# Input: HTTP request
# Output: User can change the username and email id if required
# Logic: Firstly, if the user is not logged in, ask him to login first
# Then get the current data from the database which will be editable
# and change the data. Finally display the updated data and also give a success message
# Example Call: Given by Django framework
def account_view(request):
    """Let a logged-in user edit their username and email.

    Anonymous visitors are redirected to the login page. On POST the update
    form is validated and saved and a success message is added to the
    context; on GET the form is pre-filled with the current account values.
    """
    context = {}
    if not request.user.is_authenticated:
        return redirect('login')
    if request.POST:
        form = AccountUpdateForm(request.POST, instance=request.user)
        if form.is_valid():
            # Reset the form's initial data so the re-rendered form shows the
            # just-submitted values instead of the pre-save ones.
            form.initial = {
                'email': request.POST['email'],
                'username': request.POST['username'],
            }
            form.save()
            context['success_message'] = 'Changes saved!!!'
    else:
        form = AccountUpdateForm(
            initial = {
                'email': request.user.email,
                'username': request.user.username
            }
        )
    context['account_form'] = form
    return render(request, 'account/account.html', context)
13,374 | 0bbc094c6caad67ed138a9212b0a72b171914ade | # Assignment 4 for CSC401 Python
import functools
from Circle import Circle
# Function One(list of strings)
# Take a list of strings as a parameter and return all unique strings
def function_one(string_list):
    """Return the strings that occur exactly once in *string_list*.

    The result preserves the order of first occurrence, matching the original
    nested-loop implementation, but runs in O(n) instead of O(n^2) by
    counting occurrences in a single pass.
    """
    counts = {}
    for s in string_list:
        counts[s] = counts.get(s, 0) + 1
    return [s for s in string_list if counts[s] == 1]
# Function Two(integer)
# Take an integer and return whether or not the number is Perfect
# Perfect number = sum of divisors = number
def function_two(perfect_number):
    """Return True if *perfect_number* is a perfect number.

    A perfect number equals the sum of its proper divisors (divisors
    excluding the number itself), e.g. 6 = 1 + 2 + 3.

    Improvements over the original: no longer double-counts the square root
    of perfect squares, and returns False (instead of raising) for inputs
    below 2, including 0 and negatives.
    """
    if perfect_number < 2:
        return False
    divisor_sum = 1  # 1 divides every integer; the number itself is excluded
    for i in range(2, int(perfect_number ** 0.5) + 1):
        if perfect_number % i == 0:
            divisor_sum += i
            other = perfect_number // i
            if other != i:  # avoid counting a square root twice
                divisor_sum += other
    return divisor_sum == perfect_number
# Function Three(integer)
# Take an integer and return Perfect numbers <= number
def function_three(perfect_number_two):
    """Return all perfect numbers <= *perfect_number_two*, ascending.

    Fixes an off-by-one: the original used range(1, n), which excluded n
    itself even though the stated contract is "perfect numbers <= number"
    (e.g. function_three(496) wrongly omitted 496).
    """
    return [i for i in range(1, perfect_number_two + 1) if function_two(i)]
# Function Four(list of mixed types)
# Take list of mixed type and count number of integers
def function_four(mixed_list):
    """Count the integers in *mixed_list*, descending into nested lists.

    Note: as in the original, bools count as ints (isinstance(True, int)).
    """
    total = 0
    for item in mixed_list:
        if isinstance(item, int):
            total += 1
        elif isinstance(item, list):
            # Recurse into sub-lists and accumulate their counts
            total += function_four(item)
    return total
# Function Five(list of anything)
# Take list of anything and remove second item
def function_five(list_of_anything=None):
    """Delete the second item of *list_of_anything* (if any) and return it.

    The list is modified in place and also returned. Fixes the mutable
    default argument: the old `=[]` default was a single shared list that
    callers of the returned value could mutate across calls.
    """
    if list_of_anything is None:
        list_of_anything = []
    if len(list_of_anything) > 1:
        del list_of_anything[1]
    return list_of_anything
# Factors function borrowed from: http://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
# Quicker than my for loop method
def factors(n):
    """Return the divisors of n as [i, n//i] pairs in ascending i order.

    Trial-divides only up to sqrt(n). Note that for perfect squares the
    square root appears twice (e.g. factors(16) contains 4 twice), matching
    the original behaviour that callers rely on.
    """
    result = []
    for i in range(1, int(n ** 0.5) + 1):
        if n % i == 0:
            result.extend((i, n // i))
    return result
def main():
    """Exercise each assignment function on the sample inputs, then Circle."""
    print(function_one(['good', 'cat', 'bad', 'cat']))  # expect ['good', 'bad']
    print(function_two(496))  # 496 is perfect -> True
    print(function_three(2048))  # perfect numbers up to 2048
    print(function_four([1, ['A', 2], 'B', 3, 'C', 4, ['D', 5]]))  # 5 integers
    print(function_five(['A', ['A', 'B'], 'C']))  # second item removed
    my_circle = Circle()
    my_circle.radius = 5
    print(my_circle.area())
    print(my_circle.circumference())
|
13,375 | 9fb9ad4cc1c36b7458633e2e9da187757669166c | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-10-13 18:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.9, 2017-10-13).

    Splits the old generic Category model into per-section category models
    (CategoryOrder / CategorySearch / CategoryEntertaiment) and repoints the
    place models' category foreign keys at the new tables.
    """

    dependencies = [
        ('myapi', '0005_placeorder_placesearch'),
    ]

    operations = [
        migrations.CreateModel(
            name='CategoryOrder',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Название')),
                ('image', models.ImageField(upload_to='category_image', verbose_name='Изображение')),
                ('category_choice', models.CharField(choices=[('Search', 'Найти'), ('Order', 'Заказать'), ('Entertaiment', 'Развлечения')], default='Search', max_length=100, verbose_name='Раздел')),
            ],
            options={
                'verbose_name_plural': 'Категории заказать',
                'verbose_name': 'Категория заказать',
            },
        ),
        migrations.CreateModel(
            name='CategorySearch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, verbose_name='Название')),
                ('image', models.ImageField(upload_to='category_image', verbose_name='Изображение')),
                ('category_choice', models.CharField(choices=[('Search', 'Найти'), ('Order', 'Заказать'), ('Entertaiment', 'Развлечения')], default='Search', max_length=100, verbose_name='Раздел')),
            ],
            options={
                'verbose_name_plural': 'Категории поиска',
                'verbose_name': 'Категория поиска',
            },
        ),
        # The old generic Category model becomes the entertainment category
        migrations.RenameModel(
            old_name='Category',
            new_name='CategoryEntertaiment',
        ),
        migrations.AlterModelOptions(
            name='categoryentertaiment',
            options={'verbose_name': 'Категория развлечений', 'verbose_name_plural': 'Категории развлечений'},
        ),
        migrations.AlterField(
            model_name='placeorder',
            name='category',
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.CASCADE, to='myapi.CategoryOrder', verbose_name='Категория'),
        ),
        migrations.AlterField(
            model_name='placesearch',
            name='category',
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.CASCADE, to='myapi.CategorySearch', verbose_name='Категория'),
        ),
    ]
|
13,376 | 4d740dfcee347e030bee4dcb3af696e519b61b4a | # -*- coding: utf-8 -*-
"""
Created on Tue May 26 00:33:58 2020
@author: Han
"""
import numpy as np
import scipy.optimize as optimize
from tqdm import tqdm
from utils.helper_func import softmax
def fit_dynamic_learning_rate_session(choice_history, reward_history, slide_win=10, pool='', x0=None,
                                      fixed_sigma_bias='none', method='DE'):
    ''' Fit R-W 1972 with sliding window = 10 (Wang, ..., Botvinick, 2018)

    Parameters
    ----------
    choice_history : 1 x trial_n array of chosen side indices (0/1)
    reward_history : per-side x trial_n array of rewards (indexed by choice)
    slide_win : width of the sliding fitting window, in trials
    pool : '' for serial fitting; anything else enables DE parallel workers
    x0 : initial guess [learn_rate, softmax_temperature, biasL];
         None or [] selects the default [0.4, 0.4, 0]
    fixed_sigma_bias : 'global' (fix sigma/bias at x0), 'zeros' (pin them
         near zero), or 'none' (fit all three freely)
    method : 'DE' for differential evolution; anything else uses L-BFGS-B

    Returns
    -------
    (fitted_learn_rate, fitted_sigma, fitted_bias, Q, choice_prob)
    '''
    trial_n = np.shape(choice_history)[1]
    # Avoid the mutable-default-argument trap: the old signature used x0=[].
    if not x0:
        x0 = [0.4, 0.4, 0]

    # Settings for RW1972
    # ['RW1972_softmax', ['learn_rate', 'softmax_temperature', 'biasL'],[0, 1e-2, -5],[1, 15, 5]]
    if fixed_sigma_bias == 'global':
        fit_bounds = [[0, x0[1], x0[2]], [1, x0[1], x0[2]]]  # Fixed sigma and bias at the global fitted parameters
    elif fixed_sigma_bias == 'zeros':
        fit_bounds = [[0, 1e-4, 0], [1, 1e-4, 0]]
    elif fixed_sigma_bias == 'none':
        fit_bounds = [[0, 1e-2, -5], [1, 15, 5]]
    else:
        # BUG FIX: the old default was the string 'None', which matched no
        # branch and left fit_bounds undefined (NameError on first use).
        raise ValueError(f"fixed_sigma_bias must be 'global', 'zeros' or 'none', got {fixed_sigma_bias!r}")

    Q = np.zeros(np.shape(reward_history))  # Cache of Q values (using the best fit at each step)
    choice_prob = Q.copy()
    fitted_learn_rate = np.zeros(np.shape(choice_history))
    fitted_sigma = np.zeros(np.shape(choice_history))
    fitted_bias = np.zeros(np.shape(choice_history))

    for t in tqdm(range(1, trial_n - slide_win), desc='Sliding window', total=trial_n - slide_win):
        # Start from the second trial; each window is refit from scratch
        Q_0 = Q[:, t - 1]  # Initial Q for this window
        choice_this = choice_history[:, t: t + slide_win]
        reward_this = reward_history[:, t: t + slide_win]

        if method == 'DE':
            fitting_result = optimize.differential_evolution(func=negLL_slide_win, args=(Q_0, choice_this, reward_this),
                                                             bounds=optimize.Bounds(fit_bounds[0], fit_bounds[1]),
                                                             mutation=(0.5, 1), recombination=0.7, popsize=4, strategy='best1bin',
                                                             disp=False,
                                                             workers=1 if pool == '' else 8,  # For DE, pool only toggles parallelism
                                                             updating='immediate' if pool == '' else 'deferred')
        else:
            fitting_result = optimize.minimize(negLL_slide_win, x0, args=(Q_0, choice_this, reward_this), method='L-BFGS-B',
                                               bounds=optimize.Bounds(fit_bounds[0], fit_bounds[1]))

        # Save parameters
        learn_rate, softmax_temperature, biasL = fitting_result.x
        fitted_learn_rate[:, t] = learn_rate
        fitted_sigma[:, t] = softmax_temperature
        fitted_bias[:, t] = biasL

        # Simulate one step to get the first Q from this best fit as the initial value of the next window
        choice_0 = choice_this[0, 0]
        Q[choice_0, t] = Q_0[choice_0] + learn_rate * (reward_this[choice_0, 0] - Q_0[choice_0])  # Chosen side
        Q[1 - choice_0, t] = Q_0[1 - choice_0]  # Unchosen side
        choice_prob[:, t] = softmax(Q[:, t], softmax_temperature, bias=np.array([biasL, 0]))  # Choice prob (just for validation)

    return fitted_learn_rate, fitted_sigma, fitted_bias, Q, choice_prob
def fit_dynamic_learning_rate_session_no_bias_free_Q_0(choice_history, reward_history, slide_win=10, pool='', x0=None,
                                                       fixed_sigma='none', method='DE'):
    '''
    Fit R-W 1972 with sliding window = 10 (Wang, ..., Botvinick, 2018)
    For each sliding window, allows Q_init to be a parameter, no bias term

    Parameters
    ----------
    choice_history : 1 x trial_n array of chosen side indices (0/1)
    reward_history : per-side x trial_n array of rewards (indexed by choice)
    slide_win : width of the sliding fitting window, in trials
    pool : '' for serial fitting; anything else enables DE parallel workers
    x0 : initial guess [learn_rate, softmax_temperature, Q_0_L, Q_0_R];
         None or [] selects the default [0.4, 0.4, 0.5, 0.5]
    fixed_sigma : 'global', 'zeros', or 'none' (see fit bounds below)
    method : 'DE' for differential evolution; anything else uses L-BFGS-B

    Returns
    -------
    (fitted_learn_rate, fitted_sigma, fitted_Q_0, Q, choice_prob) where
    fitted_Q_0 is the last window's fitted initial Q pair.
    '''
    trial_n = np.shape(choice_history)[1]
    # Avoid the mutable-default-argument trap: the old signature used x0=[].
    if not x0:
        x0 = [0.4, 0.4, 0.5, 0.5]

    # Settings for RW1972
    # ['RW1972_softmax', ['learn_rate', 'softmax_temperature', 'Q_0'],[0, 1e-2, -5],[1, 15, 5]]
    if fixed_sigma == 'global':
        fit_bounds = [[0, x0[1], 0, 0], [1, x0[1], 1, 1]]  # Fixed sigma at the global fitted parameter
    elif fixed_sigma == 'zeros':
        fit_bounds = [[0, 1e-4, 0, 0], [1, 1e-4, 1, 1]]
    elif fixed_sigma == 'none':
        fit_bounds = [[0, 1e-2, 0, 0], [1, 15, 1, 1]]
    else:
        # Fail loudly instead of leaving fit_bounds undefined (NameError later).
        raise ValueError(f"fixed_sigma must be 'global', 'zeros' or 'none', got {fixed_sigma!r}")

    Q = np.zeros(np.shape(reward_history))  # Cache of Q values (using the best fit at each step)
    choice_prob = Q.copy()
    fitted_learn_rate = np.zeros(np.shape(choice_history))
    fitted_sigma = np.zeros(np.shape(choice_history))
    # BUG FIX: initialize before the loop so the return statement cannot hit a
    # NameError when trial_n <= slide_win + 1 (the loop body never runs).
    fitted_Q_0 = np.array(x0[2:4])

    for t in tqdm(range(1, trial_n - slide_win), desc='Sliding window', total=trial_n - slide_win):
        choice_this = choice_history[:, t: t + slide_win]
        reward_this = reward_history[:, t: t + slide_win]

        if method == 'DE':
            fitting_result = optimize.differential_evolution(func=negLL_slide_win_no_bias_free_Q_0, args=(choice_this, reward_this),
                                                             bounds=optimize.Bounds(fit_bounds[0], fit_bounds[1]),
                                                             mutation=(0.5, 1), recombination=0.7, popsize=4, strategy='best1bin',
                                                             disp=False,
                                                             workers=1 if pool == '' else 8,  # For DE, pool only toggles parallelism
                                                             updating='immediate' if pool == '' else 'deferred')
        else:
            fitting_result = optimize.minimize(negLL_slide_win_no_bias_free_Q_0, x0, args=(choice_this, reward_this), method='L-BFGS-B',
                                               bounds=optimize.Bounds(fit_bounds[0], fit_bounds[1]))

        # Save parameters
        learn_rate, softmax_temperature, Q_0_L, Q_0_R = fitting_result.x
        fitted_learn_rate[:, t] = learn_rate
        fitted_sigma[:, t] = softmax_temperature
        fitted_Q_0 = np.array([Q_0_L, Q_0_R])

        # Simulate one step to get the first Q from this best fit as the initial value of the next window
        choice_0 = choice_this[0, 0]
        Q[choice_0, t] = fitted_Q_0[choice_0] + learn_rate * (reward_this[choice_0, 0] - fitted_Q_0[choice_0])  # Chosen side
        Q[1 - choice_0, t] = fitted_Q_0[1 - choice_0]  # Unchosen side
        choice_prob[:, t] = softmax(Q[:, t], softmax_temperature)  # Choice prob (just for validation)

    return fitted_learn_rate, fitted_sigma, fitted_Q_0, Q, choice_prob
def negLL_slide_win(fit_value, *args):
    ''' Negative likelihood function for the sliding window

    fit_value : candidate [learn_rate, softmax_temperature, biasL]
    args      : (Q_0, choices, rewards) -- the window's initial Q values,
                a 1 x trial_n array of chosen sides, and a per-side x trial_n
                reward array
    Returns the summed negative log-likelihood of the observed choices under
    a Rescorla-Wagner simulation with these parameters.
    '''
    # Arguments interpretation
    Q_0, choices, rewards = args
    learn_rate, softmax_temperature, biasL = fit_value
    bias_terms = np.array([biasL, 0])  # bias is applied to the left side only
    trial_n_win = np.shape(choices)[1]
    Q_win = np.zeros_like(rewards)  # K_arm * trial_n
    choice_prob_win = np.zeros_like(rewards)

    # -- Do mini-simulation in this sliding window (light version of RW1972) --
    for t in range(trial_n_win):
        # The first trial starts from the supplied Q_0; later trials chain on
        Q_old = Q_0 if t == 0 else Q_win[:, t - 1]
        # Update Q
        choice_this = choices[0, t]
        Q_win[choice_this, t] = Q_old[choice_this] + learn_rate * (rewards[choice_this, t] - Q_old[choice_this])  # Chosen side
        Q_win[1 - choice_this, t] = Q_old[1 - choice_this]  # Unchosen side
        # Update choice_prob
        choice_prob_win[:, t] = softmax(Q_win[:, t], softmax_temperature, bias = bias_terms)

    # Compute negative likelihood
    likelihood_each_trial = choice_prob_win [choices[0,:], range(trial_n_win)]  # Get the actual likelihood for each trial

    # Deal with numerical precision
    likelihood_each_trial[(likelihood_each_trial <= 0) & (likelihood_each_trial > -1e-5)] = 1e-16  # To avoid infinity, which makes the number of zero likelihoods informative!
    likelihood_each_trial[likelihood_each_trial > 1] = 1
    negLL = - sum(np.log(likelihood_each_trial))
    return negLL
def negLL_slide_win_no_bias_free_Q_0(fit_value, *args):
    '''Negative log-likelihood for one sliding window, no bias, free initial Q.

    ``fit_value`` is (learn_rate, softmax_temperature, Q_0_L, Q_0_R);
    ``args`` is (choices, rewards) with choices shaped (1, n_trials) and
    rewards shaped (K_arm, n_trials).
    '''
    choices, rewards = args
    learn_rate, softmax_temperature, Q_0_L, Q_0_R = fit_value
    n_trials = np.shape(choices)[1]
    Q_sim = np.zeros_like(rewards)          # K_arm * n_trials
    p_choice = np.zeros_like(rewards)
    # Mini Rescorla-Wagner simulation across the window.
    for t in range(n_trials):
        prev_Q = np.array([Q_0_L, Q_0_R]) if t == 0 else Q_sim[:, t - 1]
        chosen = choices[0, t]
        unchosen = 1 - chosen
        # Delta-rule update on the chosen arm; the other arm is carried over.
        prediction_error = rewards[chosen, t] - prev_Q[chosen]
        Q_sim[chosen, t] = prev_Q[chosen] + learn_rate * prediction_error
        Q_sim[unchosen, t] = prev_Q[unchosen]
        p_choice[:, t] = softmax(Q_sim[:, t], softmax_temperature)
    # Likelihood of the choice actually made on each trial.
    trial_likelihood = p_choice[choices[0, :], range(n_trials)]
    # Numerical-precision guard: clamp tiny non-positive values to 1e-16 so a
    # log(0) cannot occur, and cap values slightly above 1.
    near_zero = (trial_likelihood <= 0) & (trial_likelihood > -1e-5)
    trial_likelihood[near_zero] = 1e-16
    trial_likelihood[trial_likelihood > 1] = 1
    return -np.log(trial_likelihood).sum()
|
13,377 | 88c318beb64236c6d6d21e2f13b15fd42745f13b | from MicroTokenizer import dag_tokenizer
dag_tokenizer.graph_builder.build_graph("知识就是力量")
dag_tokenizer.graph_builder.write_graphml("output.graphml")
|
13,378 | dcdb6c2f3d09aabc0b69018e4d311bd4b1b8cff1 | print('crypto{biclique}') |
def mediapopulacao(salario,quantidadefilhos):
    """Return the total salary divided by the total number of children.

    NOTE(review): despite the name ("population mean"), the divisor is the
    total child count, not the number of households -- confirm intent.
    Raises ZeroDivisionError when no children were recorded.
    """
    total_salario = 0.0
    total_filhos = 0
    for sal, filhos in zip(salario, quantidadefilhos):
        total_salario += sal
        total_filhos += filhos
    return total_salario / total_filhos
def mediafilhos(quantidadefilhos, i):
    """Return the mean number of children per household.

    Parameters
    ----------
    quantidadefilhos : list[int]
        Number of children reported by each household.
    i : int
        Unused; kept only for backward compatibility with existing callers.

    Raises
    ------
    ZeroDivisionError
        If the list is empty.
    """
    # The original used a confusing for/else and derived the count from the
    # final loop index (j + 1); sum()/len() is equivalent for every
    # non-empty list and far clearer.
    return sum(quantidadefilhos) / len(quantidadefilhos)
def ordenasalario(salario):
    """Return (highest, lowest) salary WITHOUT mutating the input list.

    Bug fixes relative to the original:
    * The comment claimed a copy but ``r = salario`` aliased the caller's
      list and the insertion sort reordered it in place, destroying the
      index pairing with ``quantidadefilhos`` that verificasalario()
      relies on afterwards.
    * The early ``break`` in the inner loop skipped the ``maior`` update,
      so an already-sorted input such as [1, 2, 3] returned (1, 1)
      instead of (3, 1).

    Raises ValueError on an empty list.
    """
    return (max(salario), min(salario))
def verificasalario(salario,quantidadefilhos):
    """Count households whose per-child income is at most R$ 380.00.

    NOTE(review): assumes every household has at least one child -- a zero
    in ``quantidadefilhos`` raises ZeroDivisionError; confirm whether that
    input is possible upstream.
    """
    return sum(
        1 for sal, filhos in zip(salario, quantidadefilhos)
        if sal / filhos <= 380.00
    )
# Interactive survey: one salary + child count per household, terminated
# by an empty answer to the salary prompt.
salarios=0.0            # raw text read from the salary prompt
salario=[]              # salary of each household (parallel to quantidadefilhos)
quantidadefilhos=[]     # number of children of each household
i=0                     # households recorded so far
while True:
    salarios=input("Qual o valor do seu salário? ")
    if salarios =="":
        # Blank answer ends data entry: compute and print the report.
        mediapop=mediapopulacao(salario,quantidadefilhos)
        mediaf=mediafilhos(quantidadefilhos,i)
        ordenas=ordenasalario(salario)
        situacao=verificasalario(salario,quantidadefilhos)
        print(f"A média salarial da população é: {mediapop:.2f}")
        print(f"A quantidade média de filhos da população é {mediaf:.2f}")
        print(f"O maior salário é {ordenas[0]} o menor salário é {ordenas[1]} ")
        # Percentage of households at or below R$380.00 per child.
        print(f"{(float(situacao/i)*100)}% das familia(as) está(ão) recebendo até R$380,00")
        break
    else:
        # Record this household and keep prompting.
        salario.append(float(salarios))
        quantidadefilhos.append(int(input("Quantas crianças tem na sua casa? ")))
        i+=1
13,380 | a051adbb49e90a9474616c8a1835f96a85bc0876 | '''
Swap the channel of green and alpha for images exported from mali graphics debugger with texture format as rgba4444
Usage:
python swapPngChannels.py
'''
import os
from PIL import Image
if __name__ == "__main__":
    # Collect every *_ga.png found under this script's directory tree.
    curDir = os.path.dirname(os.path.realpath(__file__))
    processFiles = []
    for (dirname, dirs, files) in os.walk(curDir):
        for filename in files:
            if filename.endswith('_ga.png'):
                processFiles.append(filename)
    for pngFile in processFiles:
        print("Opening:"+pngFile)
        # NOTE(review): files discovered in subdirectories are joined
        # against curDir, not the directory they were found in -- confirm
        # the layout is expected to be flat.
        thefile = os.path.join(curDir, pngFile)
        im = Image.open(thefile)
        #im.load()
        width, height = im.size
        im_rgb = im.convert('RGBA')
        # Per-pixel shuffle (R,G,B,A) -> (B,A,R,G): restores channel order
        # for RGBA4444 textures exported by the Mali graphics debugger
        # (see module docstring).
        for x in range(0, width):
            for y in range(0,height):
                r, g, b, a = im_rgb.getpixel((x, y))
                im_rgb.putpixel((x, y), (b, a, r, g))
        #outfile, ext = os.path.splitext(infile)
        # Drop the trailing "_ga.png" (7 chars) to recover the base name.
        prefixName = pngFile[:-7]
        outfile = os.path.join(curDir, prefixName + u".png")
        im_rgb.save(outfile, "PNG")
    print("Ding!")
13,381 | 2449c9e233c7bb8b5509d6fa628055d679b70e48 | # *- coding: utf-8 -*-
# =================================
# time: 2020.7.28
# author: @tangzhilin(Resigned)
# function: 数据特征工程
# update: 8.18日 更改了数据的连接方式
# update: 9.08日 依照新的标签数据标准更改了代码
# =================================
import pandas as pd
import numpy as np
import re
import jieba
import jieba.analyse
import warnings
warnings.filterwarnings('ignore')
def feature_engineering(datas, fit):
"""
:param fit: 处理的是训练数据的话fit=True 默认为False
:param datas: all_datas
function: 针对数据进行特征工程
"""
# 文本缩减
datas.drop_duplicates(subset=['id', 'text'], keep='first', inplace=True)
pattern = "\s*[\u4e00-\u9fa5]{1,4}[哥|姐|先生|总|老板]\s{1}"
datas['text'] = datas['text'].apply(
lambda x: re.sub(pattern, '', x.strip())
)
datas['text'] = datas['text'].apply(
lambda x: np.nan if x == ''
else x
)
datas.dropna(subset=['text'], inplace=True)
# 使用jieba对文本进行分词处理
# 对没有对应标签的文本, 进行删减处理
jieba.load_userdict("../config/jieba_thesaurus.txt")
# jieba 分词
def remove_space(text):
text = np.array(list(jieba.cut(text)))
return text
datas['text'] = datas['text'].apply(
lambda x: remove_space(x)
)
datas['count_positive_word'] = datas['text'].apply(
lambda x: list(x).count('好的') + list(x).count('是的') + list(x).count('对的') + list(x).count('没问题')
)
# 关键词统计
datas['text'] = datas['text'].apply(
lambda x: "".join(x) + " "
)
id_count_words = datas.groupby(['id']).agg({
'count_positive_word': ['sum']
# 以文本id为维度, 获取文本中出了几次 正向词
})
id_count_words.columns = ['count_pos_word']
id_count_words.reset_index(inplace=True)
id_count_words = pd.DataFrame(id_count_words)
datas = pd.merge(datas, id_count_words[['id', 'count_pos_word']],
on=['id'], how='left')
# 连接 针对原始训练文本处理
# datas['labels'].fillna("remove", inplace=True)
if fit:
datas['solve'].fillna(-1, inplace=True)
datas_labels = datas.dropna(subset=['labels'])
datas_labels['index'] = datas_labels.index.values
datas_labels = datas_labels[['id', 'chat_id', 'labels', 'index', 'count_pos_word']]
def link_text(indexs):
start_index = -1.0
end_index = -1.0
solve = -1.0
index = indexs
while start_index == -1.0:
if datas.iloc[index].solve != -1.0:
solve = datas.iloc[index].solve
start_index = index
index = index - 1
index = indexs + 1
while end_index == -1.0:
if datas.iloc[index].solve != -1.0:
end_index = index
index = index + 1
text = ''
for i in range(start_index, end_index + 1):
text = text + " " + datas.iloc[i].text
return text + str(solve)
datas_labels['text'] = datas_labels['index'].apply(
lambda x: link_text(x)
)
else:
error_labels_index = list(datas.dropna(subset=['labels']).index.values)
labels_keywords = pd.read_excel('../test_data/label_keywords.xlsx')
mark_words = ['没问题', '不客气', '仅限', '有的', '是的', '好的', '可以',
'不了', '不到', '谢谢', '对的', '没空', '不错', '没车',
'到店', '没呢', '清楚', '明白', '确认', '没法', '不到',
'了解', '都是', '还没', '比较', '地址', '不多', '没有',
'放心', '嗯', '恩', '行', '没', '有']
# 针对标签制定逻辑(错标)
def bad_mark(index):
# 针对错误标记
chart_id = datas.loc[index]['id']
labels = datas.loc[index]['labels']
if type(labels) != str:
# 会出现NAN的原因是下面的逻辑设置
return
# 如果下文聊天框ID不同则删除该标签
try:
if str(chart_id) != str(datas.loc[index + 5]['id']):
datas.loc[datas.index == index, 'labels'] = np.NaN
return
except:
return
# 如果标签对应文本长度过长则删除该标签
if len(datas.loc[index]['text']) > 26:
datas.loc[datas.index == index, 'labels'] = np.NaN
return
# 若连续出现标签,则放弃连续出现的标签
if (index + 1) in error_labels_index:
datas.loc[datas.index == index, 'labels'] = np.NaN
datas.loc[datas.index == index + 1, 'labels'] = np.NaN
return
# 如果文本对应的标签数目大于三个以上则放弃该标签
if len(datas.loc[index]['labels'].split(',')) > 3:
datas.loc[datas.index == index, 'labels'] = np.NaN
return
# 三个标签文本则保留下文中有对应关键词出现的文本
if len(datas.loc[index]['labels'].split(',')) >= 2:
labels = labels.split(',')
# 将下文四行文本连接
text = datas.loc[index:(index + 4), 'text'].values.sum()
new_labels = []
for label in labels:
if label == '蜘蛛智选':
continue
label_keywords = labels_keywords.loc[labels_keywords['tag_lv2'] == label, '关键词'].values
for word in label_keywords:
if word in text:
new_labels.append(label)
break
new_labels = ','.join(i for i in new_labels)
datas.loc[datas.index == index, 'labels'] = new_labels
return
# 如果标签下文中出现了标签对应关键词则保留该标签
label_keywords = labels_keywords.loc[labels_keywords['tag_lv2'] == labels, '关键词'].values
for i in range(1, 6):
text = datas.loc[index + i]['text']
lk_num = 0
mk_num = 0
if type(text) != str:
continue
if lk_num == 0:
for word in label_keywords:
if word in text:
lk_num = 1
if mk_num == 0:
for word in mark_words:
if word in text:
mk_num = 1
if lk_num + mk_num == 2:
return
datas.loc[datas.index == index, 'labels'] = np.NaN
return
for index in error_labels_index:
bad_mark(index)
def omit_mark():
# 针对遗漏标记做处理
# 去除文本中的标签文本(包含标签对应的下文五行内容)
omit_label_index = list(datas.dropna(subset=['labels'])['labels'].index.values)
drop_indexs = [i + j for i in omit_label_index for j in range(5)]
drop_indexs = sorted(list(set(drop_indexs)))
drop_datas = datas.drop(index=drop_indexs[:-5])
drop_ids = drop_datas['id'].unique()
groups_datas = pd.DataFrame({})
def add_labels(texts):
if len(texts) > 50 or len(texts) <= 4:
return np.NaN
for lk_indexs, word in enumerate(labels_keywords['关键词']):
# index的作用是找到word对应的label也就是tag_lv2
if word in str(texts):
labels = labels_keywords.loc[lk_indexs]['tag_lv2']
return labels
return np.NaN
def drop_labels(labels, drop_index):
# labels为空直接返回无需操作
if type(labels) != str:
return np.NaN
# 确保不要出现连续的标签以第一个出现标签的文本为准
try:
if drop_index > 1 and type(group_datas.loc[drop_index - 1, 'labels']) == str:
return np.NaN
except:
return labels
role = group_datas.loc[drop_index]['role']
# role为MEMBER且label不为空
# 则判断后续文本行的role是否还是MEMBER
try:
# 这里会报错的原因是drop_index是最后一个
# 加1的话则超出了group_datas的界限
if role != 'CUSTOMER':
if group_datas.loc[drop_index + 1]['role'] != 'CUSTOMER':
return np.NaN
else:
role_num = 1
for i in range(1, 3):
if group_datas.loc[drop_index + i]['role'] == role:
role_num += 1
# 如果标签文本对应的role连续三行都一致则返回NaN
if role_num == 3:
return np.NaN
except:
return np.NaN
return labels
# 将聊天框ID为一组数据进行处理
# 首先解决的是为包含关键词的 行文本 添加对应关键词标签
# 其次解决的是有标签的行文本是否值得保留标签
for drop_id in drop_ids:
group_datas = drop_datas.loc[drop_datas['id'] == drop_id]
group_datas['index'] = group_datas.index.values
# 初选label
group_datas['labels'] = group_datas.apply(
lambda x: add_labels(x['text']), axis=1
)
group_datas['drop_labels'] = group_datas['labels']
# 现将group_datas.index按照长度设置之后需要将他的index变为原样
group_datas['labels'] = group_datas.apply(
lambda x: drop_labels(x['drop_labels'], x['index']), axis=1
)
group_datas.drop(['drop_labels'], axis=1, inplace=True)
# 其次下文三行内没有相关关键词和结束词的不要
drop_index_3 = list(group_datas.dropna(subset=['labels']).index.values)
for d_index3 in drop_index_3:
mk_num = 0
lk_num = 0
label = group_datas.loc[d_index3]['labels']
label_keywords = labels_keywords.loc[labels_keywords['tag_lv2'] == label]['关键词'].values
try:
for d_num in range(1, 4):
text = str(group_datas.loc[d_index3 + d_num]['text'])
if lk_num <= 1:
for lk in label_keywords:
if lk in text:
lk_num += 1
if mk_num <= 1:
for mw in mark_words:
if mw in text:
mk_num += 1
if (lk_num + mk_num) == 2:
break
if (lk_num + mk_num) != 2:
group_datas.loc[group_datas.index == d_index3, 'labels'] = np.NaN
except:
group_datas.loc[group_datas.index == d_index3, 'labels'] = np.NaN
# 将以聊天框ID为维度的数据新添加的标签合并到数据中
group_datas = group_datas[['chat_id', 'labels']]
group_datas.columns = ['chat_id', 'new_labels']
group_datas.dropna(subset=['new_labels'], inplace=True)
groups_datas = pd.concat([groups_datas, group_datas], sort=True)
return groups_datas
merge_datas = omit_mark()
datas = pd.merge(datas, merge_datas, on=['chat_id'], how='left')
datas.to_excel('C:\\Users\\tzl17\\Desktop\\show.xlsx')
def choose_labels(labels, new_labels):
if type(labels) == str:
return labels
if type(new_labels) == str:
return new_labels
return np.NaN
datas['labels'] = datas.apply(
lambda x: choose_labels(x['labels'], x['new_labels']), axis=1
)
datas.drop('new_labels', axis=1, inplace=True)
datas_labels = datas.dropna(subset=['labels'])
datas_labels['index'] = datas_labels.index.values
datas_labels = datas_labels[['id', 'chat_id', 'labels', 'index', 'count_pos_word']]
def link_text(label_index):
mark_num = 1
text = str(datas.loc[label_index]['text'])
chat_id = datas.loc[label_index]['id']
solve = str(datas.loc[label_index]['solve'])
index = label_index
while mark_num <= 5:
# 限制长度, 最硬性的标准如果超过了则直接反回
if len(text) > 130:
text = text[:126]
return text + str(solve)
index = index + 1
# 如果标签对应文本的下行文本不属于同一个id则直接返回None
# 如果不是下一行则返回当前连接的text
if datas.loc[index]['id'] != chat_id:
if index == label_index + 1:
return None
else:
return text + solve
text_pro = datas.loc[index]['text']
try:
text_pro2 = datas.loc[index + 1]['text']
# 判断文本中是否出现mark_words做出相应的处理
text = text + ' ' + text_pro
for mark_word in mark_words:
if mark_word in text_pro:
# 如果文本中出现了mark_words则直接连接到后面
mark_word2_num = 0
for mark_word2 in mark_words:
if mark_word2 in text_pro2:
mark_word2_num = 1
break
if mark_word2_num == 0:
return text + solve
else:
break
mark_num = mark_num + 1
except:
return None
return text + solve
datas_labels['text'] = datas_labels['index'].apply(
lambda x: link_text(x)
)
# 由于部分文本的solve并不是对应的标签所处的列因此需要特殊处理
# solve所处的位置为text的最后三位
datas_labels.dropna(subset=['text'], inplace=True)
datas_labels['solve'] = datas_labels['text'].apply(
lambda x: x[-3:]
)
datas_labels['text'] = datas_labels['text'].apply(
lambda x: x[:-3]
)
# 拆分, 分配, 连接(多标签)
# 找到并将多标签文本单独拎出来
more_labels_df = datas_labels.loc[datas_labels['labels'].str.contains(','), :]
# 剔除多标签文本保留单标签文本
datas_labels.loc[datas_labels['labels'].str.contains(','), 'text'] = np.nan
datas_labels.dropna(subset=['text'], inplace=True)
more_labels_df['labels'] = more_labels_df['labels'].apply(
lambda x: x.split(',') if ',' in x
else x
)
df = pd.DataFrame({'text': more_labels_df.text.repeat(more_labels_df.labels.str.len()),
'count_pos_word': more_labels_df.count_pos_word.repeat(more_labels_df.labels.str.len()),
'id': more_labels_df.solve.repeat(more_labels_df.labels.str.len()),
'chat_id': more_labels_df.chat_id.repeat(more_labels_df.labels.str.len()),
'solve': more_labels_df.solve.repeat(more_labels_df.labels.str.len()),
'labels': np.concatenate(more_labels_df.labels.values)})
df_one_label = datas_labels
# 标签只有一个的文本
df_more_labels = df
# 标签有多个的文本
def shuffle_text(text):
# 将对应多个标签的文本内容打乱顺序
text = text.split(' ')
index = np.arange(len(text))
np.random.shuffle(index)
text = "".join(text[i] for i in index).strip()
return text
df_more_labels['text'] = df_more_labels.apply(
lambda x: shuffle_text(x['text']), axis=1
)
df = pd.concat([df_one_label, df_more_labels], ignore_index=True)
datas_labels = pd.concat([datas_labels, df], ignore_index=True)
datas_labels.drop_duplicates(subset=['text', 'labels'], keep='first', inplace=True)
datas_labels.drop(columns='index', inplace=True)
datas_labels = datas_labels.sample(frac=1.0)
datas_labels['count_pos_word'].fillna(0, inplace=True)
def end_link(text, labels, pos_count):
text = text + ' ' + str(labels) + ' ' + str(pos_count)
return text
datas_labels['text'] = datas_labels.apply(
lambda x: end_link(x['text'], x['labels'], x['count_pos_word']), axis=1)
return datas_labels
def engineer_datas(datas, fit=False):
datas = feature_engineering(datas, fit)
datas['text_size'] = datas['text'].apply(
lambda x: np.nan if len(x) > 130 else len(x)
)
datas.dropna(subset=['text_size'], inplace=True)
return datas
|
13,382 | 75aa706fb750b8f3b10f85d0797ada2feac394f3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the LogEvent table."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='LogEvent',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),
                ('source', models.CharField(max_length=64)),
                ('action', models.CharField(max_length=64)),
                ('eventCode', models.IntegerField()),
                # Date and time are stored separately; time is free-form
                # text (max 10 chars), not a TimeField.
                ('date', models.DateField()),
                ('time', models.CharField(max_length=10)),
                ('message', models.TextField()),
            ],
        ),
    ]
|
13,383 | 0fcf28a1944be3bce49ae820ae48de01916d6f81 | import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
while True:
GPIO.setup(13, GPIO.OUT, initial=GPIO.LOW)
|
13,384 | 6f5977687e33ffa0280e67f8b11e0386ec4f9fe0 |
# coding: utf-8
# In[ ]:
from torch.utils.data import Dataset, DataLoader
import os
import torchvision
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18, alexnet
import PIL
from torchlars import LARS
import cv2
import numpy as np
##################################################### Training G_phi & C_psi (classifier) ###########################################
np.random.seed(0)
torch.manual_seed(0)
CHECKPOINT_DIR = "../Models/"
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 64
FEATURE_DIM = 256
IMAGE_SIZE = 32
CLASSES = 10
beta = 0.01
M = 20000
W = 5
src_path = ''
target_path = ''
class Backbone(nn.Module):
    """Minimal feature-extractor base class.

    Subclasses are expected to assign ``self._out_features`` with the
    dimensionality of the flattened feature vector they produce.
    """

    def __init__(self):
        super().__init__()

    def forward(self):
        pass

    @property
    def out_features(self):
        """Output feature dimension, or ``None`` if the subclass never set it."""
        stored = self.__dict__.get('_out_features')
        return None if stored is None else self._out_features
class Convolution(nn.Module):
    """3x3 same-padding convolution followed by an in-place ReLU."""

    def __init__(self, c_in, c_out):
        super().__init__()
        self.conv = nn.Conv2d(c_in, c_out, 3, stride=1, padding=1)
        self.relu = nn.ReLU(True)

    def forward(self, x):
        features = self.conv(x)
        return self.relu(features)
class ConvNet(Backbone):
    """Four Convolution blocks, each followed by 2x2 max-pooling.

    A 32x32 input is halved four times (32 -> 2), so the flattened output
    has 2**2 * c_hidden features, reported via Backbone.out_features.
    """
    def __init__(self, c_hidden=64):
        super().__init__()
        self.conv1 = Convolution(3, c_hidden)
        self.conv2 = Convolution(c_hidden, c_hidden)
        self.conv3 = Convolution(c_hidden, c_hidden)
        self.conv4 = Convolution(c_hidden, c_hidden)
        # Flattened feature size after four 2x2 poolings of a 32x32 input.
        self._out_features = 2**2 * c_hidden

    def _check_input(self, x):
        # The pooling chain assumes a fixed 32x32 spatial size.
        H, W = x.shape[2:]
        assert H == 32 and W == 32, 'Input to network must be 32x32, ' 'but got {}x{}'.format(H, W)

    def forward(self, x):
        self._check_input(x)
        x = self.conv1(x)
        x = F.max_pool2d(x, 2)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)
        x = self.conv3(x)
        x = F.max_pool2d(x, 2)
        x = self.conv4(x)
        x = F.max_pool2d(x, 2)
        # (N, c_hidden, 2, 2) -> (N, 4 * c_hidden)
        return x.view(x.size(0), -1)
class DGdata(Dataset):
    """Digit-domain dataset laid out as <root>/<domain>/<digit>/<file>.

    All file names and labels are discovered eagerly in __init__ into
    three parallel lists (img_files, labels, domain_labels).
    NOTE(review): the ``image_size`` argument is currently unused.
    """
    def __init__(self, root_dir, image_size, domains=None, transform = None):
        self.root_dir = root_dir
        # Normalise to a trailing slash so paths can be concatenated below.
        if root_dir[-1] != "/":
            self.root_dir = self.root_dir + "/"
        self.categories = ['0', '1', '2', '3', '4', '5', '6','7', '8', '9']
        if domains is None:
            self.domains = ["mnist", "mnist_m", "svhn", "syn"]
        else:
            self.domains = domains
        if transform is None:
            self.transform = transforms.ToTensor()
        else:
            self.transform = transform
        # make a list of all the files in the root_dir
        # and read the labels
        self.img_files = []
        self.labels = []
        self.domain_labels = []
        for domain in self.domains:
            for category in self.categories:
                for image in os.listdir(self.root_dir+domain+'/'+category):
                    self.img_files.append(image)
                    self.labels.append(self.categories.index(category))
                    self.domain_labels.append(self.domains.index(domain))

    def __len__(self):
        return len(self.img_files)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Rebuild the full path from the parallel domain/category/file lists.
        img_path = self.root_dir + self.domains[self.domain_labels[idx]] + "/" + self.categories[self.labels[idx]] + "/" + self.img_files[idx]
        image = PIL.Image.open(img_path)
        label = self.labels[idx]
        return self.transform(image), label
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
class UnFlatten(nn.Module):
    """Reshape a flat (N, size) tensor into (N, size, 1, 1) feature maps."""

    def forward(self, input, size=64):
        batch = input.size(0)
        return input.view(batch, size, 1, 1)
class VAE_Digits(nn.Module):
    """Convolutional VAE over 1x16x16 "feature images" with a 32-d latent.

    encode() reshapes any input to (N, 1, 16, 16); decode() re-views the
    decoder's (N, 4, 8, 8) output as (N, 1, 16, 16) (4*8*8 == 256).
    """
    def __init__(self, image_channels=1, h_dim=64, z_dim=32):
        super(VAE_Digits, self).__init__()
        # Stride-2 convs: 16x16 -> 8x8 -> 4x4 -> 2x2 with 16 channels;
        # Flatten yields 16*2*2 == 64 == h_dim.
        self.encoder = nn.Sequential(
            nn.Conv2d(image_channels, 4, kernel_size=3, stride=2,padding=1),
            nn.ReLU(),
            nn.Conv2d(4, 8, kernel_size=3, stride=2,padding=1),
            nn.ReLU(),
            nn.Conv2d(8, 16, kernel_size=3, stride=2,padding=1),
            nn.ReLU(),
            Flatten()
        )
        self.fc1 = nn.Linear(h_dim, z_dim)  # hidden -> mu
        self.fc2 = nn.Linear(h_dim, z_dim)  # hidden -> logvar
        self.fc3 = nn.Linear(z_dim, h_dim)  # latent back to hidden
        self.decoder = nn.Sequential(
            UnFlatten(),
            nn.ConvTranspose2d(h_dim, 16, kernel_size=2, stride=1),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 8, kernel_size=2, stride=2),
            nn.ReLU(),
            nn.ConvTranspose2d(8, 4, kernel_size=2, stride=2),
            nn.Sigmoid(),
        )

    def reparameterize(self, mu, logvar):
        # Reparameterisation trick: z = mu + eps * std, eps ~ N(0, 1).
        std = torch.exp(0.5*logvar)
        eps = torch.randn_like(std)
        z = mu + eps*std
        return z

    def bottleneck(self, h):
        """Map hidden vector h to a sampled latent plus its (mu, logvar)."""
        mu, logvar = self.fc1(h), self.fc2(h)
        z = self.reparameterize(mu, logvar)
        return z, mu, logvar

    def encode(self, x):
        x = x.view(-1, 1, 16,16)
        h = self.encoder(x)
        z, mu, logvar = self.bottleneck(h)
        return z, mu, logvar

    def decode(self, z):
        z = self.fc3(z)
        z = self.decoder(z)
        return z.view(-1, 1, 16,16)

    def forward(self, x):
        """Return (reconstruction, mu, logvar) for input x."""
        z, mu, logvar = self.encode(x)
        z = self.decode(z)
        return z, mu, logvar
digits_fnet = ConvNet(c_hidden=64)
checkpoint = torch.load('../Models/digits_fnet.pt')
digits_fnet.load_state_dict(checkpoint['model_state_dict'])
digits_fnet = digits_fnet.to(dev)
layers = []
layers.append(nn.Linear(FEATURE_DIM, CLASSES))
classifier = torch.nn.Sequential(*layers).to(dev)
CELoss = nn.CrossEntropyLoss()
classifier = classifier.to(dev)
data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=IMAGE_SIZE), transforms.ToTensor()] )
ds = DGdata(".", IMAGE_SIZE, [src_path], transform=data_transforms)
dataloader = DataLoader(ds, batch_size=64, shuffle=True, num_workers = 4)
digits_fnet.eval()
opt = torch.optim.Adam(classifier.parameters(), lr=0.003)
for epoch in range(15):
step_wise_loss = []
step_wise_accuracy = []
for image_batch, labels in (dataloader):
image_batch = image_batch.float()
if dev is not None:
image_batch, labels = image_batch.to(dev), labels.to(dev)
# zero the parameter gradients
opt.zero_grad()
z = digits_fnet(image_batch).to(dev)
pred = classifier(z)
loss = CELoss(pred, labels)
accuracy = (pred.argmax(dim=1) == labels).float().sum()/pred.shape[0]
loss.backward()
opt.step()
step_wise_loss.append(loss.detach().cpu().numpy())
step_wise_accuracy.append(accuracy.detach().cpu().numpy())
print("Epoch " + str(epoch) + " Loss " + str(np.mean(step_wise_loss)) + " Accuracy " + str(np.mean(step_wise_accuracy)))
vae = VAE_Digits().to(dev)
VAEoptim = LARS(torch.optim.SGD(vae.parameters(), lr=0.005))
dataloader_vae = DataLoader(ds, batch_size=64, shuffle=True, num_workers = 4)
#modified loss
def loss_function(recon_x, x, mu, logvar):
    """VAE loss: L1 + MSE reconstruction terms plus the analytic KL term.

    NOTE(review): both reconstruction terms use reduction='mean' while the
    KL divergence is an un-normalised sum, so their relative weighting
    depends on batch size -- confirm this is intended.
    """
    target = x.view(-1, 1, 16, 16)
    reconstruction = (F.l1_loss(recon_x, target, reduction='mean')
                      + F.mse_loss(recon_x, target, reduction='mean'))
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return reconstruction + kld
def trainVAE(epoch):
    """Run one VAE training epoch over ``dataloader_vae``.

    Relies on module-level globals: ``vae``, ``VAEoptim``, ``digits_fnet``,
    ``dataloader_vae``, ``dev`` and ``loss_function``. The VAE learns to
    reconstruct the (detached) features produced by ``digits_fnet``.
    """
    vae.train()
    train_loss = 0
    print(epoch)
    for batch_idx, (image_batch, _) in enumerate(dataloader_vae):
        image_batch = image_batch.float()
        image_batch = image_batch.to(dev)
        VAEoptim.zero_grad()
        h = digits_fnet(image_batch).to(dev)
        #print(h.shape)
        # Reshape the 256-d feature vector into a 1x16x16 "image" for the VAE.
        h = h.view(-1, 1, 16,16)
        #print(h.shape)
        # Detach so no gradient flows back into digits_fnet.
        h=h.detach()
        recon_batch, mu, logvar = vae(h)
        loss = loss_function(recon_batch, h, mu, logvar)
        loss.backward()
        train_loss += loss.item()
        VAEoptim.step()
    # Report the per-sample average loss for the epoch.
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(dataloader_vae.dataset)))
for epoch in range(1, 150):
trainVAE(epoch)
if (epoch)%10 == 0:
torch.save({'epoch' : epoch,
'model_state_dict': vae.state_dict(),
'optimizer_state_dict': VAEoptim.state_dict()
}, CHECKPOINT_DIR+"VAEepoch_digits_"+str(epoch)+".pt")
############################################ inference - target projection ##############################################################
vae.eval()
data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=IMAGE_SIZE), transforms.ToTensor()] )
test_data = DGdata(".", IMAGE_SIZE, [target_path], transform=data_transforms)
test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=True, num_workers = 4)
runs = 5
elbow = ptr
accuracy_per_run = []
for run in range(5):
print('run:',run)
step_wise_accuracy = []
for image_batch, labels in (test_dataloader):
image_batch = image_batch.float()
if dev is not None:
image_batch, labels = image_batch.to(dev), labels.to(dev)
h = digits_fnet(image_batch).to(dev)
h = h.detach()
batches = int(len(image_batch)/1)
for batch in (range(batches)):
lbl = labels[batch*1:(batch+1) * 1]
x_real = h[batch*1:(batch+1) * 1]
#print(x_real.shape)
no_1hot = lbl
lbl = F.one_hot(lbl, CLASSES).float()
zparam = torch.randn(1, 32).to(dev)
zparam = zparam.detach().requires_grad_(True)
zoptim = LARS(torch.optim.SGD([zparam], lr=beta,momentum=0.9, nesterov=True))
Uparam = []
L_s = []
for itr in range(0, M): ## projection
zoptim.zero_grad()
xhat = vae.decode(zparam).to(dev)
xhat = xhat.view(1, FEATURE_DIM)
x_real = x_real.view(1, FEATURE_DIM)
xhat = F.normalize(xhat, dim=1)
x_real = F.normalize(x_real, dim=1)
xhat = xhat.view(FEATURE_DIM)
x_real = x_real.view(FEATURE_DIM)
fnetloss = 1 - torch.dot(xhat,x_real)
fnetloss.backward()
zoptim.step()
l = fnetloss.detach().cpu().numpy()
u_param = zparam.detach().cpu().numpy()
L_s.append(l)
Uparam.append(u_param)
L_s = np.asarray(L_s)
Uparam = np.asarray(Uparam)
smooth_L_s = np.cumsum(np.insert(L_s, 0, 0))
s_vec = (smooth_L_s[W:] - smooth_L_s[:-W]) / W
double_derivative=[]
s_len=len(s_vec)
for i in range(1,s_len-1):
double_derivative.append(s_vec[i+1] + s_vec[i-1] - 2 * s_vec[i])
double_derivative=np.asarray(double_derivative)
zstar = torch.from_numpy(Uparam[np.argmax(double_derivative)])
z_in = vae.decode(zstar.to(dev))
z_in = z_in.view(-1, FEATURE_DIM)
pred = classifier(z_in.to(dev))
accuracy = (pred.argmax(dim=1) == no_1hot).float().sum()/pred.shape[0]
step_wise_accuracy.append(accuracy.detach().cpu().numpy())
print(np.mean(step_wise_accuracy))
accuracy_per_run.append(np.mean(step_wise_accuracy))
print(np.mean(accuracy_per_run))
|
13,385 | 87f9935695c845dc68248bd4b3d3867e3e7c3124 | from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
from django.conf import settings
from courses.models import Section
from .validators import coursebin_validator, preference_validator, setting_validator
from copy import deepcopy
# Create your models here.
def createDefaultPreference():
    """Build the default scheduling-preference dict for RequestData.

    A brand-new literal (including nested dicts) is constructed on every
    call, so each RequestData row receives its own independent mutable
    copy; the ``deepcopy`` the original applied to the freshly built
    literal was redundant and has been removed.
    """
    return {
        "early_time": "10:00",
        "early_weight": 75,
        "late_time": "15:00",
        "late_weight": 25,
        "break_time": "00:10",
        "break_weight": 50,
        "reserved": [
            {
                "key": "#default0",
                "from": "11:30",
                "to": "12:30",
                "wiggle": "01:00",
                "weight": 50
            },
            {
                "key": "#default1",
                "from": "17:30",
                "to": "18:30",
                "wiggle": "01:00",
                "weight": 50
            }
        ]
    }
def createDefaultSetting():
    """Build the default search/UI settings dict for RequestData.

    A new literal is built per call, so every caller gets an independent
    dict; the original's ``deepcopy`` of the freshly built literal was
    redundant and has been removed. ``term`` defaults to the semester
    configured in Django settings.
    """
    return {
        "course": "",
        "term": settings.CURRENT_SEMESTER,
        "toolsOpen": False,
        "clearedSections": "",
        "clearedOnly": False,
        "excludeClosed": False,
        "exemptedSections": "",
        "savedOnly": True,
        "publicOnly": False
    }
class RequestData(models.Model):
    """JSON snapshot of one scheduling request (coursebin/preference/setting)."""
    # Each JSON payload is checked by its validator on model validation.
    coursebin = JSONField(validators=[coursebin_validator, ], default=list)
    preference = JSONField(
        validators=[preference_validator, ], default=createDefaultPreference)
    setting = JSONField(
        validators=[setting_validator, ], default=createDefaultSetting)
class Task(models.Model):
    """One scheduling job submitted by a (possibly anonymous) user."""
    class Meta:
        # Newest tasks first.
        ordering = ['-created']

    # Two-character status codes stored in the ``status`` column.
    PENDING = 'PD'
    PROCESSING = 'PS'
    DONE = 'DN'
    WARNING = 'WN'
    ERROR = 'ER'
    EXCEPT = 'EX'
    STATUS_CHOICES = [
        (PENDING, 'Pending'),
        (PROCESSING, 'Processing'),
        (DONE, 'Done'),
        (WARNING, 'Waring'),  # NOTE(review): label misspelt ('Waring'); left as-is since it may already be user-visible
        (ERROR, 'Error'),
        (EXCEPT, 'Uncaught Exception'),
    ]
    created = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=2, choices=STATUS_CHOICES)
    # Owner is optional: anonymous tasks are allowed (blank/null).
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name="tasks",
        on_delete=models.CASCADE,
        blank=True, null=True
    )
    # PROTECT: request data cannot be deleted while its task exists.
    request_data = models.OneToOneField(
        RequestData, related_name="task", on_delete=models.PROTECT)
    name = models.CharField(max_length=100, blank=True, default="")
    description = models.CharField(max_length=500, blank=True, default="")
    # None until a status/result message is recorded.
    message = models.CharField(max_length=200, null=True, default=None)
    # presumably the number of schedules generated for this task -- confirm
    count = models.PositiveIntegerField(default=0)
class Schedule(models.Model):
    """One generated schedule (a set of sections) scored for a Task."""
    class Meta:
        # Newest task first, then stable by id within a task.
        ordering = ['-task__created', 'id']

    # Component scores plus their aggregate.
    early_score = models.FloatField()
    late_score = models.FloatField()
    break_score = models.FloatField()
    reserved_score = models.FloatField()
    total_score = models.FloatField()
    # PROTECT: a task cannot be deleted while schedules reference it.
    task = models.ForeignKey(
        Task, related_name='schedules', on_delete=models.PROTECT)
    public = models.BooleanField(default=False)
    sections = models.ManyToManyField(Section)
    name = models.CharField(max_length=100, blank=True, default="")
    description = models.CharField(max_length=200, blank=True, default="")
    saved = models.BooleanField(default=False)
|
13,386 | 232e3c2bb3bc883579acd62a1cf44d29245c27f8 | import sys
print("Float value information: ", sys.float_info)
print("Integer value information: ", sys.int_info)
print("Maximum size of an integer: ", sys.maxsize)
|
13,387 | 9e406c7065f493f987d9d8f7d4a6e3d2bff18ede | from myChecks.checkov.terraform.context_parsers.parsers import *
|
13,388 | e32865e468a86383268583280f682a31d4f396d9 | # binary route table
import struct
import socket
def bin_iter(i, start_bit, end_bit):
    """Yield the bits of ``i`` from bit ``start_bit - 1`` down to ``end_bit``.

    Bits are produced most-significant first; yields nothing when
    ``start_bit <= end_bit``.
    """
    for bit in range(start_bit - 1, end_bit - 1, -1):
        yield (i >> bit) & 1
def ip2int(ip_str):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    packed = socket.inet_aton(ip_str)
    (value,) = struct.unpack('!I', packed)
    return value
def ip6int(ip6_str):
    """Convert an IPv6 address string to its 128-bit integer value."""
    packed = socket.inet_pton(socket.AF_INET6, ip6_str)
    high64, low64 = struct.unpack('!QQ', packed)
    return (high64 << 64) | low64
def get_natural_netmask(ip_int):
    """Return the classful ("natural") prefix length for an IPv4 integer.

    Class A -> /8, class B -> /16, class C -> /24; everything else
    (first octet 0, 127, or class D/E) falls through to /32.
    """
    first_octet = (ip_int >> 24) & 255
    if 1 <= first_octet < 127:
        return 8
    if 128 <= first_octet < 192:
        return 16
    if 192 <= first_octet < 224:
        return 24
    return 32
def network2int(ip_network):
    """Split 'a.b.c.d[/len]' into ``(ip_int, prefix_len)``.

    When no explicit prefix length is given, the classful natural mask
    for the address is assumed.
    """
    parts = ip_network.split('/')
    ip_int = ip2int(parts[0])
    if len(parts) == 2:
        prefix_len = int(parts[1])
    else:
        prefix_len = get_natural_netmask(ip_int)
    return ip_int, prefix_len
def ipv4_bin_iter(ip_network, isNetwork=False):
    """Iterate the prefix bits of an IPv4 network (all 32 bits for a host)."""
    ip_int, prefix_len = network2int(ip_network)
    bits = prefix_len if isNetwork else 32
    return bin_iter(ip_int, 32, 32 - bits)
def ipv4_bin_iter_l(ip_network, isNetwork=False):
    """Variant of ipv4_bin_iter that also prints the prefix length.

    NOTE(review): identical to ipv4_bin_iter apart from the print() --
    looks like a debugging leftover; consider removing it or the print.
    """
    ip_int, prefix_len = network2int(ip_network)
    if not isNetwork:
        prefix_len = 32
    print(prefix_len)
    return bin_iter(ip_int, 32, 32-prefix_len)
def network6int(ip_network):
    """Split 'addr[/len]' IPv6 notation into ``(ip_int, prefix_len)``.

    Unlike the IPv4 helper, the implicit default prefix length is /64.
    """
    parts = ip_network.split('/')
    ip_int = ip6int(parts[0])
    prefix_len = int(parts[1]) if len(parts) == 2 else 64
    return ip_int, prefix_len
def ipv6_bin_iter(ip_network, isNetwork=False):
    """Iterate the prefix bits of an IPv6 network (all 128 bits for a host)."""
    ip_int, prefix_len = network6int(ip_network)
    bits = prefix_len if isNetwork else 128
    return bin_iter(ip_int, 128, 128 - bits)
class RouteNode(object):
    """One node of the bit trie: payload indexes plus children for bits 0/1."""

    def __init__(self, indexes=None):
        # Fresh list per node -- avoids the shared-mutable-default trap.
        self.indexes = [] if indexes is None else indexes
        self.children = [None, None]
class BinaryRouteTable(object):
    """Longest-prefix-match route table implemented as a binary (bit) trie."""

    def __init__(self, version=4):
        # Root represents the zero-length prefix; ``version`` selects the
        # bit iterator (IPv4: 32 bits, IPv6: 128 bits).
        self.root = RouteNode()
        if version == 4:
            self.ip_bin_iter = ipv4_bin_iter
        else:
            self.ip_bin_iter = ipv6_bin_iter

    def add(self, ip_network, index):
        """Append ``index`` at the trie node addressed by the network prefix."""
        node = self.root
        for b in self.ip_bin_iter(ip_network, isNetwork=True):
            if node.children[b] is None:
                node.children[b] = RouteNode()
            node = node.children[b]
        node.indexes.append(index)

    def lookup(self, ip):
        """Return the indexes stored at the longest prefix matching ``ip``.

        NOTE(review): ``node.indexes`` is checked *before* descending, so a
        full-length (/32 or /128) entry at the final node is never
        consulted -- confirm whether exact-host routes are expected here.
        """
        node = self.root
        indexes = []
        for b in self.ip_bin_iter(ip, isNetwork=False):
            # Remember the most specific prefix seen so far along the path.
            if node.indexes:
                indexes = node.indexes
            if node.children[b] is None:
                return indexes
            node = node.children[b]
        return indexes  # just in case

    def lookup_len(self, ip):
        """Like lookup(), but also return the depth reached in the trie."""
        node = self.root
        indexes = []
        l = 0
        for b in self.ip_bin_iter(ip, isNetwork=False):
            if node.indexes:
                indexes = node.indexes
            if node.children[b] is None:
                return l, indexes
            node = node.children[b]
            l += 1
        return l, indexes  # just in case
class ExtendRouteTable(BinaryRouteTable):
    """BinaryRouteTable that stores arbitrary entries instead of raw indexes."""

    def __init__(self, version=4):
        super().__init__(version)
        # Entries are kept here; the trie stores their list positions.
        self._entries = []

    def add(self, ip_network, entry):
        """Store ``entry`` and index it in the trie under ``ip_network``."""
        self._entries.append(entry)
        index = len(self._entries) - 1
        return super().add(ip_network, index)

    def lookup(self, ip, isNetwork=False):
        # NOTE(review): ``isNetwork`` is accepted but ignored -- the base
        # class always treats ``ip`` as a host address; confirm callers.
        indexes = super().lookup(ip)
        return [self._entries[ix] for ix in indexes]

    def lookup_len(self, ip, isNetwork=False):
        """Return (match depth, entries) for the longest matching prefix."""
        l, indexes = super().lookup_len(ip)
        return l, [self._entries[ix] for ix in indexes]
def build_rt(rt_path, ip_version):
    """Build an ExtendRouteTable from a '|'-separated route dump file.

    Field 1 of each record is the prefix, field 6 the stored entry.
    The first two lines of the file are treated as a header.
    """
    table = ExtendRouteTable(ip_version)
    with open(rt_path, 'r') as fh:
        next(fh)
        next(fh)  # skip the two header lines
        for record in fh:
            fields = record.split('|')
            table.add(fields[1], fields[6])
    return table
if __name__ == '__main__':
    # smoke test: load a route table and resolve one prefix
    table = build_rt('/path/to/route_table', 4)
    asp = table.lookup('1.1.1.0/24', isNetwork=True)
    print(asp)
|
13,389 | ffb6f2da3262c10d21621adf6f06eb2576c88800 | def rope(n):
    # Base case: a non-positive remaining length contributes nothing.
    if n <= 0:
        return 0
    else:
        # Count this cut plus all ways continuing with pieces of 1, 2 and 5.
        # NOTE(review): exponential recursion with no memoization — fine for
        # tiny n like the call below, slow for large n.
        return 1 + rope(n-1) + rope(n-2) + rope(n-5)
print(rope(5)) |
13,390 | 8559afa6ef9f4a3132cc80c76ae748dd24a990b4 | import common
lines = common.read_file('2020/05/data.txt').splitlines()
# F/L select the lower half (bit 0), B/R the upper half (bit 1),
# so each boarding pass is just a 10-bit binary seat id
seat_bits = str.maketrans('FBLR', '0101')
nums = [int(line.translate(seat_bits), base=2) for line in lines]

# part 1: highest seat id on any pass
max_id = max(nums)
print(max_id)

# part 2: our seat is the single missing id inside the occupied range
for seat in range(min(nums), max(nums) + 1):
    if seat not in nums:
        print(seat)
        break
|
13,391 | 5a9b816e556e0bb9b9cb95484cb271a1cbf7b66f | import numpy as np
import datetime as dt

import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect the existing database into a new automap model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the mapped classes (note the capitalized names)
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB.
# NOTE(review): a single module-level session is shared by all requests —
# fine for this exercise, not thread-safe for production use.
session = Session(engine)
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
    """List all available api routes."""
    routes = [
        "/api/v1.0/precipitation",
        "/api/v1.0/station",
        "/api/v1.0/tobs",
        "/api/v1.0/<start>",
        "/api/v1.0/<start>/<end>",
    ]
    # identical markup to the old f-string block: header, then one route per line
    return "Available Routes:<br/>" + "<br/>".join(routes) + "<br/>"
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return date -> precipitation pairs for the final year of data as JSON."""
    # last observation in the dataset is 2017-08-23; go back one year
    twelve_months = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    # Fixes: missing closing paren (SyntaxError), lowercase `measurement`
    # (NameError — the mapped class is `Measurement`), and this route queried
    # `tobs`; a precipitation endpoint should return `prcp` — TODO confirm
    # against the dataset schema.
    precip_data = dict(
        session.query(Measurement.date, Measurement.prcp)
        .filter(Measurement.date >= twelve_months)
        .all()
    )
    return jsonify(precip_data)
@app.route("/api/v1.0/station")
def station():
    """Return the list of station identifiers as JSON."""
    # Fix: the mapped class is `Station` (capital S); the lowercase name was
    # a NameError (it collided with this function's own name).
    station_data = session.query(Station.station).all()
    stations = list(np.ravel(station_data))
    return jsonify(stations)
@app.route("/api/v1.0/tobs")
def tobs():
    """Return temperature observations from the last year of data as JSON."""
    # Fixes: missing ':' on the def line (SyntaxError), `twelve_months` was an
    # undefined name (it lived only in precipitation()), and lowercase
    # `measurement` was a NameError.
    twelve_months = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    tobs_data = session.query(Measurement.tobs).\
        filter(Measurement.date >= twelve_months).all()
    temps = list(np.ravel(tobs_data))
    return jsonify(temps)
@app.route("/api/v1.0/temp/<start>")
def start(start):
    """Return TMAX, TMIN and TAVG for all dates on or after `start` as JSON."""
    # Fixes: the SQL string contained stray `.\` fragments (SyntaxError /
    # broken SQL) and the loop iterated undefined names `x` and `y`.
    # `start` comes straight from the URL, so it is bound as a parameter
    # instead of being interpolated into the SQL text (SQL injection).
    query = sqlalchemy.text(
        "select max(tobs), min(tobs), avg(tobs) "
        "from measurement where date >= :start")
    row = engine.execute(query, start=start).fetchone()
    titles = ('TMAX', 'TMIN', 'TAVG')
    return jsonify(dict(zip(titles, row)))
@app.route("/api/v1.0/<start>/<end>")
def start_end(start, end):
    """Return TMAX, TMIN and TAVG for dates between `start` and `end` inclusive."""
    # Security fix: `start`/`end` are user-supplied URL segments; the old
    # code interpolated them with str.format into raw SQL (SQL injection).
    # They are now bound parameters.
    query = sqlalchemy.text(
        "select max(tobs), min(tobs), avg(tobs) "
        "from measurement where date >= :start and date <= :end")
    row = engine.execute(query, start=start, end=end).fetchone()
    titles = ('TMAX', 'TMIN', 'TAVG')
    return jsonify(dict(zip(titles, row)))
if __name__ == '__main__':
    # run the Flask development server (debug mode; not for production)
    app.run(debug=True) |
13,392 | 3149591f5d319fe87b6f1a05bbb298e33a7bbe16 | from turtle import *
shape("turtle")
# Fix: speed(0) (fastest animation) was set AFTER all drawing finished,
# where it had no effect; it must be set before drawing starts.
speed(0)
for _ in range(6):
    # one full circle: 360 steps of 2 px, turning 1 degree each step
    for _ in range(360):
        forward(2)
        left(1)
    # rotate before the next circle so the circles overlap in a rosette
    left(45)
mainloop()
|
13,393 | 3a87ae5526ccd74fef89173f8c83d1730076a8a3 | import sys
from pizzabot.pizzabot import Pizzabot
from pizzabot.map import Map
from pizzabot.input_string_parser import InputStringParser
from pizzabot.console_output import ConsoleOutput
from pizzabot.sign_num_navigator import SignNumNavigator
if __name__ == '__main__':
    # assemble the bot from its collaborators (parser, output, map, navigator)
    bot = Pizzabot(
        InputStringParser(),
        ConsoleOutput(),
        Map(),
        SignNumNavigator(),
    )
    # every CLI argument is concatenated into a single instruction string
    bot.start(''.join(sys.argv[1:]))
|
13,394 | d036141e2a00442a5c930a5d47f445b3c0197308 | kredi1 = "hızlı kredi"
kredi2 = "maaşını halkbank alan"
kredi3 = "mutlu emekli"

# print the three product names one by one (kredi1 is defined above)
print(kredi1)
print(kredi2)
print(kredi3)

# the same names again, this time driven from a list
krediler = ["hızlı kredi", "maaşını halkbank alan", "mutlu emekli"]
for kredi in krediler:
    print(kredi)
|
13,395 | 179d76a05387b38536fb45a3235005479ab38d5a | #============= enthought library imports =======================
#============= standard library imports ========================
import time
#============= local library imports ==========================
from src.scripts.core.script_helper import smart_equilibrate#, equilibrate
from src.scripts.core.core_script import CoreScript
from bakeout_script_parser import BakeoutScriptParser
class BakeoutScript(CoreScript):
    '''
    Script that drives a bakeout controller: ramp to a heat setpoint,
    hold it, then ramp back down.  Parsed by BakeoutScriptParser.
    '''
    # parser class used by the CoreScript machinery
    parser_klass = BakeoutScriptParser
    # ramp rates/scales for the heat and cool phases (set by the parser)
    heat_ramp = 0
    heat_scale = None
    cool_ramp = 0
    cool_scale = None
    # minutes to hold the setpoint between heat and cool phases
    maintain_time = 1
    # hardware controller; None means "no hardware attached"
    controller = None

    def get_documentation(self):
        """Return the user-facing HTML documentation for this script type."""
        from src.scripts.core.html_builder import HTMLDoc, HTMLText
        doc = HTMLDoc(attribs = 'bgcolor = "#ffffcc" text = "#000000"')
        doc.add_heading('Bakeout Documentation', heading = 2, color = 'red')
        doc.add_heading('Parameters', heading = 3)
        doc.add_text('Setpoint (C), Duration (min)')
        doc.add_list(['Setpoint (C) -- Bakeout Controller Setpoint',
                      'Duration (min) -- Heating duration'
                      ])
        table = doc.add_table(attribs = 'bgcolor="#D3D3D3" width="90%"')
        r1 = HTMLText('Ex.', face = 'courier', size = '2')
        table.add_row([r1])
        r2 = HTMLText('150,360', face = 'courier', size = '2')
        table.add_row([r2])
        return str(doc)

    def raw_statement(self, args):
        """Execute one raw script line: args = (setpoint_C, duration_min)."""
        #change the setpoint temp
        if self.controller is not None:
            self.controller.setpoint = float(args[0])
        #wait for dur mins
        self.wait(float(args[1]) * 60)

    def kill_script(self):
        """Abort the script, telling the controller this is a forced kill."""
        if self.controller is not None:
            self.controller.end(script_kill = True)
        # explicit base-class call kept as-is (instead of super())
        # super(BakeoutScript, self).kill_script()
        CoreScript.kill_script(self)

    def wait_for_setpoint(self, sp, mean_check = True, std_check = False, mean_tolerance = 5, std_tolerance = 1, timeout = 15):
        '''
        Block until the controller temperature equilibrates at setpoint `sp`.

        mean_check/std_check select which statistics smart_equilibrate uses,
        with the given tolerances; `timeout` bounds the wait.  In simulation
        mode a short sleep stands in for real equilibration.
        '''
        self.info('waiting for setpoint equilibration')
        if self.controller is not None:
            if self.controller.simulation:
                time.sleep(3)
            else:
                #equilibrate(sp, frequency = frequency, mean_check = True, mean_tolerance = tolerance)
                smart_equilibrate(self,
                                  self.controller.get_temperature,
                                  sp,
                                  mean_check = mean_check,
                                  std_check = std_check,
                                  mean_tolerance = mean_tolerance,
                                  std_tolerance = std_tolerance,
                                  timeout = timeout
                                  )

    def maintain(self, *args):
        '''
        Hold the current setpoint for `maintain_time` minutes (seconds in
        simulation), polling once per second so a kill is noticed promptly.
        '''
        if self.controller is not None:
            # led state 2: presumably "maintaining" indicator — TODO confirm
            self.controller.led.state = 2
            if self.controller.simulation:
                mt = 3 / 60.
            else:
                mt = self.maintain_time
            self.info('Maintaining setpoint for %s minutes' % mt)
            mt *= 60
            st = time.time()
            while time.time() - st < mt and self.isAlive():
                time.sleep(1)

    def goto_setpoint(self, name):
        '''
        Ramp the controller to the 'heat' or 'cool' setpoint named by `name`,
        then wait for equilibration (looser std-based criteria when cooling).
        '''
        controller = self.controller
        # pull <name>_ramp, <name>_setpoint, <name>_scale off this instance
        r = getattr(self, '%s_ramp' % name)
        sp = getattr(self, '%s_setpoint' % name)
        s = getattr(self, '%s_scale' % name)
        if controller is not None:
            # led state -1: presumably "ramping" indicator — TODO confirm
            controller.led.state = -1
            controller.ramp_to_setpoint(r, sp, s)
            #wait until setpoint reached or ramping timed out
            kw = dict()
            if name == 'cool':
                kw['mean_check'] = False
                kw['std_check'] = True
                kw['std_tolerance'] = 5
                kw['timeout'] = 60
            self.wait_for_setpoint(sp, **kw)
#============= EOF ====================================
# def load_file(self):
# '''
# '''
#
# def set_ramp(a, attr):
# scale = None
# if ',' in a:
# ramp, scale = a.split(',')
# else:
# ramp = a
# setattr(self, '%s_ramp' % attr, float(ramp))
# if scale is not None:
# setattr(self, '%s_scale' % attr, float(scale))
#
# f = self._file_contents_
# h, t = self.file_name.split('.')
# if t == 'bo':
# set_ramp(f[0], 'heat')
# self.heat_setpoint = float(f[1])
# self.maintain_time = int(f[2])
# set_ramp(f[3], 'cool')
# self.cool_setpoint = float(f[4])
# else:
# self.kind = 'step'
#
# return True
# def _execute_raw_line(self, line):
# args = line.split(',')
# sp = float(args[0])
# dur = 1
# if len(args) == 2:
# dur = float(args[1])
#
# #change the setpoint temp
## self.manager.setpoint = sp
#
# #wait for dur mins
# self.wait(dur * 60)
# def _run_(self):
# '''
# '''
# self.manager.led.state = 0
# if self.kind == 'ramp':
# for func, args in [(self.goto_setpoint, ('heat',)),
# (self.maintain, tuple()),
# (self.goto_setpoint, ('cool',)),
# # (self.manager.end, tuple())
# ]:
#
# if self.isAlive():
# func(*args)
# else:
# for i in self._file_contents_:
#
#
# args = i.split(',')
# sp = float(args[0])
# dur = 1
# if len(args) == 2:
# dur = float(args[1])
#
# self.manager.setpoint = sp
## if not self.isAlive():
## break
#
# self.wait(dur * 60)
## st = time.time()
## while time.time() - st < dur * 60.:
## if not self.isAlive():
## break
# #time.sleep(0.5)
|
13,396 | ff4a7f9d5984d7de65fc26b008011cd4da515fcc | from telegram.ext import CommandHandler
from ..language import get_text
def start_callback(update, context):
    """Reply to the /start command with the localized greeting text."""
    reply = get_text("start", context)
    update.message.reply_text(reply)


# handler object registered by the bot dispatcher
start = CommandHandler("start", start_callback)
|
13,397 | 2a35c2aa2a5c4df1fd11bb222f390ee2d33bf5f0 | import sys
def find_dist(records):
    """Return the total length of the polyline through the (x, y) points."""
    total = 0
    # walk consecutive point pairs; an empty or single-point path is length 0
    for (x0, y0), (x1, y1) in zip(records, records[1:]):
        total += ((x0 - x1)**2 + (y0 - y1)**2)**(1/2)
    return total
# First input line: point count n and GPS sampling interval t (seconds).
n_t = sys.stdin.readline().split()
n = int(n_t[0])
t = float(n_t[1])
actual_run = []   # true path: (x, y) per input point
GPS_record = []   # simulated GPS samples taken every t seconds
t_prev = 0        # timestamp of the previous true point
t_gps = 0         # timestamp of the next GPS sample to emit
# read the coords and timestamp
for i in range(n):
    data = sys.stdin.readline().split()
    # record coords in actual run list and find GPS coords after t sec
    x = float(data[0])
    y = float(data[1])
    t_run = int(data[2])
    actual_run.append((x,y))
    # previous true point; for i == 0 this harmlessly reads actual_run[-1]
    # before the `continue` below skips the interpolation
    x_0, y_0 = actual_run[i-1]
    if i == 0:
        continue
    # linearly interpolate GPS samples on the segment (t_prev, t_run]
    while t_gps <= t_run:
        t_ratio = (t_gps - t_prev)/(t_run - t_prev)
        x_gps = x_0 + t_ratio*(x - x_0)
        y_gps = y_0 + t_ratio*(y - y_0)
        GPS_record.append((x_gps, y_gps))
        t_gps += t
    t_prev = t_run
# make sure the GPS track ends exactly at the final true point
if GPS_record[-1] != actual_run[-1]:
    GPS_record.append(actual_run[-1])
dist_actual = find_dist(actual_run)
dist_GPS = find_dist(GPS_record)
difference = abs(dist_actual - dist_GPS)
# print the GPS distance error as a percentage of the true distance
print((difference/dist_actual)*100) |
13,398 | 3da87b39e20c42601fbdcd893d2a642a65da239c | """
把只包含质因子2、3和5的数称作丑数(Ugly Number)。例如6、8都是丑数,但14不是,因为它包含质因子7。 习惯上我们把1当做是第一个丑数。求按从小到大的顺序的第N个丑数。
"""
class Solution:
    def GetUglyNumber_Solution(self, index):
        """Return the index-th ugly number (1-based), or 0 for a falsy index.

        Ugly numbers have no prime factors other than 2, 3 and 5; by
        convention 1 is the first: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
        """
        if not index:
            return 0
        ugly = [1] * index
        i2 = i3 = i5 = 0  # pointers into `ugly` for the 2/3/5 multipliers
        for pos in range(1, index):
            nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
            ugly[pos] = nxt
            # advance every pointer whose candidate is no longer ahead;
            # this also skips duplicates such as 2*3 == 3*2
            while ugly[i2] * 2 <= nxt:
                i2 += 1
            while ugly[i3] * 3 <= nxt:
                i3 += 1
            while ugly[i5] * 5 <= nxt:
                i5 += 1
        return ugly[-1]
|
13,399 | c34fd9577a31946e778aa665a17e238ac2a4ceeb | # adding a comment
list1 = list("hello")  # list() splits the string into single characters
print(list1) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.