index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,800 | 5ff0c6bde8f3ffcb1f5988b0bbd1dfdd7fa2e818 | # The project is based on Tensorflow's Text Generation with RNN tutorial
# Copyright Petros Demetrakopoulos 2020
import tensorflow as tf
import numpy as np
import os
import time
from flask import jsonify  # assumed: jsonify (used in doSomeWork below) comes from Flask
from random import seed
from random import randint
import sys
import urllib.request
stopChars = [',', '(', ')', '.', '-', '[', ']', '"']
corpus_path = "/tmp/data.txt"
text = open(corpus_path, 'rb').read().decode(encoding='utf-8')
text = preprocessText(text)
corpus_words = corpusToList(text)
map(str.strip, corpus_words) # trim words
vocab = sorted(set(corpus_words))
print('Corpus length (in words):', len(corpus_words))
print('Unique words in corpus: {}'.format(len(vocab)))
word2idx = {u: i for i, u in enumerate(vocab)}
idx2words = np.array(vocab)
word_as_int = np.array([word2idx[c] for c in corpus_words])
# The maximum length sentence we want for a single input in words
seqLength = 10
examples_per_epoch = len(corpus_words)//(seqLength + 1)
# Create training examples / targets
wordDataset = tf.data.Dataset.from_tensor_slices(word_as_int)
# generating batches of 10 words each
sequencesOfWords = wordDataset.batch(seqLength + 1, drop_remainder=True)
def yuh():
    # expose the vocabulary/dataset objects as globals; generateLyrics() and
    # doSomeWork() read them after this function runs
    global text, corpus_words, vocab, word2idx, idx2words, word_as_int
    global seqLength, examples_per_epoch, wordDataset, sequencesOfWords
    corpus_path = "/tmp/data.txt"
    text = open(corpus_path, 'rb').read().decode(encoding='utf-8')
    text = preprocessText(text)
    corpus_words = corpusToList(text)
    corpus_words = [w.strip() for w in corpus_words]  # trim words (a bare map() is lazy and had no effect)
vocab = sorted(set(corpus_words))
print('Corpus length (in words):', len(corpus_words))
print('Unique words in corpus: {}'.format(len(vocab)))
word2idx = {u: i for i, u in enumerate(vocab)}
idx2words = np.array(vocab)
word_as_int = np.array([word2idx[c] for c in corpus_words])
# The maximum length sentence we want for a single input in words
seqLength = 10
examples_per_epoch = len(corpus_words)//(seqLength + 1)
# Create training examples / targets
wordDataset = tf.data.Dataset.from_tensor_slices(word_as_int)
# generating batches of 10 words each
sequencesOfWords = wordDataset.batch(seqLength + 1, drop_remainder=True)
def preprocessText(text):
text = text.replace('\n', ' ').replace('\t', '')
processedText = text.lower()
for char in stopChars:
processedText = processedText.replace(char, ' ')
return processedText
def corpusToList(corpus):
corpusList = [w for w in corpus.split(' ')]
# removing empty strings from list
corpusList = [i for i in corpusList if i]
return corpusList
def split_input_target(chunk):
input_text = chunk[:-1]
target_text = chunk[1:]
return input_text, target_text
def loss(labels, logits):
return tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
def generateLyrics(model, startString, temp):
# Number of words to generate
num_generate = 30
# Converting our start string to numbers (vectorizing)
start_string_list = [w for w in startString.split(' ')]
input_eval = [word2idx[s] for s in start_string_list]
input_eval = tf.expand_dims(input_eval, 0)
text_generated = []
model.reset_states()
for i in range(num_generate):
predictions = model(input_eval)
predictions = tf.squeeze(predictions, 0)
predictions = predictions / temp
predicted_id = tf.random.categorical(
predictions, num_samples=1)[-1, 0].numpy()
input_eval = tf.expand_dims([predicted_id], 0)
text_generated.append(' ' + idx2words[predicted_id])
return (startString + ''.join(text_generated))
def doSomeWork(artist):
url = '''https://firebasestorage.googleapis.com/v0/b/shellhacks-327117.appspot.com/o/models%2Fkendrick.txt?alt=media&token=604b7b6c-2ef0-4611-ab6e-a08dd53e99be'''
urllib.request.urlretrieve(url, '/tmp/data.txt')
if artist == "kanye":
url = '''
https://firebasestorage.googleapis.com/v0/b/shellhacks-327117.appspot.com/o/models%2Fkanye.h5?alt=media&token=a0b94c61-e696-453d-9a16-110af66f6afd'''
if artist == "nas":
url = '''
https://firebasestorage.googleapis.com/v0/b/shellhacks-327117.appspot.com/o/models%2Fnas.h5?alt=media&token=037ef224-be5f-4449-a89c-c1897e164289'''
if artist == "biggie":
url = '''https://firebasestorage.googleapis.com/v0/b/shellhacks-327117.appspot.com/o/models%2Fbiggie.h5?alt=media&token=3244a8e2-017c-472f-a66b-7810a198d038'''
if artist == "jayz":
url = '''https://firebasestorage.googleapis.com/v0/b/shellhacks-327117.appspot.com/o/models%2Fjayz.h5?alt=media&token=500ff44d-60fe-4774-9c85-5ea6f06da81b'''
if artist == "ross" or artist == "kendrick" or artist == "50cent":
url = '''
https://firebasestorage.googleapis.com/v0/b/shellhacks-327117.appspot.com/o/models%2Fkendrick.h5?alt=media&token=6ceff75d-5a71-49d4-b927-e727888d872f
'''
named = "/tmp/" + artist + ".h5"
if (artist == "biggie") or artist == "50cent":
named = "/tmp/kendrick" + ".h5"
urllib.request.urlretrieve(url, named)
yuh()
model = tf.keras.models.load_model(named)
seed(1)
    input_str = vocab[randint(0, len(vocab) - 1)]  # randint is inclusive on both ends; avoid IndexError
lyricz = []
for i in range(10):
lyrics = generateLyrics(model, startString=input_str, temp=0.6)
temp = lyrics.replace("nigga", "homie").replace("niggas", "homies").replace("nigger", "homie").replace(
"niggers", "homies").replace("faggot", "maggot").replace("fag", "mag").replace('\r', '')
lyricz.append(lyrics.replace("nigga", "homie").replace('\r', ''))
input_str = temp.split()[-1]
return jsonify({
"Success": "It worked",
"Url": " ".join(lyricz)
})
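# Minimal local smoke test (a sketch): requires TensorFlow, Flask and network access,
# and jsonify() must be called inside a Flask app/request context.
# if __name__ == "__main__":
#     print(doSomeWork("kendrick"))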
|
8,801 | 9276c4106cbe52cf0e2939b5434d63109910a45c | from pymoo.model.duplicate import ElementwiseDuplicateElimination
class ChrDuplicates(ElementwiseDuplicateElimination):
"""Detects duplicate chromosome, which the base ElementwiseDuplicateElimination then removes."""
def is_equal(self, a, b):
"""
Checks whether two character chromosome elements are equal.
        This is provided for completeness - the core comparison logic lives in the
        Optimisation.Chromosome.__eq__ override.
:param a: the first character chromosome to compare
:type a: Optimisation.Chromosome
:param b: the second character chromosome to compare
:type b: Optimisation.Chromosome
:return: a boolean stating whether they're equal
"""
return a == b
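# Example wiring (a sketch; import path assumes the pre-0.5 pymoo layout used above):
# from pymoo.algorithms.nsga2 import NSGA2
# algorithm = NSGA2(pop_size=100, eliminate_duplicates=ChrDuplicates())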
|
8,802 | cc6e827eec5256ce0dbe13958b6178c59bcd94a7 | from scipy.stats import rv_discrete
import torch
import torch.nn.functional as F
import numpy as np
from utils import *
def greedy_max(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):
'''
    px: selection probabilities over the sentences (should sum to 1)
    sentence_embed: [doc_length, embed_dim]
'''
x = list(range(doc_length))
px = px.cpu().numpy()
score=px
prob = 1
summary_representation = []
bias = np.ones(px.shape)
selected = []
wc=0
lengths=[]
summary = []
while wc<=length_limit:
sample = np.argmax(score)
selected.append(sample)
wc+=sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
summary_representation.append(sentence_embed[sample])
s = torch.stack(summary_representation,1).unsqueeze(0)
all_sent = sentence_embed[:doc_length,:].unsqueeze(2)
redundancy_score =torch.max(F.cosine_similarity(all_sent,s,1),1)[0].cpu().numpy()
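        # MMR-style rescoring: trade relevance (px) off against cosine redundancy with the sentences already selected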
score = lamb*px - ((1-lamb)*redundancy_score) + (1-lamb)*bias
for i_sel in selected:
score[i_sel] = 0
# print(len(selected))
summary ='\n'.join(summary)
# summary_representation= summary_representation.to(device)
return summary, prob, selected
def greedy_nommr(doc_length,px,sentence_embed,sentences,device,sentence_lengths,length_limit=200,lamb=0.2):
'''
    px: selection probabilities over the sentences (should sum to 1)
    sentence_embed: [doc_length, embed_dim]
'''
x = list(range(doc_length))
px = px.cpu().numpy()
score=px
prob = 1
bias = np.ones(px.shape)
summary_representation = []
selected = []
wc=0
lengths = []
summary=[]
while wc<=length_limit:
sample = np.argmax(score)
selected.append(sample)
wc+=sentence_lengths[sample]
lengths.append(sentence_lengths[sample])
summary.append(sentences[sample])
for i_sel in selected:
score[i_sel] = 0
summary = '\n'.join(summary)
return summary, prob, selected
def compute_reward(score_batch,input_lengths,output,sentences_batch,reference_batch,device,sentence_lengths_batch,number_of_sample=5,lamb=0.1):
reward_batch = []
rl_label_batch = torch.zeros(output.size()[:2]).unsqueeze(2)
for i_data in range(len(input_lengths)):
# summary_i = summary_embed[i_data]
doc_length = input_lengths[i_data]
scores = score_batch[i_data,:doc_length]
sentence_lengths = sentence_lengths_batch[i_data]
sentence_embed = output[:doc_length,i_data,:]
sentences = sentences_batch[i_data]
reference = reference_batch[i_data]
# final_choice = None
result,prob,selected = greedy_nommr(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)
reward_greedy = get_rouge_single(result,reference)
result,prob,selected = greedy_max(doc_length,scores,sentence_embed,sentences,device,sentence_lengths,lamb = lamb)
reward_hi = get_rouge_single(result,reference)
final_choice = selected
# print(reward_hi-reward_greedy)
reward_batch.append(reward_hi-reward_greedy)
rl_label_batch[final_choice,i_data,:] = 1
reward_batch = torch.FloatTensor(reward_batch).unsqueeze(0).to(device)
rl_label_batch = rl_label_batch.to(device)
reward_batch.requires_grad_(False)
return reward_batch,rl_label_batch
|
8,803 | e54078f21176bbb7accb4164e7b56633b13cc693 | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
BATCH_START=0
TIME_STEPS=20
BATCH_SIZE=50
INPUT_SIZE=1
OUTPUT_SIZE=1
CELL_SIZE=10
LR=0.006
#generate data
def get_batch():
global BATCH_START,TIME_STEPS
xs=np.arange(BATCH_START,BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE,TIME_STEPS))/(10*np.pi)
seq=np.sin(xs)
res=np.cos(xs)
    # shift the window so the next batch continues where this one ended
BATCH_START+=TIME_STEPS
# all return shape is (batch_size,time_step,input_size)
return [seq[:,:,np.newaxis],res[:,:,np.newaxis],xs]
#def RNN LSTM Structure
class LSTMRNN(object):
def __init__(self,n_steps,input_size,output_size,cell_size,batch_size):
self.n_steps=n_steps
self.input_size=input_size
self.output_size=output_size
self.cell_size=cell_size
self.batch_size=batch_size
with tf.name_scope('inputs'):
self.xs=tf.placeholder(tf.float32,[None,n_steps,input_size],name='xs')
            self.ys=tf.placeholder(tf.float32,[None,n_steps,output_size],name='ys')  # targets have output_size channels, not input_size
with tf.variable_scope('in_hidden'):
self.add_input_layer()
with tf.variable_scope('LSTM_cell'):
self.add_cell()
with tf.variable_scope('out_hidden'):
self.add_output_layer()
with tf.name_scope('cost'):
self.compute_cost()
with tf.name_scope('train'):
self.train_op=tf.train.AdamOptimizer(LR).minimize(self.cost)
#add input layer
def add_input_layer(self):
#shape(batch,step,input)=>(batch*step,input)
l_in_x=tf.reshape(self.xs,[-1,self.input_size],name='2_2D')
Ws_in=self._weight_variable([self.input_size,self.cell_size])
bs_in=self._bias_variable([self.cell_size])
with tf.name_scope('Wx_plus_b'):
l_in_y=tf.matmul(l_in_x,Ws_in)+bs_in
self.l_in_y=tf.reshape(l_in_y,[-1,self.n_steps,self.cell_size],name='2_3D')
#add cell
def add_cell(self):
lstm_cell=tf.contrib.rnn.BasicLSTMCell(self.cell_size,forget_bias=1.0,state_is_tuple=True)
with tf.name_scope('initial_state'):
self.cell_init_state=lstm_cell.zero_state(self.batch_size,dtype=tf.float32)
self.cell_outputs,self.cell_final_state=tf.nn.dynamic_rnn(lstm_cell,self.l_in_y,initial_state=self.cell_init_state,time_major=False)
#add output layer
def add_output_layer(self):
l_out_x=tf.reshape(self.cell_outputs,[-1,self.cell_size],name='2_2D')
Ws_out=self._weight_variable([self.cell_size,self.output_size])
bs_out=self._bias_variable([self.output_size,])
with tf.name_scope('Wx_plus_b'):
self.pred=tf.matmul(l_out_x,Ws_out)+bs_out
def compute_cost(self):
losses=tf.contrib.legacy_seq2seq.sequence_loss_by_example(
[tf.reshape(self.pred,[-1],name='reshape_pred')],
[tf.reshape(self.ys,[-1],name='reshape_target')],
[tf.ones([self.batch_size*self.n_steps],dtype=tf.float32)],
average_across_timesteps=True,
softmax_loss_function=self.ms_error,
name='losses'
)
with tf.name_scope('average_cost'):
self.cost=tf.div(
tf.reduce_sum(losses,name='losses_sum'),
self.batch_size,
name='average_cost'
)
tf.summary.scalar('cost',self.cost)
@staticmethod
def ms_error(labels,logits):
return tf.square(tf.subtract(labels,logits))
def _weight_variable(self,shape,name='weights'):
initializer=tf.random_normal_initializer(mean=0.,stddev=1.,)
return tf.get_variable(shape=shape,initializer=initializer,name=name)
def _bias_variable(self,shape,name='biases'):
initializer=tf.constant_initializer(0.1)
return tf.get_variable(shape=shape,initializer=initializer,name=name)
#train
if __name__=='__main__':
model=LSTMRNN(TIME_STEPS,INPUT_SIZE,OUTPUT_SIZE,CELL_SIZE,BATCH_SIZE)
sess=tf.Session()
#merge for tensorboard
merged=tf.summary.merge_all()
writer=tf.summary.FileWriter("lstmlogs",sess.graph)
sess.run(tf.global_variables_initializer())
    # interactive plotting
plt.ion()
plt.show()
#train for 200
for i in range(200):
seq,res,xs=get_batch()
if i==0:
feed_dict={model.xs:seq,model.ys:res,}
else:
feed_dict={model.xs:seq,model.ys:res,model.cell_init_state:state}
#train
_,cost,state,pred=sess.run([model.train_op,model.cost,model.cell_final_state,model.pred],feed_dict=feed_dict)
#plotting
plt.plot(xs[0,:],res[0].flatten(),'r',xs[0,:],pred.flatten()[:TIME_STEPS],'b--')
plt.ylim((-1.2,1.2))
plt.draw()
plt.pause(0.3)
if i%20==0:
            # print the cost rounded to 4 decimal places
print('cost',round(cost,4))
result=sess.run(merged,feed_dict)
writer.add_summary(result,i)
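# Note (assumption): this script targets TF1.x APIs (placeholder, tf.contrib, Session);
# to run it under TF2 you would need tf.compat.v1 plus tf.compat.v1.disable_eager_execution().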
|
8,804 | b3cb94a44f64091714650efb81c4cad27b211cef | import os
import math
import shutil
from evoplotter import utils
from evoplotter.dims import *
from evoplotter import printer
import numpy as np
CHECK_CORRECTNESS_OF_FILES = 1
STATUS_FILE_NAME = "results/status.txt"
OPT_SOLUTIONS_FILE_NAME = "opt_solutions.txt"
class TableGenerator:
"""Generates table from data."""
def __init__(self, f_cell, dim_rows, dim_cols, headerRowNames, title="", color_scheme=None,
table_postprocessor=None, vertical_border=1, table_variants=None,
default_color_thresholds=None, layered_headline=True,
only_nonempty_rows=True, **kwargs):
self.f_cell = f_cell
self.dim_rows = dim_rows
self.dim_cols = dim_cols
self.title = title
self.color_scheme = color_scheme
self.table_postprocessor = table_postprocessor
self.vertical_border = vertical_border
self.headerRowNames = headerRowNames
# create a table for each variant and put them next to each other
self.table_variants = table_variants if table_variants is not None else [lambda p: True]
self.default_color_thresholds = default_color_thresholds
self.layered_headline = layered_headline
self.only_nonempty_rows = only_nonempty_rows
self.init_kwargs = kwargs.copy()
def apply(self, props, new_color_thresholds=None):
text = ""
for variant in self.table_variants: # each variant is some predicate on data
props_variant = [p for p in props if variant(p)]
if self.only_nonempty_rows:
dim_rows_variant = Dim([c for c in self.dim_rows.configs if len(c.filter_props(props_variant)) > 0])
else:
dim_rows_variant = self.dim_rows
txt = printer.latex_table(props_variant, dim_rows_variant, self.dim_cols, self.f_cell,
layered_headline=self.layered_headline, vertical_border=self.vertical_border,
headerRowNames=self.headerRowNames, **self.init_kwargs)
txt = self.table_postprocessor(txt)
ct = new_color_thresholds if new_color_thresholds is not None else self.default_color_thresholds
if self.color_scheme is not None and ct is not None:
cv0, cv1, cv2 = ct
txt = printer.table_color_map(txt, cv0, cv1, cv2, "colorLow", "colorMedium", "colorHigh")
text += r"\noindent"
text += txt
return text
class Experiment:
def __init__(self):
self.tables = []
self.listings = []
def delete_logs(props, pred, verbose=True, simulate=False):
for p in props:
if "evoplotter.file" in p and pred(p):
path = p["evoplotter.file"]
if not simulate:
os.remove(path)
if verbose:
print("File removed: {0}".format(path))
def print_props_filenames(props):
for p in props:
if "thisFileName" in p:
print(p["thisFileName"])
else:
print("'thisFileName' not specified! Printing content instead: " + str(p))
def create_errors_listing(error_props, filename):
f = open("results/listings/{0}".format(filename), "w")
print("Creating log of errors ({0})...".format(filename))
for i, p in enumerate(error_props):
if i > 0:
f.write("\n" + ("-" * 50) + "\n")
for k in sorted(p.keys()):
v = p[k]
f.write("{0} = {1}\n".format(k, v))
f.close()
def create_errors_solver_listing(error_props, filename, pred=None):
if pred is None:
pred = lambda x: True
f = open("results/listings/{0}".format(filename), "w")
print("Creating log of errors ({0})...".format(filename))
for i, p in enumerate(error_props):
if not pred(p): # ignore properties with certain features, e.g., types of errors
continue
if i > 0:
f.write("\n" + ("-" * 50) + "\n\n")
# read the whole original file, because multiline error messages are not preserved in dicts
with open(p["evoplotter.file"], 'r') as content_file:
content = content_file.read()
f.write(content)
f.close()
def load_correct_props(folders):
props_cdgpError = utils.load_properties_dirs(folders, exts=[".cdgp.error"], add_file_path=True)
exts = [".cdgp"]
props0 = utils.load_properties_dirs(folders, exts=exts, add_file_path=True)
def is_correct(p):
return "result.best.verificationDecision" in p
# Filtering props so only correct ones are left
props = [p for p in props0 if is_correct(p)]
# print("Filtered (props):")
# for p in props:
# if "resistance_par3_c1_10" in p["benchmark"] and p["method"] == "CDGP":
# print(p["evoplotter.file"])
# print("Filtered (props_cdgpError):")
# for p in props_cdgpError:
# if "resistance_par3_c1_10" in p["benchmark"] and p["method"] == "CDGP":
# print(p["evoplotter.file"])
# Clear log file
# print("[del] props")
# fun = lambda p: p["method"] == "CDGP" and p["partialConstraintsInFitness"] == "true"
# delete_logs(props, fun, simulate=True)
# print("[del] props_cdgpError")
# delete_logs(props_cdgpError, fun, simulate=True)
create_errors_solver_listing(props_cdgpError, "errors_solver.txt")
# Printing names of files which finished with error status or are incomplete.
if CHECK_CORRECTNESS_OF_FILES:
props_errors = [p for p in props0 if not is_correct(p)]
create_errors_listing(props_errors, "errors_run.txt")
if len(props_errors) > 0:
print("Files with error status:")
print_props_filenames(props_errors)
print("Loaded: {0} correct property files, {1} incorrect; All log files: {2}".format(len(props), len(props_errors), len(props) + len
(props_errors)))
print("Runs that ended with '.cdgp.error': {0}".format(len(props_cdgpError)))
print_props_filenames(props_cdgpError)
return props
def produce_status_matrix(dim, props):
"""Generates a status data in the form of a python list. It can be
later used to retry missing runs.
:param dim: (Dimension) dimensions on which data are to be divided.
    :param props: (list[dict[str,str]]) list of properties dicts loaded from log files.
:return: (str) Python code of a list containing specified data.
"""
text = "["
for config in dim:
numRuns = len(config.filter_props(props))
text += "({0}, {1}), ".format(config.stored_values, numRuns)
return text + "]"
def save_listings(props, dim_rows, dim_cols):
"""Saves listings of various useful info to separate text files."""
assert isinstance(dim_rows, Dim)
assert isinstance(dim_cols, Dim)
utils.ensure_dir("results/listings/errors/")
# Saving optimal verified solutions
for dr in dim_rows:
bench = dr.get_caption()
bench = bench[:bench.rfind(".")] if "." in bench else bench
f = open("results/listings/verified_{0}.txt".format(bench), "w")
f_errors = open("results/listings/errors/verified_{0}.txt".format(bench), "w")
props_bench = dr.filter_props(props)
for dc in dim_cols:
f.write("{0}\n".format(dc.get_caption()))
f_errors.write("{0}\n".format(dc.get_caption())) # TODO: finish
props_final = [p for p in dc.filter_props(props_bench) if is_verified_solution(p)]
for p in props_final:
fname = p["thisFileName"].replace("/home/ibladek/workspace/GECCO19/gecco19/", "")
best = p["result.best"]
fit = float(p["result.best.mse"])
if fit >= 1e-15:
f.write("{0}\t\t\t(FILE: {1}) (MSE: {2})\n".format(best, fname, fit))
else:
f.write("{0}\t\t\t(FILE: {1})\n".format(best, fname))
f.write("\n\n")
f.close()
f_errors.close()
def normalized_total_time(p, max_time=3600000):
"""If time was longer than max_time, then return max_time, otherwise return time. Time is counted in miliseconds."""
if "cdgp.wasTimeout" in p and p["cdgp.wasTimeout"] == "true":
v = 3600000
else:
v = int(float(p["result.totalTimeSystem"]))
return max_time if v > max_time else v
def is_verified_solution(p):
k = "result.best.verificationDecision"
return p["result.best.isOptimal"] == "true" and p[k] == "unsat"
def is_approximated_solution(p):
"""Checks if the MSE was below the threshold."""
tr = float(p["optThreshold"])
# TODO: finish
k = "result.best.verificationDecision"
return p["result.best.isOptimal"] == "true" and p[k] == "unsat"
def get_num_optimal(props):
props2 = [p for p in props if is_verified_solution(p)]
return len(props2)
def get_num_optimalOnlyMse(props):
# "cdgp.optThreshold" in p and
for p in props:
if "optThreshold" not in p:
print(str(p))
# Sometimes it is 'optThreshold', and sometimes 'cdgp.optThreshold'...
# props2 = [p for p in props if float(p["result.best.mse"]) <= float(p["optThreshold"])]
num = 0
for p in props:
if "optThreshold" in p:
tr = p["optThreshold"]
elif "optThreshold" in p:
tr = p["cdgp.optThreshold"]
else:
raise Exception("No optThreshold in log file")
if float(p["result.best.mse"]) <= tr:
num += 1
return num
def get_num_allPropertiesMet(props):
props2 = [p for p in props if p["result.best.verificationDecision"] == "unsat"]
return len(props2)
def get_num_computed(filtered):
return len(filtered)
def fun_successRate_full(filtered):
if len(filtered) == 0:
return "-"
num_opt = get_num_optimal(filtered)
return "{0}/{1}".format(str(num_opt), str(len(filtered)))
def get_successRate(filtered):
num_opt = get_num_optimal(filtered)
return float(num_opt) / float(len(filtered))
def fun_successRateMseOnly(filtered):
if len(filtered) == 0:
return "-"
n = get_num_optimalOnlyMse(filtered)
if n == 0:
return "-"
else:
sr = n / float(len(filtered))
return "{0}".format("%0.2f" % round(sr, 2))
def fun_average_mse(filtered):
res = 0.0
num = 0
# Sometimes there was "inf" in the results. We will ignore those elements.
for p in filtered:
x = float(p["result.best.mse"])
if not "n" in str(x):
res += x
num += 1
else:
print("Nan encountered")
if num == 0:
return "-"
else:
return res / num
def fun_average_mse_sd(filtered):
"""Returns average together with standard deviation."""
res = 0.0
num = 0
# Sometimes there was "inf" in the results. We will ignore those elements.
for p in filtered:
x = float(p["result.best.mse"])
if not "n" in str(x):
res += x
num += 1
else:
print("Nan encountered")
    if num == 0:  # guard before dividing
        return "-"
    avg = res / num
    sd = 0.0
    for p in filtered:
        x = float(p["result.best.mse"])
        if not "n" in str(x):
            sd += (x - avg) ** 2.0
    sd = math.sqrt(sd / num)
    return r"${0} \pm{1}$".format(avg, sd)
def fun_successRate(filtered):
if len(filtered) == 0:
return "-"
sr = get_successRate(filtered)
return "{0}".format("%0.2f" % round(sr, 2))
def fun_allPropertiesMet(filtered):
if len(filtered) == 0:
return "-"
num_opt = get_num_allPropertiesMet(filtered)
sr = float(num_opt) / float(len(filtered))
return "{0}".format("%0.2f" % round(sr, 2))
def get_stats_size(props):
vals = [float(p["result.best.size"]) for p in props]
if len(vals) == 0:
return "-"#-1.0, -1.0
else:
return str(int(round(np.mean(vals)))) #, np.std(vals)
def get_stats_sizeOnlySuccessful(props):
vals = [float(p["result.best.size"]) for p in props if is_verified_solution(p)]
if len(vals) == 0:
return "-"#-1.0, -1.0
else:
return str(int(round(np.mean(vals)))) #, np.std(vals)
def get_stats_maxSolverTime(props):
if len(props) == 0 or "solver.allTimesCountMap" not in props[0]:
return "-"
times = []
for p in props:
timesMap = p["solver.allTimesCountMap"]
parts = timesMap.split(", ")[-1].split(",")
times.append(float(parts[0].replace("(", "")))
return "%0.3f" % max(times)
def get_stats_avgSolverTime(props):
if len(props) == 0 or "solver.allTimesCountMap" not in props[0] or props[0]["method"] != "CDGP":
return "-"
sum = 0.0
sumWeights = 0.0
for p in props:
timesMap = p["solver.allTimesCountMap"]
pairs = timesMap.split(", ")
if len(pairs) == 0:
continue
for x in pairs:
time = float(x.split(",")[0].replace("(", ""))
weight = float(x.split(",")[1].replace(")", ""))
sum += time * weight
sumWeights += weight
if sumWeights == 0.0:
return "%0.3f" % 0.0
else:
return "%0.3f" % (sum / sumWeights)
def get_avgSolverTotalCalls(props):
if len(props) == 0 or "solver.totalCalls" not in props[0]:
return "-"
vals = [float(p["solver.totalCalls"]) / 1000.0 for p in props]
return "%0.1f" % round(np.mean(vals), 1) # "%d"
def get_numSolverCallsOverXs(props):
if len(props) == 0 or "solver.allTimesCountMap" not in props[0]:
return "-"
    THRESHOLD = 0.5
sum = 0
for p in props:
timesMap = p["solver.allTimesCountMap"]
pairs = timesMap.split(", ")
if len(pairs) == 0:
continue
for x in pairs:
time = float(x.split(",")[0].replace("(", ""))
            if time > THRESHOLD:
# print("Name of file: " + p["thisFileName"])
weight = int(x.split(",")[1].replace(")", ""))
sum += weight
return sum
def get_avg_totalTests(props):
vals = [float(p["tests.total"]) for p in props]
if len(vals) == 0:
return "-" # -1.0, -1.0
else:
x = np.mean(vals)
if x < 1e-5:
x = 0.0
return str(int(round(x))) #"%0.1f" % x
def get_avg_mse(props):
vals = []
for p in props:
vals.append(float(p["result.best.mse"]))
if len(vals) == 0:
return "-" # -1.0, -1.0
else:
return "%0.5f" % np.mean(vals) # , np.std(vals)
def get_avg_runtime_helper(vals):
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
x = np.mean(vals)
if x >= 10.0:
return "%d" % x
else:
return "%0.1f" % x # , np.std(vals)
def get_avg_runtimeOnlySuccessful(props):
if len(props) == 0:
return "-"
else:
vals = [float(normalized_total_time(p, max_time=1800000)) / 1000.0 for p in props if is_verified_solution(p)]
return get_avg_runtime_helper(vals)
def get_avg_runtime(props):
if len(props) == 0:
return "-"
else:
vals = [float(normalized_total_time(p, max_time=1800000)) / 1000.0 for p in props]
return get_avg_runtime_helper(vals)
def get_avg_generation(props):
if len(props) == 0:
return "-"
if len(props) > 0 and "result.totalGenerations" not in props[0]:
return "-"
vals = [float(p["result.totalGenerations"]) for p in props]
if len(vals) == 0:
return "-"
else:
return str(int(round(np.mean(vals)))) #"%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_generationSuccessful(props):
if len(props) == 0:
return "-"
else:
vals = [float(p["result.best.generation"]) for p in props if is_verified_solution(p)]
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
return str(int(round(np.mean(vals)))) # "%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_evaluated(props):
if len(props) == 0:
return "-"
vals = []
for p in props:
if p["evolutionMode"] == "steadyState":
vals.append(float(p["result.totalGenerations"]))
else:
vals.append(float(p["result.totalGenerations"]) * float(p["populationSize"]))
return str(int(round(np.mean(vals)))) #"%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_evaluatedSuccessful(props):
if len(props) == 0:
return "-"
vals = []
for p in props:
if is_verified_solution(p):
if p["evolutionMode"] == "steadyState":
vals.append(float(p["result.totalGenerations"]))
else:
vals.append(float(p["result.totalGenerations"]) * float(p["populationSize"]))
if len(vals) == 0:
return "n/a" # -1.0, -1.0
else:
return str(int(round(np.mean(vals)))) # "%0.1f" % np.mean(vals) # , np.std(vals)
def get_avg_runtimePerProgram(props):
if len(props) == 0:
return "-" # -1.0, -1.0
sAvgGen = get_avg_generation(props)
if sAvgGen == "-" or sAvgGen is None:
return "-"
avgGen = float(sAvgGen) # avg number of generations in all runs
avgRuntime = float(get_avg_runtime(props)) # avg runtime of all runs
populationSize = float(props[0]["populationSize"])
if props[0]["evolutionMode"] == "steadyState":
        approxNumPrograms = populationSize + avgGen  # in steady state each generation creates only one new program
else:
approxNumPrograms = populationSize * avgGen
approxTimePerProgram = avgRuntime / approxNumPrograms
return "%0.3f" % approxTimePerProgram
def get_sum_solverRestarts(props):
if len(props) == 0:
return "-"
vals = [int(p["solver.totalRestarts"]) for p in props if "solver.totalRestarts" in p]
if len(vals) != len(props):
print("WARNING: solver.totalRestarts was not present in all files.")
if len(vals) == 0:
return "0"
else:
return str(np.sum(vals))
def print_solved_in_time(props, upper_time):
if len(props) == 0:
return
# totalTimeSystem is in miliseconds
solved = 0
solvedRuns = 0
num = 0
for p in props:
if p["result.best.isOptimal"] == "false":
continue
num += 1
if int(normalized_total_time(p, max_time=1800000)) <= upper_time:
solved += 1
for p in props:
if int(normalized_total_time(p, max_time=1800000)) <= upper_time:
solvedRuns += 1
print("\nRuns which ended under {0} s: {1} / {2} ({3} %)".format(upper_time / 1000.0, solvedRuns, len(props), solvedRuns / len(props)))
print("Optimal solutions found under {0} s: {1} / {2} ({3} %)\n".format(upper_time / 1000.0, solved, num, solved / num))
|
8,805 | 027e53d69cfece0672556e34fa901412e483bc3e | class Solution:
def uniquePaths(self, A, B):
# A - rows
# B - columns
if A == 0 or B == 0:
return 0
grid = [[1 for _ in range(B)] for _ in range(A)]
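        # DP over the grid: each cell stores the number of paths from the top-left; a cell is reachable only from above or from the left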
for i in range(1, A):
for j in range(1, B):
grid[i][j] = grid[i-1][j] + grid[i][j-1]
return grid[A-1][B-1]
s = Solution()
print(s.uniquePaths(2, 2))  # expected: 2 |
8,806 | 3c2fb3d09edab92da08ac8850f650a2fa22fad92 | from django.db import transaction
from django.forms import inlineformset_factory
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, UpdateView
from forms.models.fund_operation import FundOperation
from forms.forms.fund_operation_forms import FundOperationForm, FundOperationLineForm, FundOperationFormSet
class FundOperationCreateView(CreateView):
model = FundOperation
template_name = "forms/fund_operation/create.html"
form_class = FundOperationForm
success_url = None
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
if self.request.POST:
data['lines'] = FundOperationFormSet(self.request.POST)
else:
data['lines'] = FundOperationFormSet()
return data
def form_valid(self, form):
context = self.get_context_data()
lines = context['lines']
with transaction.atomic():
form.instance.create_user = self.request.user
self.object = form.save()
if lines.is_valid():
lines.instance = self.object
lines.save()
return super().form_valid(form)
def get_success_url(self):
return reverse_lazy('fund_operation:fund_operation_create')
class FundOperationUpdateView(UpdateView):
    model = FundOperation
template_name = "forms/fund_operation/update.html"
form_class = FundOperationForm
success_url = None
def _get_initial_data(self):
if self.object.lines.all():
return None
        initial = [
            {
                'body': 'प्रदेश सरकार',  # Provincial Government
            },
            {
                'body': 'संघीय सरकार',  # Federal Government
            },
            {
                'body': 'स्थानीय तह',  # Local Level
            },
            {
                'body': 'अन्य ब्यक्ति संस्था निकाय पदाधिकारी',  # other persons, institutions, bodies, or officials
            },
            {
                'body': 'अन्तरराष्ट्रिय गैर सरकारी संस्था',  # international non-governmental organisations
            },
            {
                'body': 'गैरसरकारी संस्था',  # non-governmental organisations
            },
        ]
return initial
def get_context_data(self, **kwargs):
data = super().get_context_data(**kwargs)
initial = self._get_initial_data()
if self.request.POST:
data['lines'] = FundOperationFormSet(
self.request.POST,
instance=self.object,
initial=initial
)
else:
data['lines'] = FundOperationFormSet(
instance=self.object,
initial=initial
)
data['lines'].extra = len(initial) if initial else 1
return data
def form_valid(self, form):
context = self.get_context_data()
lines = context['lines']
with transaction.atomic():
form.instance.create_user = self.request.user
self.object = form.save()
if lines.is_valid():
lines.instance = self.object
lines.save()
else:
return self.form_invalid(form, lines)
return super().form_valid(form)
def form_invalid(self, form, lines=None):
return self.render_to_response(self.get_context_data(form=form, lines=lines))
def get_success_url(self):
return reverse_lazy('fund_operation:fund_operation_update', kwargs={'pk': self.object.pk})
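# URL wiring sketch (names assumed from the reverse_lazy() calls above):
# path('create/', FundOperationCreateView.as_view(), name='fund_operation_create')
# path('<int:pk>/update/', FundOperationUpdateView.as_view(), name='fund_operation_update')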
|
8,807 | 3f80c4c212259a8f3ff96bcc745fd28a85dac3ba | # Import
import sys
from .step import Step
from .repeat import Repeat
# Workout
class Workout(object):
def __init__(self):
self.workout = []
self.steps = []
self.postfixEnabled = True
# TODO: check that len(name) <= 6
def addStep(self, name, duration):
self.workout.append(Step(name, duration))
# TODO: check that len(name) <= 6 - len(count)
def addRepeat(self, names, durations, count):
self.workout.append(Repeat(names, durations, count))
def generateCode(self, filename=None):
# Open
        if filename is not None:
file = open(filename, 'w')
else:
file = sys.stdout
def wr(txt):
file.write(txt + '\n')
# Generate
wr('/* Reset */')
wr('if (SUUNTO_DURATION == 0) {')
wr(' STEP = 0;')
wr(' PREVSTEP = 0;')
wr(' STEPSTARTTIME = 0;')
wr(' STEPSTARTDIST = 0;')
wr(' STEPTIME = 0;')
wr(' STEPDIST = 0;')
wr('}')
wr('')
wr('/* Next step */')
wr('if (STEP != PREVSTEP) {')
wr(' Suunto.alarmBeep();')
wr(' STEPSTARTTIME = SUUNTO_DURATION;')
wr(' STEPSTARTDIST = SUUNTO_DISTANCE*1000;')
wr('}')
wr('')
wr('/* Update */')
wr('PREVSTEP = STEP;')
wr('STEPTIME = SUUNTO_DURATION - STEPSTARTTIME;')
wr('STEPDIST = SUUNTO_DISTANCE*1000 - STEPSTARTDIST;')
wr('')
step = 0
for w in self.workout:
step = w.generateCode(file,step,self.postfixEnabled)
wr('/* Check result */')
wr('if ( RESULT <= 0 ) {')
wr(' STEP = STEP + 1;')
wr(' RESULT = 0;')
wr('}')
# Close
        if filename is not None:
file.close()
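# Example usage (a sketch; Step/Repeat argument shapes inferred from addStep/addRepeat above):
# w = Workout()
# w.addStep('WARMUP', 600)                      # 10 min warm-up
# w.addRepeat(['RUN', 'REST'], [300, 60], 5)    # 5 x (5 min run / 1 min rest)
# w.generateCode('workout.txt')                 # or omit the filename to print to stdout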
|
8,808 | c4ac7ff5d45af9d325f65b4d454a48ca0d8f86df | N, M = map(int, input().split()) # Nはスイッチの数、Mは電球の数
lights = [[0] * N for _ in range(M)]
for i in range(M):
    temp = list(map(int, input().split()))  # first value is how many switches affect this bulb; the rest are the switch numbers
k = temp[0]
switches = temp[1:]
for j in range(k):
lights[i][switches[j]-1] = 1
P = list(map(int, input().split()))  # bulb i lights up iff (number of pressed connected switches) % 2 == P[i]
answer_count = 0
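# enumerate every on/off combination of the N switches as a bitmask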
for i in range(2**N):
flag = True
for k in range(M):
count = 0
for j in range(N):
if (i >> j) & 1:
count += lights[k][j]
if count % 2 != P[k]:
flag = False
break
if flag:
answer_count += 1
print(answer_count) |
8,809 | 24bc43c1fe035430afde05fec1330e27fb5f1d86 | import sys
import re
import math
s=sys.stdin.read()
digits=re.findall(r"-?\d+",s)
listline= [int(e) for e in digits ]
x=listline[-1]
del(listline[-1])
n=len(listline)//2
customers=listline[:n]
grumpy=listline[n:]
maxcus=0
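# brute force: slide the x-minute "keep the owner calm" window across the day and keep the best total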
if x==n:
print(sum(customers))
else:
    for i in range(n-x+1):  # every placement of the x-minute window, including the last one
        total=0
        for j in range(i,i+x):
            total+=customers[j]  # was customers[i], which summed the same value x times
for j in range(i):
if grumpy[j]!=1:
total+=customers[j]
for j in range(i+x,n):
if grumpy[j]!=1:
total+=customers[j]
maxcus=max(total,maxcus)
print(maxcus)
|
8,810 | 2402188380bc0189b88e3cfcbaabf64a9919b3d5 | import pygame
import sys
# class for storing all game settings
class Settings():
"""docstring for Setting"""
def __init__(self):
        # screen settings
self.colour = (230, 230, 230)
self.screen_width = 1200
self.screen_height = 800
        # ship settings
self.ship_speed = 1.5
        # bullet settings
self.bullet_speed = 1
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = (60,60,60)
        # alien fleet speed and drop behaviour
self.alien_speed = 1
self.alien_fleet = 1
self.alien_fleet_drop_speed = 10 |
8,811 | 9c751dece67ef33ba8e5cb8281f024d2143e0808 | import os
import sys
import winreg
import zipfile
class RwpInstaller:
railworks_path = None
def extract(self, target):
with zipfile.ZipFile(target) as z:
if z.testzip():
return self.output('Corrupt file {}\n'.format(target))
self.output('{} file valid\n\n'.format(target))
extracted = 0
to_be_extracted = len(z.infolist())
for file in z.infolist():
extracted_path = z.extract(file, self.railworks_path).replace(self.railworks_path, '')
extracted += 1
percent_complete = extracted / to_be_extracted
self.output('[{}/{} {}] {}\r'.format(
extracted, to_be_extracted,
(round(percent_complete * 10) * '*').ljust(10),
extracted_path[-55:]))
self.output('\n\n{} extracted successfully'.format(os.path.basename(target)))
def get_railworks_path(self):
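        # look up Steam's install dir in the registry (HKCU\Software\Valve\Steam, value 'SteamPath')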
steam_key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Software\\Valve\\Steam')
steam_path = winreg.QueryValueEx(steam_key, 'SteamPath')[0]
return os.path.join(steam_path, 'steamApps', 'common', 'railworks')
def output(self, out, wait=False):
if wait:
input(out)
else:
sys.stdout.write(out)
def main(self):
targets = sys.argv[1:]
if not targets:
return self.output('No RWP files passed.', wait=True)
self.railworks_path = self.get_railworks_path()
for target in targets:
self.extract(target)
self.output('\n\nAll done. Thanks for using RWP Installer.', wait=True)
if __name__ == '__main__':
RwpInstaller().main()
|
8,812 | 97d128694709c4fe0d9ec2b2749d8e4ec5df7322 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from fieldsets import getSingleField, SortAsc
from sqlalchemy import func
from ladderdb import ElementNotFoundException, EmptyRankingListException
from db_entities import Player, Result
from bottle import route,request
from globe import db,env
@route('/player')
def output( ):
player_name = getSingleField( 'player', request )
order = getSingleField( 'order', request , 'nick')
ladder_id = getSingleField( 'ladder', request )
try:
s = db.sessionmaker()
if player_name:
player = db.GetPlayer( player_name )
ladders = db.GetLadderByPlayer( player.id )
played = dict()
positions = dict()
for ladder in ladders:
positions[ladder.id] = db.GetPlayerPosition( ladder.id, player.id )
played[ladder.id] = s.query( Result.id ).filter( Result.ladder_id == ladder.id ).filter( Result.player_id == player.id ).count()
results = s.query( Result ).filter( Result.player_id == player.id).order_by(Result.date.desc())[0:5]
matches = []
for r in results:
matches.append( r.match )
template = env.get_template('viewplayer.html')
s.close()
return template.render(player=player,ladders=ladders, positions=positions,played=played,matches=matches )
else:
asc = getSingleField( 'asc', request, 'False' )
if not asc:
asc = 'False'
q = s.query( Player, func.count(Result.id).label('played')).outerjoin( (Result, Result.player_id == Player.id ) )\
.filter( Player.id.in_(s.query( Result.player_id ).filter( Player.id == Result.player_id ) ) ) \
.filter( Result.player_id == Player.id ).group_by( Player.id )
if ladder_id:
q = q.filter( Player.id.in_( s.query( Result.player_id ).filter( Result.ladder_id == ladder_id ) ) )
if order == 'nick':
q = q.order_by( SortAsc( Player.nick, asc ) )
elif order == 'id' :
q = q.order_by( SortAsc( Player.id, asc ) )
else:
order = 'played'
q = q.order_by( SortAsc( func.count(Result.id), asc ) )
limit = int(getSingleField( 'limit', request, q.count() ))
offset = int(getSingleField( 'offset', request, 0 ))
players = q[offset:offset+limit-1]
template = env.get_template('viewplayerlist.html')
s.close()
return template.render(players=players,offset=offset,limit=limit,order=order,asc=asc )
    except ElementNotFoundException as e:
err_msg="player %s not found"%(str(player_name))
    except EmptyRankingListException as m:
err_msg=(str(m))
if s:
s.close()
template = env.get_template('error.html')
return template.render( err_msg=err_msg ) |
8,813 | 347d468f15dee8a8219d201251cedffe21352f7c | from django.contrib.auth.models import User
from django.test import Client
from django.utils.timezone import localdate
from pytest import fixture
from rest_framework.authtoken.models import Token  # Token was used below but never imported
from operations.models import ToDoList
@fixture
def user(db):
    return User.objects.create_user(  # create_user hashes the password; plain create() stores it raw
        username='test', email='saidazimovaziza@gmail.com',
        password='test',
    )
@fixture
def authenticated_author_client(
user, client: Client
) -> Client:
    token = Token.objects.get_or_create(user=user)[0].key
    client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
    return client
@fixture
def todo(db, user):
return ToDoList.objects.create(
user=user,
title='Test task',
description='Uchet kz test task',
deadline=localdate(),
executed=False
)
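# Example test using these fixtures (a sketch):
# def test_todo_defaults(todo):
#     assert todo.executed is False
#     assert todo.deadline == localdate()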
|
8,814 | 8cc97ebe0ff7617eaf31919d40fa6c312d7b6f94 | # accessing array elements rows/columns
import numpy as np
a = np.array([[1, 2, 3, 4, 5, 6, 7], [9, 8, 7, 6, 5, 4, 3]])
print(a.shape) # array shape
print(a)
print('\n')
# specific array element [r,c]
# item 6
print(a[0][5])
# item 8
print(a[1][1]) # or
print(a[1][-6])
# get a specific row/specific column
print(a[1])
print(a[0])
print(a[0, :])
print(a[:, 1]) # prints second column
print('\n')
# get only the even numbers from first row [start_index:end_index:step]
print('even numbers from first row')
print(a[0, 1:8:2])
# change certain value of array
a[1, 2] = 90
print('new array is ',a) |
8,815 | a1b33d0a8a074bc7a2a3e2085b1ff01267e00d3b | def minutes to hours(minutes) :
hours = minutes/60
return hours
print(minutes to hours(70))
|
8,816 | 070330f8d343ff65852c5fbb9a3e96fe1bfc55b5 | # pylint: disable=not-callable, no-member, invalid-name, missing-docstring, arguments-differ
import argparse
import itertools
import os
import torch
import torch.nn as nn
import tqdm
import time_logging
from hanabi import Game
def mean(xs):
xs = list(xs)
return sum(xs) / len(xs)
@torch.jit.script
def swish_jit_fwd(x):
return x * torch.sigmoid(x) * 1.6768
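# (assumption) the 1.6768 factor is a variance-preserving gain: it rescales swish so that
# roughly unit-variance inputs stay unit-variance at the output; it is not documented in this file.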
@torch.jit.script
def swish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) * 1.6768
class SwishJitAutoFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return swish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return swish_jit_bwd(x, grad_output)
class Swish(nn.Module):
def forward(self, x):
return SwishJitAutoFn.apply(x)
def orthogonal_(tensor, gain=1):
'''
Orthogonal initialization (modified version from PyTorch)
'''
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor[0].numel()
flattened = tensor.new_empty(rows, cols).normal_(0, 1)
for i in range(0, rows, cols):
# Compute the qr factorization
q, r = torch.qr(flattened[i:i + cols].t())
# Make Q uniform according to https://arxiv.org/pdf/math-ph/0609050.pdf
q *= torch.diag(r, 0).sign()
q.t_()
with torch.no_grad():
tensor[i:i + cols].view_as(q).copy_(q)
with torch.no_grad():
tensor.mul_(gain)
return tensor
def linear(in_features, out_features, bias=True):
'''
Linear Module initialized properly
'''
    m = nn.Linear(in_features, out_features, bias=bias)
    orthogonal_(m.weight)
    if m.bias is not None:  # bias=False would otherwise crash on zeros_(None)
        nn.init.zeros_(m.bias)
return m
def play_and_train(args, policy, optim):
total_loss = 0
turns = 0
scores = []
while turns < args.bs:
log_probs = []
rewards = []
game = Game(4)
t = time_logging.start()
while True:
x = game.encode()
t = time_logging.end("encode", t)
x = torch.tensor(x, device=args.device, dtype=torch.float32)
x = args.beta * policy(x)
t = time_logging.end("policy", t)
loss = [0]
def sample(x, w=1):
if torch.rand(()) < args.randmove:
m = torch.distributions.Categorical(logits=torch.zeros_like(x))
else:
m = torch.distributions.Categorical(logits=x)
i = m.sample().item()
loss[0] += x.log_softmax(0)[i].mul(w)
return i
action = sample(x[:3])
score = game.score
if action == 0:
position = sample(x[3:3+5])
out = game.play(position)
if action == 1:
position = sample(x[3:3+5])
out = game.discard(position)
if action == 2:
target = sample(x[3+5:3+5+5], 0.5)
info = sample(x[3+5+5:3+5+5+10], 0.5)
if info < 5:
out = game.clue(target, info)
else:
out = game.clue(target, "rgbyp"[info-5])
t = time_logging.end("decode", t)
log_probs.append(loss[0])
if out is not None:
rewards.append(-1)
break
if game.gameover:
if game.score == 25:
rewards.append(game.score - score)
else:
rewards.append(-1)
break
rewards.append(game.score - score)
if len(log_probs) >= 3:
turns += len(log_probs)
R = 0
returns = []
for r in rewards[::-1]:
R = r + args.gamma * R
returns.insert(0, R)
returns = torch.tensor(returns, device=args.device, dtype=torch.float32)
returns = (returns - returns.mean()) / (returns.std() + 1e-5)
for log_prob, R in zip(log_probs, returns):
total_loss += -(log_prob * R)
scores.append(game.score)
total_loss /= turns
optim.zero_grad()
total_loss.backward()
optim.step()
t = time_logging.end("backward & optim", t)
return scores
def execute(args):
torch.backends.cudnn.benchmark = True
policy = nn.Sequential(
linear(2270, args.n), Swish(),
linear(args.n, args.n), Swish(),
linear(args.n, args.n), Swish(),
linear(args.n, args.n), Swish(),
linear(args.n, 23)
).to(args.device)
scores = [0]
optim = torch.optim.Adam(policy.parameters(), lr=args.lr)
if args.restore:
        with open(args.restore, 'rb') as f:
            torch.load(f)  # the first object in the pickle is args (see main's save order); discard it
            x = torch.load(f, map_location=args.device)
scores = x['scores']
policy.load_state_dict(x['state'])
t = tqdm.tqdm()
for i in itertools.count(1):
new_scores = play_and_train(args, policy, optim)
scores.extend(new_scores)
if i % 1000 == 0:
print()
print(time_logging.text_statistics())
yield {
'args': args,
'state': policy.state_dict(),
'scores': scores,
}
avg_score = mean(scores[-args.n_avg:])
t.update(len(new_scores))
t.set_postfix_str("scores={} avg_score={:.2f}".format(scores[-5:], avg_score))
t.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--lr", type=float, default=1e-5)
parser.add_argument("--bs", type=int, default=10)
parser.add_argument("--n", type=int, default=500)
parser.add_argument("--n_avg", type=int, default=1000)
parser.add_argument("--beta", type=float, default=0.01)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--randmove", type=float, default=0.4)
parser.add_argument("--restore", type=str)
parser.add_argument("--device", type=str, required=True)
parser.add_argument("--pickle", type=str, required=True)
args = parser.parse_args()
new = True
torch.save(args, args.pickle)
try:
for res in execute(args):
with open(args.pickle, 'wb') as f:
torch.save(args, f)
torch.save(res, f)
new = False
except:
if new:
os.remove(args.pickle)
raise
if __name__ == "__main__":
main()
|
8,817 | 88af8b4eeb40ecf19622ecde1a5dea9a078bb66c | # Percy's playground.
from __future__ import print_function
import sympy as sp
import numpy as np
import BorderBasis as BB
np.set_printoptions(precision=3)
from IPython.display import display, Markdown, Math
sp.init_printing()
R, x, y = sp.ring('x,y', sp.RR, order=sp.grevlex)
I = [ x**2 + y**2 - 1.0, x + y ]
R, x, y, z = sp.ring('x,y,z', sp.RR, order=sp.grevlex)
I = [ x**2 - 1, y**2 - 4, z**2 - 9]
# n = 4 takes a long time
n = 4
Rvs = sp.ring(' '.join('v'+str(i) for i in range(1, n + 1)), sp.RR, order=sp.grevlex)
R, vs = Rvs[0], Rvs[1:]
I = []
I.extend([v**2 - 1 for v in vs])
#I.extend([(v-1)**2 for v in vs])
#I.extend([v-1 for v in vs])
#I.extend([vs[i] - vs[i-1] for i in range(1, len(vs))]) # Makes it fast
print('Generating')
B = BB.BorderBasisFactory(1e-5).generate(R,I)
print('Done')
print("=== Generator Basis:")
for f in B.generator_basis:
display(f.as_expr())
print("=== Quotient Basis:")
for f in B.quotient_basis():
display(f.as_expr())
# v2 is always zero
print("=== Variety:")
for v in B.zeros():
    print(list(zip(R.symbols, v)))  # zip() is lazy; materialize it so the pairs actually print
|
8,818 | 38a79f5b3ce1beb3dc1758880d42ceabc800ece7 | # Generated by Django 3.0 on 2019-12-15 16:20
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0013_auto_20191215_1619'),
]
operations = [
migrations.AlterField(
model_name='categorie',
name='utimestamp',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc)),
),
migrations.AlterField(
model_name='post',
name='create_date',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 657811, tzinfo=utc)),
),
migrations.AlterField(
model_name='tag',
name='utimestamp',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)),
),
]
|
8,819 | 32e60c672d6e73600d442c4344743deccaed6796 | from .core import S3FileSystem, S3File
from .mapping import S3Map
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
8,820 | 9fbf994cb99369ba0c20383007ce52c99248bacf |
# code below
#taking filename as pyscript.py
from distutils.core import setup
import py2exe
setup(console=['pyscript.py'])
# command to run
# python setup.py py2exe
|
8,821 | 78761eda403ad8f54187e5858a23c23d3dd79b09 | """"Module for miscellaneous behavior stuff
For example, stuff like extracting lick times or choice times.
TrialSpeak shouldn't depend on stuff like that.
# Also get the pldf and use that to get lick times
ldf = ArduFSM.TrialSpeak.read_logfile_into_df(bdf.loc[idx, 'filename'])
# Get the lick times
lick_times = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(ldf, 'TCH')
# Group them by trial number and lick type and extract times
tt2licks = lick_times.groupby(['trial', 'arg0']).groups
for (trial, lick_type) in tt2licks:
tt2licks[(trial, lick_type)] = \
ldf.loc[tt2licks[(trial, lick_type)], 'time'].values / 1000.
# Get response window time as first transition into response window
state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(
ldf, 'ST_CHG2')
rwin_open_times = my.pick_rows(state_change_df,
arg1=state_name2num['RESPONSE_WINDOW'])
rwin_open_times_by_trial = rwin_open_times.groupby(
'trial').first()['time'] / 1000.
# Get choice time as first transition out of response window
state_change_df = ArduFSM.TrialSpeak.get_commands_from_parsed_lines(
ldf, 'ST_CHG2')
rwin_close_times = my.pick_rows(state_change_df,
arg0=state_name2num['RESPONSE_WINDOW'])
rwin_close_times_by_trial = rwin_close_times.groupby(
'trial').first()['time'] / 1000.
"""
import MCwatch
import ArduFSM
import numpy as np
def get_choice_times(behavior_filename, verbose=False):
"""Calculates the choice time for each trial in the logfile"""
# Find the state number for response window
state_num2names = MCwatch.behavior.db.get_state_num2names()
resp_win_num = dict([(v, k) for k, v in list(state_num2names.items())])[
'RESPONSE_WINDOW']
# Get the lines
lines = ArduFSM.TrialSpeak.read_lines_from_file(behavior_filename)
parsed_df_by_trial = \
ArduFSM.TrialSpeak.parse_lines_into_df_split_by_trial(lines,
verbose=verbose)
# Identify times of state change out of response window
# No sense in warning because there's also multiple state changes on
# rewarded trials
choice_times = ArduFSM.TrialSpeak.identify_state_change_times(
parsed_df_by_trial, state0=resp_win_num, show_warnings=False)
return choice_times
def get_included_trials(trial_times, data_range, t_start=0, t_stop=0):
"""Identify the trials included in a temporal range.
trial_times : Series of trial times (e.g., rwin times) indexed by
trial labels
data_range : 2-tuple (start, stop) specifying interval to include
t_start, t_stop : amount of time before (after) each trial time that
must be within data_range in order for that trial to be included.
Returns: trial_labels that are included
Ex:
## Get the trial matrix
tm = MCwatch.behavior.db.get_trial_matrix(vs.bsession_name, True)
# Include all random trials
tm = my.pick_rows(tm, isrnd=True, outcome=['hit', 'error'])
# Identify range of trials to include
video_range_bbase = extras.get_video_range_bbase(vs)
included_trials = extras.get_included_trials(tm['rwin_time'],
data_range=video_range_bbase, t_start=-2, t_stop=0)
tm = tm.loc[included_trials]
"""
return trial_times[
(trial_times + t_start >= data_range[0]) &
(trial_times + t_stop < data_range[1])
].index
|
8,822 | f76a3fac75e7e2b156f4bff5094f11009b65b599 | # Generated by Django 3.1.7 on 2021-03-25 00:33
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('restaurante', '0003_auto_20210324_1932'),
]
operations = [
migrations.AlterModelOptions(
name='comprobantemodel',
options={'verbose_name': 'Comprobante'},
),
migrations.AlterModelTable(
name='comprobantemodel',
table='t_comprobante',
),
]
|
8,823 | 6e9fd8ee2a187888df07c9dd1c32fe59a111c869 | #downloads project detail reports from the web and places them in the correct project folder created by makeFolders.py
import os, openpyxl, time, shutil
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
wb = openpyxl.load_workbook('ProjectSummary.xlsx')
sheet = wb.active
browser = webdriver.Firefox()
browser.get('https://safetynet.predictivesolutions.com/CRMApp/default_login.jsp?loginZoneID=10459&originalHostName=jdc.predictivesolutions.com')
userElem = browser.find_element_by_id('username')
userElem.send_keys('temp')
passElem = browser.find_element_by_id('password')
passElem.send_keys('temp')
passElem.submit()
time.sleep(3)
linkElem = browser.find_element_by_link_text('Reports')
linkElem.click()
time.sleep(2)
linkElem = browser.find_element_by_link_text('Detail Report')
linkElem.click()
time.sleep(4)
def pdfToFolder(projectName):
os.chdir('/home/gmclaughlin/Downloads')
if projectName.find("DEM") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/Demo/%s/%s-Detail Report.pdf' % (projectName, projectName))
elif projectName.find("JDC") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/JDC/%s/%s-Detail Report.pdf' % (projectName, projectName))
elif projectName.find("NEW") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/NewRoads/%s/%s-Detail Report.pdf' % (projectName, projectName))
elif projectName.find("Site") != -1:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/SiteCrew/%s/%s-Detail Report.pdf' % (projectName, projectName))
else:
shutil.move('/home/gmclaughlin/Downloads/Detail Report - Basic.pdf','/home/gmclaughlin/Python/Safety Project/Other/%s/%s-Detail Report.pdf' % (projectName, projectName))
finishedFlag = False
addValue = 0
counter = 0
for cellObj in sheet['A']:
if cellObj.value != 'Project' and cellObj.value != 'JDC-Winchester HS Enabling (CONSIG':
linkElem = browser.find_element_by_name('clear') #clear existing settings
linkElem.click()
time.sleep(4)
linkElem = browser.find_element_by_name('showSafeAndUnsafeDetails') #select all reports
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('showImages') #show images in reports
linkElem.click()
time.sleep(1)
linkElem = browser.find_element_by_name('datePickerRadio')
linkElem.click()
time.sleep(1)
projectElem = browser.find_elements_by_xpath("//input[@type='text']") #find and use text fields
print(cellObj.value)
#projectElem = browser.find_element_by_xpath("//input[4]")
#time.sleep(2)
#projectElem[5+addValue].clear()
projectElem[5+addValue].send_keys('01/01/2010')
time.sleep(1)
#projectElem[6+addValue].clear()
projectElem[6+addValue].send_keys('08/15/2017')
time.sleep(1)
projectElem[8+addValue].clear() #this is the project name box
projectElem[8+addValue].send_keys(cellObj.value)
time.sleep(1)
projectElem[8+addValue].send_keys(Keys.ENTER)
time.sleep(3)
linkElem = browser.find_element_by_xpath("//input[@type='submit']") #submit request for report
linkElem.click()
time.sleep(10)
linkElem = browser.find_element_by_name('pdf') #download as PDF
linkElem.click()
time.sleep(70)
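        # (inference) after the first query the page appears to gain an extra text input,
        # so later field indices shift by one; hence addValue below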
addValue = 1
pdfToFolder(cellObj.value)
counter = counter + 1
|
8,824 | c5605f4770d61d435cc1817bad4d5cbe0aaf1d18 | from sys import stdin
read = lambda: stdin.readline().strip()
class Trie:
def __init__(self, me, parent=None):
self.me = me
self.parent = parent
self.children = {}
def get_answer(trie, count):
print(("--" * count) + trie.me)
trie.children = dict(sorted(trie.children.items(), key=lambda x: x[0]))
for k in trie.children.keys():
get_answer(trie.children[k], count + 1)
def main():
trie_dict = {}
for i in range(int(read())):
data = read().split()
if data[1] not in trie_dict:
trie_dict[data[1]] = Trie(data[1])
cur = trie_dict[data[1]]
for j in range(2, len(data)):
            # if cur has no child node for this token yet
if data[j] not in cur.children:
cur.children[data[j]] = Trie(data[j])
cur = cur.children[data[j]]
trie_dict = dict(sorted(trie_dict.items(), key=lambda x: x[0]))
for k in trie_dict.keys():
get_answer(trie_dict[k], 0)
if __name__ == "__main__":
main() |
8,825 | 3dd9ce6d5d1ba0bebadae4068e2c898802180e1d | #!/usr/bin/env python
# $Id: iprscan5_urllib2.py 2809 2015-03-13 16:10:25Z uludag $
# ======================================================================
#
# Copyright 2009-2014 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
# InterProScan 5 (REST) Python client using urllib2 and
# xmltramp (http://www.aaronsw.com/2002/xmltramp/).
#
# Tested with:
# Python 2.6.5 (Ubuntu 10.04 LTS)
# Python 2.7.3 (Ubuntu 12.04 LTS)
#
# See:
# http://www.ebi.ac.uk/Tools/webservices/services/pfa/iprscan5_rest
# http://www.ebi.ac.uk/Tools/webservices/tutorials/python
# ======================================================================
# Base URL for service
import urllib.request, urllib.error, urllib.parse
import urllib.request, urllib.parse, urllib.error
import time
import sys
import re
import os
import platform
import argparse
import xmltramp
baseUrl = 'http://www.ebi.ac.uk/Tools/services/rest/iprscan5'
# Load libraries
# Set interval for checking status
checkInterval = 10
# Output level
outputLevel = 1
# Debug level
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)
# Usage message
parser = argparse.ArgumentParser()
# Tool specific options
parser.add_argument('--input', required=True, help='input FASTA file')
parser.add_argument('--appl',
help='signature methods to use, see --paramDetail appl')
parser.add_argument('--crc', action="store_true",
help='enable InterProScan Matches look-up (ignored)')
parser.add_argument('--nocrc', action="store_true",
help='disable InterProScan Matches look-up (ignored)')
parser.add_argument('--goterms', action="store_true",
help='enable inclusion of GO terms')
parser.add_argument('--nogoterms', action="store_true",
help='disable inclusion of GO terms')
parser.add_argument('--pathways', action="store_true",
help='enable inclusion of pathway terms')
parser.add_argument('--nopathways', action="store_true",
help='disable inclusion of pathway terms')
parser.add_argument('--sequence', help='input sequence file name')
# General options
parser.add_argument('--email', required=True, help='e-mail address')
parser.add_argument('--title', help='job title')
parser.add_argument('--outfile', help='file name for results')
parser.add_argument('--outformat', help='output format for results')
# 'async' became a reserved word in Python 3.7, so the flag needs an explicit dest
parser.add_argument('--async', dest='async_mode', action='store_true',
                    help='asynchronous mode')
parser.add_argument('--jobid', help='job identifier')
parser.add_argument('--polljob', action="store_true", help='get job result')
parser.add_argument('--status', action="store_true", help='get job status')
parser.add_argument('--resultTypes', action='store_true',
help='get result types')
parser.add_argument('--params', action='store_true',
help='list input parameters')
parser.add_argument('--paramDetail', help='get details for parameter')
parser.add_argument('--quiet', action='store_true',
help='decrease output level')
parser.add_argument('--verbose', action='store_true',
help='increase output level')
parser.add_argument('--baseURL', default=baseUrl, help='Base URL for service')
parser.add_argument('--debugLevel', type=int,
default=debugLevel, help='debug output level')
options = parser.parse_args()
# Increase output level
if options.verbose:
outputLevel += 1
# Decrease output level
if options.quiet:
outputLevel -= 1
# Debug level
if options.debugLevel:
debugLevel = options.debugLevel
# Debug print
def printDebugMessage(functionName, message, level):
if(level <= debugLevel):
print('[' + functionName + '] ' + message, file=sys.stderr)
# User-agent for request (see RFC2616).
def getUserAgent():
printDebugMessage('getUserAgent', 'Begin', 11)
    # Agent string for the urllib library (urllib2 does not exist in Python 3).
    urllib_agent = 'Python-urllib/%s' % platform.python_version()
clientRevision = '$Revision: 2809 $'
clientVersion = '0'
if len(clientRevision) > 11:
clientVersion = clientRevision[11:-2]
# Prepend client specific agent string.
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
printDebugMessage('getUserAgent', 'user_agent: ' + user_agent, 12)
printDebugMessage('getUserAgent', 'End', 11)
return user_agent
# Wrapper for a REST (HTTP GET) request
def restRequest(url):
printDebugMessage('restRequest', 'Begin', 11)
printDebugMessage('restRequest', 'url: ' + url, 11)
# Errors are indicated by HTTP status codes.
try:
# Set the User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(url, None, http_headers)
# Make the request (HTTP GET).
reqH = urllib.request.urlopen(req)
        # urlopen returns bytes under Python 3; decode for text handling
        result = reqH.read().decode('utf-8')
reqH.close()
# Errors are indicated by HTTP status codes.
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('restRequest', 'End', 11)
return result
# Get input parameters list
def serviceGetParameters():
printDebugMessage('serviceGetParameters', 'Begin', 1)
requestUrl = baseUrl + '/parameters'
printDebugMessage('serviceGetParameters', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameters', 'End', 1)
return doc['id':]
# Print list of parameters
def printGetParameters():
printDebugMessage('printGetParameters', 'Begin', 1)
idList = serviceGetParameters()
for id in idList:
print(id)
printDebugMessage('printGetParameters', 'End', 1)
# Get input parameter information
def serviceGetParameterDetails(paramName):
printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
printDebugMessage('serviceGetParameterDetails',
'paramName: ' + paramName, 2)
requestUrl = baseUrl + '/parameterdetails/' + paramName
printDebugMessage('serviceGetParameterDetails',
'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetParameterDetails', 'End', 1)
return doc
# Print description of a parameter
def printGetParameterDetails(paramName):
printDebugMessage('printGetParameterDetails', 'Begin', 1)
doc = serviceGetParameterDetails(paramName)
print(str(doc.name) + "\t" + str(doc.type))
print(doc.description)
for value in doc.values:
print(value.value, end=' ')
if str(value.defaultValue) == 'true':
print('default', end=' ')
print()
print("\t" + str(value.label))
if(hasattr(value, 'properties')):
for wsProperty in value.properties:
print("\t" + str(wsProperty.key) + "\t" + str(wsProperty.value))
#print doc
printDebugMessage('printGetParameterDetails', 'End', 1)
# Submit job
def serviceRun(email, title, params):
printDebugMessage('serviceRun', 'Begin', 1)
# Insert e-mail and title into params
params['email'] = email
if title:
params['title'] = title
requestUrl = baseUrl + '/run/'
printDebugMessage('serviceRun', 'requestUrl: ' + requestUrl, 2)
# Signature methods requires special handling (list)
applData = ''
if 'appl' in params:
# So extract from params
applList = params['appl']
del params['appl']
# Build the method data options
for appl in applList:
applData += '&appl=' + appl
# Get the data for the other options
requestData = urllib.parse.urlencode(params)
# Concatenate the two parts.
requestData += applData
printDebugMessage('serviceRun', 'requestData: ' + requestData, 2)
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = getUserAgent()
http_headers = {'User-Agent': user_agent}
req = urllib.request.Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
        # POST data must be bytes under Python 3; the job id comes back as bytes
        reqH = urllib.request.urlopen(req, requestData.encode('utf-8'))
        jobId = reqH.read().decode('utf-8')
reqH.close()
except urllib.error.HTTPError as ex:
# Trap exception and output the document to get error message.
print(ex.read(), file=sys.stderr)
raise
printDebugMessage('serviceRun', 'jobId: ' + jobId, 2)
printDebugMessage('serviceRun', 'End', 1)
return jobId
# Get job status
def serviceGetStatus(jobId):
printDebugMessage('serviceGetStatus', 'Begin', 1)
printDebugMessage('serviceGetStatus', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/status/' + jobId
printDebugMessage('serviceGetStatus', 'requestUrl: ' + requestUrl, 2)
status = restRequest(requestUrl)
printDebugMessage('serviceGetStatus', 'status: ' + status, 2)
printDebugMessage('serviceGetStatus', 'End', 1)
return status
# Print the status of a job
def printGetStatus(jobId):
printDebugMessage('printGetStatus', 'Begin', 1)
status = serviceGetStatus(jobId)
print(status)
printDebugMessage('printGetStatus', 'End', 1)
# Get available result types for job
def serviceGetResultTypes(jobId):
printDebugMessage('serviceGetResultTypes', 'Begin', 1)
printDebugMessage('serviceGetResultTypes', 'jobId: ' + jobId, 2)
requestUrl = baseUrl + '/resulttypes/' + jobId
printDebugMessage('serviceGetResultTypes', 'requestUrl: ' + requestUrl, 2)
xmlDoc = restRequest(requestUrl)
doc = xmltramp.parse(xmlDoc)
printDebugMessage('serviceGetResultTypes', 'End', 1)
return doc['type':]
# Print list of available result types for a job.
def printGetResultTypes(jobId):
printDebugMessage('printGetResultTypes', 'Begin', 1)
resultTypeList = serviceGetResultTypes(jobId)
for resultType in resultTypeList:
print(resultType['identifier'])
if(hasattr(resultType, 'label')):
print("\t", resultType['label'])
if(hasattr(resultType, 'description')):
print("\t", resultType['description'])
if(hasattr(resultType, 'mediaType')):
print("\t", resultType['mediaType'])
if(hasattr(resultType, 'fileSuffix')):
print("\t", resultType['fileSuffix'])
printDebugMessage('printGetResultTypes', 'End', 1)
# Get result
def serviceGetResult(jobId, type_):
printDebugMessage('serviceGetResult', 'Begin', 1)
printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 2)
printDebugMessage('serviceGetResult', 'type_: ' + type_, 2)
requestUrl = baseUrl + '/result/' + jobId + '/' + type_
result = restRequest(requestUrl)
printDebugMessage('serviceGetResult', 'End', 1)
return result
# Client-side poll
def clientPoll(jobId):
printDebugMessage('clientPoll', 'Begin', 1)
result = 'PENDING'
while result == 'RUNNING' or result == 'PENDING':
result = serviceGetStatus(jobId)
print(result, file=sys.stderr)
if result == 'RUNNING' or result == 'PENDING':
time.sleep(checkInterval)
printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
def getResult(jobId):
printDebugMessage('getResult', 'Begin', 1)
printDebugMessage('getResult', 'jobId: ' + jobId, 1)
# Check status and wait if necessary
clientPoll(jobId)
# Get available result types
resultTypes = serviceGetResultTypes(jobId)
for resultType in resultTypes:
# Derive the filename for the result
if options.outfile:
filename = options.outfile + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
else:
filename = jobId + '.' + \
str(resultType['identifier']) + '.' + \
str(resultType['fileSuffix'])
# Write a result file
if not options.outformat or options.outformat == str(resultType['identifier']):
# Get the result
result = serviceGetResult(jobId, str(resultType['identifier']))
fh = open(filename, 'w')
fh.write(result)
fh.close()
print(filename)
printDebugMessage('getResult', 'End', 1)
# Read a file
def readFile(filename):
printDebugMessage('readFile', 'Begin', 1)
fh = open(filename, 'r')
data = fh.read()
fh.close()
printDebugMessage('readFile', 'End', 1)
return data
# No options... print help.
if numOpts < 2:
parser.print_help()
# List parameters
elif options.params:
printGetParameters()
# Get parameter details
elif options.paramDetail:
printGetParameterDetails(options.paramDetail)
# Submit job
elif options.email and not options.jobid:
params = {}
    if options.input:
if os.access(options.input, os.R_OK): # Read file into content
params['sequence'] = readFile(options.input)
else: # Argument is a sequence id
params['sequence'] = options.input
elif options.sequence: # Specified via option
if os.access(options.sequence, os.R_OK): # Read file into content
params['sequence'] = readFile(options.sequence)
else: # Argument is a sequence id
params['sequence'] = options.sequence
# Map flag options to boolean values.
# if options.crc:
# params['crc'] = True
# elif options.nocrc:
# params['crc'] = False
if options.goterms:
params['goterms'] = True
elif options.nogoterms:
params['goterms'] = False
if options.pathways:
params['pathways'] = True
elif options.nopathways:
params['pathways'] = False
# Add the other options (if defined)
if options.appl:
params['appl'] = re.split('[ \t\n,;]+', options.appl)
# Submit the job
jobid = serviceRun(options.email, options.title, params)
    if options.async_mode:  # Async mode
print(jobid)
else: # Sync mode
print(jobid, file=sys.stderr)
time.sleep(5)
getResult(jobid)
# Get job status
elif options.status and options.jobid:
printGetStatus(options.jobid)
# List result types for job
elif options.resultTypes and options.jobid:
printGetResultTypes(options.jobid)
# Get results for job
elif options.polljob and options.jobid:
getResult(options.jobid)
else:
print('Error: unrecognised argument combination', file=sys.stderr)
parser.print_help()
|
8,826 | 5cced6d9f5e01b88951059bc89c5d10cfd160f60 | """
Write two functions:
1. `to_list()`, which converts a number to an integer list of its digits.
2. `to_number()`, which converts a list of integers back to its number.
### Examples
to_list(235) ➞ [2, 3, 5]
to_list(0) ➞ [0]
to_number([2, 3, 5]) ➞ 235
to_number([0]) ➞ 0
### Notes
All test cases will be weakly positive numbers: `>= 0`
"""
def to_list(num):
    return list(map(int, str(num)))
def to_number(lst):
    return int("".join(map(str, lst)))
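# A minimal self-check (not part of the original exercise) exercising the
# documented examples; safe to remove.
if __name__ == '__main__':
    assert to_list(235) == [2, 3, 5]
    assert to_list(0) == [0]
    assert to_number([2, 3, 5]) == 235
    assert to_number([0]) == 0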
|
8,827 | 46b1fc975fbeedcafaa66c85c378e2249a495647 |
def read_int():
return int(input().strip())
def read_ints():
return list(map(int, input().strip().split(' ')))
def solve():
K, S = read_ints()
# X+Y+Z = S
# 0 <= X,Y,Z <= K
total = 0
for X in range(K+1):
if S-X < 0:
break
# Y+Z=S-X
Y_min = max(S-X-K, 0)
Y_max = min(S-X, K)
if Y_min <= Y_max:
total += Y_max-Y_min+1
return total
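# Worked example of the counting above: for K=2, S=2 there are six triples;
# X=0 allows Y in [0, 2] (3 options), X=1 allows Y in [0, 1] (2 options), and
# X=2 forces Y=0 (1 option), so total = 3 + 2 + 1 = 6.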
if __name__ == '__main__':
print(solve())
|
8,828 | 8ce468460a81c7869f3abb69035a033c58e0f699 | import numpy as np
"""
function for calculating integrals using the trapezoid method
x is a vector of independent variables
y is a vector of dependent variables
a is the initial value
b is the final value
n is the number of intervals
y_generator is the function to be integrated
"""
def trapezoid_integral(**kwargs):
a = kwargs.get('a', None)
b = kwargs.get('b', None)
n = kwargs.get('n', 2)
y_generator = kwargs.get('y_generator', None)
x = kwargs.get('x', None)
y = kwargs.get('y', None)
if y is None:
h = (b-a)/n
x = np.linspace(a, b, n+1)
y = [y_generator(x[i]) for i in range(n+1)]
vectors_length = len(x)
integral_value = y[0]
for i in range(2, vectors_length):
integral_value += 2*y[i - 1]
integral_value += y[vectors_length - 1]
integral_value *= h/2
return integral_value
    else:
        total = 0.0  # avoid shadowing the built-in sum
        for i in range(len(x) - 1):
            total += (y[i] + y[i + 1]) / 2 * (x[i + 1] - x[i])
        return total
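# Usage sketch (the integrand is an illustrative assumption): integrating
# x**2 on [0, 1] with n=100 intervals should give roughly 1/3.
if __name__ == '__main__':
    approx = trapezoid_integral(a=0.0, b=1.0, n=100,
                                y_generator=lambda x: x ** 2)
    print(approx)  # ~0.33335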
|
8,829 | e2f6e6e872f95471ebbc8b25bde08247fe8f7e61 | import media
import fresh_tomatoes
toy_story = media.Movie("Toy Story",
"A story of a boy and his toys that come to life",
'<p><a href="https://en.wikipedia.org/wiki/File:Toy_Story.jpg#/media/File:Toy_Story.jpg"><img src="https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg" alt="The poster features Woody anxiously holding onto Buzz Lightyear as he flies in Andy\'s room. Below them sitting on the bed are Bo Peep, Mr. Potato Head, Troll, Hamm, Slinky, Sarge and Rex. In the lower right center of the image is the film\'s title. The background shows the cloud wallpaper featured in the bedroom."></a><br>By From <a rel="nofollow" class="external text" href="http://www.impawards.com/1995/toy_story_ver1.html">impawards</a>., <a href="https://en.wikipedia.org/w/index.php?curid=26009601">Link</a></p>',
"https://youtu.be/KYz2wyBy3kc")
avatar = media.Movie("Avatar",
"A marine on an alien planet",
'<p><a href="https://en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg#/media/File:Avatar-Teaser-Poster.jpg"><img src="https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg" alt="Avatar-Teaser-Poster.jpg"></a><br>By Source, <a href="//en.wikipedia.org/wiki/File:Avatar-Teaser-Poster.jpg" title="Fair use of copyrighted material in the context of Avatar (2009 film)">Fair use</a>, <a href="https://en.wikipedia.org/w/index.php?curid=23732044">Link</a></p>',
"https://youtu.be/5PSNL1qE6VY")
# print(avatar.storyline)
# avatar.show_trailer()
movies = [toy_story, avatar]
fresh_tomatoes.open_movies_page(movies)
# print(media.Movie.__doc__)
# print(media.Movie.__name__)
# print(media.Movie.__module__)
|
8,830 | 03f3fcb38877570dea830a56460061bd3ccb8927 | import os
import matplotlib.pyplot as plt
import cv2
import numpy as np
def divide_img(img_path, img_name, save_path):
imgg = img_path +'\\' +img_name
print(imgg)
img = cv2.imread(imgg)
print(img)
# img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
h = img.shape[0]
w = img.shape[1]
n = 8
m = 8
print('h={},w={},n={},m={}'.format(h, w, n, m))
dis_h = int(np.floor(h / n))
dis_w = int(np.floor(w / m))
num = 0
for i in range(n):
for j in range(m):
num += 1
print('i,j={}{}'.format(i, j))
sub = img[dis_h * i:dis_h * (i + 1), dis_w * j:dis_w * (j + 1), :]
            # prefix tiles with the source image name so tiles from different
            # images do not overwrite one another
            cv2.imwrite(os.path.join(save_path, '{}_{}.tif'.format(
                os.path.splitext(img_name)[0], num)), sub)
if __name__ == '__main__':
img_path = r'E:\个人文件夹\土地利用编码\tif'
save_path = r'E:\个人文件夹\土地利用编码\tif1'
img_list = os.listdir(img_path)
for name in img_list:
print(name)
divide_img(img_path, name, save_path) |
8,831 | 23f491bbf26ede9052ecdab04b8c00cc78db5a7e | from sklearn import preprocessing
from random import shuffle
import numpy as np
import collections
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras import backend as K
from gensim.models.keyedvectors import KeyedVectors
from nltk.tokenize import TreebankWordTokenizer
import re
import pickle
import os
import yaml
import pandas
from typing import List
from tensorflow.keras.utils import to_categorical
from tensorflow.keras import losses, optimizers
from early_stopping import EarlyStoppingAtMaxMacroF1
import json
import hashlib
SEED = 7
def read_csv_json(file_name) -> pandas.DataFrame:
if file_name.endswith('json') or file_name.endswith('jsonl'):
df = pandas.read_json(file_name, lines=True)
elif file_name.endswith('csv'):
df = pandas.read_csv(file_name)
else:
raise NotImplementedError
return df
def use_only_alphanumeric(input):
    pattern = re.compile(r'[\W\'"]+')
output = pattern.sub(' ', input).strip()
return output
def tokenize_and_vectorize(tokenizer, embedding_vector, dataset, embedding_dims):
vectorized_data = []
# probably could be optimized further
ds1 = [use_only_alphanumeric(samp.lower()) for samp in dataset]
token_list = [tokenizer.tokenize(sample) for sample in ds1]
for tokens in token_list:
vecs = []
for token in tokens:
try:
vecs.append(embedding_vector[token].tolist())
except KeyError:
# print('token not found: (%s) in sentence: %s' % (token, ' '.join(tokens)))
np.random.seed(int(hashlib.sha1(token.encode()).hexdigest(), 16) % (10 ** 6))
unk_vec = np.random.rand(embedding_dims)
vecs.append(unk_vec.tolist())
continue
vectorized_data.append(vecs)
return vectorized_data
def pad_trunc(data, maxlen):
"""
For a given dataset pad with zero vectors or truncate to maxlen
"""
new_data = []
# Create a vector of 0s the length of our word vectors
zero_vector = []
for _ in range(len(data[0][0])):
zero_vector.append(0.0)
for sample in data:
if len(sample) > maxlen:
temp = sample[:maxlen]
elif len(sample) < maxlen:
temp = list(sample)
# Append the appropriate number 0 vectors to the list
additional_elems = maxlen - len(sample)
for _ in range(additional_elems):
temp.append(zero_vector)
else:
temp = sample
new_data.append(temp)
return new_data
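# Illustrative behaviour (shapes assumed): with 2-dimensional word vectors and
# maxlen=3, a 1-token sample is padded with zero vectors and a 4-token sample
# is truncated:
# pad_trunc([[[1., 2.]], [[1., 2.]] * 4], 3)
# -> [[[1., 2.], [0., 0.], [0., 0.]], [[1., 2.], [1., 2.], [1., 2.]]]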
def save(model, le, path, history):
'''
save model based on model, encoder
'''
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print(f'saving model to {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes')
with open(structure_file, "w") as json_file:
json_file.write(model.to_json())
model.save_weights(weight_file)
np.save(labels_file, le.categories_[0])
with open(os.path.join(path, "log.json"), 'w') as f:
json.dump(history.history, f)
def load(path):
print(f'loading model from {path}')
structure_file = os.path.join(path, 'structure.json')
weight_file = os.path.join(path, 'weight.h5')
labels_file = os.path.join(path, 'classes.npy')
with open(structure_file, "r") as json_file:
json_string = json_file.read()
model = model_from_json(json_string)
model.load_weights(weight_file)
model._make_predict_function()
#le = preprocessing.LabelEncoder()
categories = np.load(labels_file)
le = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le.fit([[c] for c in categories])
json_file.close()
return model, le
def predict(session, graph, model, vectorized_input, num_classes):
if session is None:
raise ("Session is not initialized")
if graph is None:
raise ("Graph is not initialized")
if model is None:
raise ("Model is not initialized")
with session.as_default():
with graph.as_default():
probs = model.predict_proba(vectorized_input)
preds = model.predict_classes(vectorized_input)
preds = to_categorical(preds, num_classes=num_classes)
return (probs, preds)
class Model:
def __init__(self, word2vec_pkl_path, config_path, label_smoothing=0):
with open(config_path, 'r') as f:
self.model_cfg = yaml.safe_load(f)['model']
self.tokenizer = TreebankWordTokenizer()
with open(word2vec_pkl_path, 'rb') as f:
self.vectors = pickle.load(f)
self.model = None
self.session = None
self.graph = None
self.le_encoder = None
self.label_smoothing = label_smoothing
def train(self, tr_set_path: str, save_path: str, va_split: float=0.1, stratified_split: bool=False, early_stopping: bool=True):
"""
Train a model for a given dataset
Dataset should be a list of tuples consisting of
training sentence and the class label
Args:
tr_set_path: path to training data
save_path: path to save model weights and labels
va_split: fraction of training data to be used for validation in early stopping. Only effective when stratified_split is set to False. Will be overridden if stratified_split is True.
stratified_split: whether to split training data stratified by class. If True, validation will be done on a fixed val set from a stratified split out of the training set with the fraction of va_split.
early_stopping: whether to do early stopping
Returns:
history of training including average loss for each training epoch
"""
df_tr = read_csv_json(tr_set_path)
if stratified_split:
df_va = df_tr.groupby('intent').apply(lambda g: g.sample(frac=va_split, random_state=SEED))
df_tr = df_tr[~df_tr.index.isin(df_va.index.get_level_values(1))]
va_messages, va_labels = list(df_va.text), list(df_va.intent)
va_dataset = [{'data': va_messages[i], 'label': va_labels[i]} for i in range(len(df_va))]
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
(x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
(x_va, y_va, _) = self.__preprocess(va_dataset, le_encoder)
else:
tr_messages, tr_labels = list(df_tr.text), list(df_tr.intent)
tr_dataset = [{'data': tr_messages[i], 'label': tr_labels[i]} for i in range(len(df_tr))]
(x_train, y_train, le_encoder) = self.__preprocess(tr_dataset)
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
session.run(tf.global_variables_initializer())
model = self.__build_model(num_classes=len(le_encoder.categories_[0]))
model.compile(
loss=losses.CategoricalCrossentropy(label_smoothing=self.label_smoothing),
#metrics=['categorical_accuracy'],
optimizer=self.model_cfg.get('optimizer', 'adam') #default lr at 0.001
#optimizer=optimizers.Adam(learning_rate=5e-4)
)
# early stopping callback using validation loss
callback = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0,
patience=5,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=True,
)
#callback = EarlyStoppingAtMaxMacroF1(
# patience=100, # record all epochs
# validation=(x_va, y_va)
#)
print('start training')
history = model.fit(x_train, y_train,
batch_size=self.model_cfg['batch_size'],
epochs=100,
validation_split=va_split if not stratified_split else 0,
validation_data=(x_va, y_va) if stratified_split else None,
callbacks=[callback] if early_stopping else None)
history.history['train_data'] = tr_set_path
print(f'finished training in {len(history.history["loss"])} epochs')
save(model, le_encoder, save_path, history)
self.model = model
self.session = session
self.graph = graph
self.le_encoder = le_encoder
# return training history
return history.history
def __preprocess(self, dataset, le_encoder=None):
'''
Preprocess the dataset, transform the categorical labels into numbers.
Get word embeddings for the training data.
'''
shuffle(dataset)
data = [s['data'] for s in dataset]
#labels = [s['label'] for s in dataset]
labels = [[s['label']] for s in dataset]
#le_encoder = preprocessing.LabelEncoder()
if le_encoder is None:
le_encoder = preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
le_encoder.fit(labels)
encoded_labels = le_encoder.transform(labels)
print('%s intents with %s samples' % (len(le_encoder.get_feature_names()), len(data)))
#print('train %s intents with %s samples' % (len(set(labels)), len(data)))
#print(collections.Counter(labels))
print(le_encoder.categories_[0])
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, data, self.model_cfg['embedding_dims'])
# split_point = int(len(vectorized_data) * .9)
x_train = vectorized_data # vectorized_data[:split_point]
y_train = encoded_labels # encoded_labels[:split_point]
x_train = pad_trunc(x_train, self.model_cfg['maxlen'])
x_train = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
y_train = np.array(y_train)
return x_train, y_train, le_encoder
def __build_model(self, num_classes=2, type='keras'):
print('Build model')
model = Sequential()
layers = self.model_cfg.get('layers', 1)
for l in range(layers):
self.__addLayers(model, self.model_cfg)
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def __addLayers(self, model, model_cfg):
maxlen = model_cfg.get('maxlen', 400)
strides = model_cfg.get('strides', 1)
embedding_dims = model_cfg.get('embedding_dims', 300)
filters = model_cfg.get('filters', 250)
activation_type = model_cfg.get('activation', 'relu')
kernel_size = model_cfg.get('kernel_size', 3)
hidden_dims = model_cfg.get('hidden_dims', 200)
model.add(Conv1D(
filters,
kernel_size,
padding='valid',
activation=activation_type,
strides=strides,
input_shape=(maxlen, embedding_dims)))
model.add(GlobalMaxPooling1D())
model.add(Dense(hidden_dims))
model.add(Activation(activation_type))
def load(self, path):
K.clear_session()
graph = tf.Graph()
with graph.as_default():
session = tf.Session()
with session.as_default():
self.session = session
self.graph = graph
(model, le) = load(path)
self.model = model
self.le_encoder = le
def predict(self, input: List[str]):
vectorized_data = tokenize_and_vectorize(self.tokenizer, self.vectors, input, self.model_cfg['embedding_dims'])
x_train = pad_trunc(vectorized_data, self.model_cfg['maxlen'])
vectorized_input = np.reshape(x_train, (len(x_train), self.model_cfg['maxlen'], self.model_cfg['embedding_dims']))
(probs, preds) = predict(self.session, self.graph, self.model, vectorized_input, len(self.le_encoder.categories_[0]))
probs = probs.tolist()
results = self.le_encoder.inverse_transform(preds)
output = [{'input': input[i],
'embeddings': x_train[i],
#'label': r,
'label': r.item(),
'highestProb': max(probs[i]),
#'prob': dict(zip(self.le_encoder.classes_, probs[i]))
'prob': dict(zip(self.le_encoder.categories_[0], probs[i]))
} for i, r in enumerate(results)]
return output
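# Usage sketch (file names are illustrative assumptions; the training file
# needs 'text' and 'intent' columns, as read_csv_json/train expect above):
# model = Model('word2vec.pkl', 'config.yaml')
# history = model.train('train.csv', 'saved_model/', stratified_split=True)
# print(model.predict(['hello there'])[0]['label'])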
|
8,832 | f5f14e4d114855b7eef555db182ee991bdf26c39 | from django.contrib.auth.models import BaseUserManager
class MyUserManager(BaseUserManager):
def create_user(self, email, password, full_name, national_code, mobile, address):
if not email :
raise ValueError('ایمیل الزامی است')
if not full_name :
raise ValueError('نام و نام خانوادگی الزامی است')
if not national_code :
raise ValueError('کدملی الزامی است')
if not mobile :
raise ValueError('موبایل الزامی است')
if not address :
raise ValueError('آدرس الزامی است')
user = self.model(
email = self.normalize_email(email) ,
full_name = full_name ,
national_code = national_code ,
mobile = mobile ,
address = address,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, full_name, national_code, mobile, address):
user = self.create_user(email, password, full_name, national_code, mobile, address)
user.is_admin = True
user.save(using=self._db)
return user
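# Usage sketch (model wiring is an illustrative assumption): attach the manager
# to a custom user model so the validation above runs on user creation, e.g.
# class MyUser(AbstractBaseUser):
#     objects = MyUserManager()
#     USERNAME_FIELD = 'email'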
|
8,833 | 75270fb4ed059f134b47b8937717cb7fe05d9499 | from threading import Lock
from typing import Callable, Any
from remote.domain.commandCallback import CommandCallback
from remote.domain.commandStatus import CommandStatus
from remote.service.remoteService import RemoteService
from ui.domain.subroutine.iSubroutineRunner import ISubroutineRunner
class RemoteSubroutineRunner(ISubroutineRunner):
def __init__(self, remote_service: RemoteService) -> None:
self._remote_service = remote_service
self._callback: CommandCallback = None
self._busy = False
self._busy_lock = Lock()
def execute_charge_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_charge_subroutine, callback)
def execute_go_home_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_go_home_subroutine, callback)
def execute_read_qr_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_read_qr_subroutine, callback)
def execute_grab_subroutine(self, target: str, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_grab_subroutine, callback, target=target)
def execute_drop_subroutine(self, target: str, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_drop_subroutine, callback, target=target)
def execute_switch_light_subroutine(self, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_switch_light_subroutine, callback)
def execute_directional_movement(self, direction: str, speed: str, distance: float,
callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_directional_movement, callback,
direction=direction, speed=speed, distance=distance)
def execute_rotational_movement(self, angle: float, callback: CommandCallback) -> None:
"""
:raises BlockingIOError: command already running
"""
self._start_command(self._remote_service.execute_rotational_movement, callback, angle=angle)
def execute_activate_magnet(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_activate_magnet, callback)
def execute_deactivate_magnet(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_deactivate_magnet, callback)
def execute_discharge_magnet(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_discharge_magnet, callback)
def execute_update_directions_subroutine(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_update_directions, callback)
def execute_championship_subroutine(self, callback: CommandCallback):
self._start_command(self._remote_service.execute_championship, callback)
def execute_look_down(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_look_down, callback)
def execute_look_ahead(self, callback: CommandCallback) -> None:
self._start_command(self._remote_service.execute_look_ahead, callback)
def _command_done(self, status: CommandStatus) -> None:
with self._busy_lock:
self._busy = False
self._callback(status)
def _start_command(self, function: Callable[[Any], None], callback: CommandCallback, **kwargs) -> None:
"""
:raises BlockingIOError: command already running
"""
with self._busy_lock:
if self._busy:
raise BlockingIOError()
self._busy = True
self._callback = callback
kwargs["callback"] = self._command_done
function(**kwargs)
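# Usage sketch (service construction is an assumption of this example):
# commands run one at a time, and a second call before the callback fires
# raises BlockingIOError, per the busy flag above.
# runner = RemoteSubroutineRunner(remote_service)
# runner.execute_charge_subroutine(lambda status: print(status))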
|
8,834 | 8f1e6ea93b2dd7add256cb31d2c621aa69721609 | import wx
import os
# os.environ["HTTPS_PROXY"] = "http://user:pass@192.168.1.107:3128"
import wikipedia
import wolframalpha
import pyttsx3
import webbrowser
import winshell
import json
import requests
import ctypes
import random
from urllib.request import urlopen
import speech_recognition as sr
import ssl
import urllib.request
import urllib.parse
import re
from regression import Regression
# Remove SSL error
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
headers = {'user-agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/53.0.2785.143 Safari/537.36')}
#speak = wincl.Dispatch("SAPI.SpVoice")
speak = pyttsx3.init()
voices = speak.getProperty('voices')
voice = voices[1]
speak.setProperty('voice', voice.id)
# Requirements
videos = ['C:\\Users\\nEW u\\Videos\\Um4WR.mkv', 'C:\\Users\\nEW u\\Videos\\Jaatishwar.mkv']
app_id = 'GY6T92-YG5RXA85AV'
# GUI creation
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None,
pos=wx.DefaultPosition, size=wx.Size(450, 100),
style=wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION |
wx.CLOSE_BOX | wx.CLIP_CHILDREN,
title="Assistant")
panel = wx.Panel(self)
#ico = wx.Icon('programming.jpg', type=wx.ICON_ASTERISK, desiredWidth=-1, desiredHeight=-1)
#self.SetIcon(ico)
my_sizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(panel,
label="Hello Sir. How can I help you?")
my_sizer.Add(lbl, 0, wx.ALL, 5)
self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER,
size=(400, 30))
self.txt.SetFocus()
self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
my_sizer.Add(self.txt, 0, wx.ALL, 5)
panel.SetSizer(my_sizer)
self.Show()
speak.say('''Welcome back Sir, Your assistant at your service.''')
speak.runAndWait()
def OnEnter(self, event):
put = self.txt.GetValue()
put = put.lower()
link = put.split()
r = sr.Recognizer()
if put == '':
with sr.Microphone() as src:
r.adjust_for_ambient_noise(src)
speak.say("Yes? How can I help You?")
speak.runAndWait()
audio = r.listen(src)
try:
put = r.recognize_google(audio)
put = put.lower()
link = put.split()
self.txt.SetValue(put)
except sr.UnknownValueError:
print("Google Speech Recognition could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google STT; {0}".format(e))
except:
print("Unknown exception occurred!")
# Open a webpage
if put.startswith('open '):
try:
speak.say("opening "+link[1])
speak.runAndWait()
webbrowser.open('http://www.'+link[1]+'.com')
except:
print('Sorry, No Internet Connection!')
# Play Song on Youtube
elif put.startswith('play '):
try:
link = '+'.join(link[1:])
s = link.replace('+', ' ')
query_string = urllib.parse.urlencode({"search_query" : link})
html_content = urllib.request.urlopen("http://www.youtube.com/results?" + query_string)
search_results = re.findall(r'href=\"\/watch\?v=(.{11})', html_content.read().decode())
print("http://www.youtube.com/watch?v=" + search_results[0])
speak.say("playing "+s)
speak.runAndWait()
webbrowser.open("http://www.youtube.com/watch?v=" + search_results[0])
except:
print('Sorry, No internet connection!')
# Google Search
elif put.startswith('search '):
try:
link = '+'.join(link[1:])
say = link.replace('+', ' ')
speak.say("searching on google for "+say)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q='+link)
except:
print('Sorry, No internet connection!')
# Empty Recycle bin
elif put.startswith('empty '):
try:
winshell.recycle_bin().empty(confirm=False,
show_progress=False, sound=True)
speak.say("Recycle Bin Empty")
speak.runAndWait()
except:
speak.say("Unknown Error")
speak.runAndWait()
# News
elif put.startswith('science '):
try:
jsonObj = urlopen('''https://newsapi.org/v1/articles?source=new-scientist&sortBy=top&apiKey=your_API_here''')
data = json.load(jsonObj)
i = 1
speak.say('''Here are some top science news from new scientist''')
speak.runAndWait()
print(''' ================NEW SCIENTIST=============
'''+'\n')
for item in data['articles']:
print(str(i)+'. '+item['title']+'\n')
print(item['description']+'\n')
i += 1
except:
print('Sorry, No internet connection')
elif put.startswith('headlines '):
try:
jsonObj = urlopen('''https://newsapi.org/v1/articles?source=the-times-of-india&sortBy=top&apiKey=your_API_here''')
data = json.load(jsonObj)
i = 1
speak.say('Here are some top news from the times of india')
speak.runAndWait()
print(''' ===============TIMES OF INDIA============'''
+'\n')
for item in data['articles']:
print(str(i)+'. '+item['title']+'\n')
print(item['description']+'\n')
i += 1
except Exception as e:
print(str(e))
# Lock the device
elif put.startswith('lock '):
try:
speak.say("locking the device")
speak.runAndWait()
ctypes.windll.user32.LockWorkStation()
except Exception as e:
print(str(e))
# Play videos in boredom
elif put.endswith('bored'):
try:
speak.say('''Sir, I\'m playing a video.
Hope you like it''')
speak.runAndWait()
video = random.choice(videos)
os.startfile(video)
except Exception as e:
print(str(e))
# Say Whats up
elif put.startswith('whats up'):
try:
speak.say('''Nothing much, just trying to become the perfect assistant!''')
speak.runAndWait()
except Exception as e:
print(str(e))
#Show stocks
elif put.startswith('show stocks'):
try:
Regression.execute()
except Exception as e:
print(str(e))
# Other Cases
else:
try:
# wolframalpha
client = wolframalpha.Client(app_id)
res = client.query(put)
ans = next(res.results).text
print(ans)
speak.say(ans)
speak.runAndWait()
except:
# wikipedia/google
put = put.split()
put = ' '.join(put[:])
#print(put)
print(wikipedia.summary(put))
speak.say('Searched google for '+put)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q='+put)
# Trigger GUI
if __name__ == "__main__":
app = wx.App(True)
frame = MyFrame()
app.MainLoop() |
8,835 | 661b622708692bd9cd1b3399835f332c86e39bf6 | class Error(Exception):
pass
class TunnelInstanceError(Error):
def __init__(self, expression, message):
self.expression = expression
self.message = message
class TunnelManagerError(Error):
def __init__(self, expression, message):
self.expression = expression
self.message = message
|
8,836 | 11db76cba3dd76cad0d660a0e189d3e4c465071b | from typing import Any, Optional
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
from scene_manager.loader.loader import Loader
from scene_manager.utils import content_type_checker
class ScenesMiddleware(BaseMiddleware):
def __init__(self, *, loader: Optional[Loader] = None, default_scene_name: Optional[str] = None):
self._default_scene_name = default_scene_name or "start"
self._loader = loader or Loader.get_current()
if self._loader is None:
self._loader = Loader()
if not self._loader.is_scenes_loaded:
self._loader.load_scenes()
self._storage = self._loader.data_storage
super().__init__()
async def on_post_process_message(self, message: types.Message, results: tuple, data: dict):
if data:
return
user_scene_name = await self._get_scene_name(message)
for scene_model in self._loader.handlers_storage.get_message_scene(user_scene_name):
if content_type_checker(message, scene_model.config.get("content_types")):
await scene_model.handler(message)
else:
otherwise_handler = scene_model.config.get("otherwise_handler")
if otherwise_handler is not None:
await otherwise_handler(message)
async def on_post_process_callback_query(
self, callback_query: types.CallbackQuery, results: tuple, data: dict
):
if data:
return
user_scene_name = await self._get_scene_name(callback_query)
for scene_model in self._loader.handlers_storage.get_callback_query_scene(user_scene_name):
await scene_model.handler(callback_query)
async def _get_scene_name(self, ctx) -> Any:
user_id = ctx.from_user.id
user_scene = await self._storage.get(user_id)
if user_scene is None:
await self._storage.put(user_id, self._default_scene_name)
user_scene = self._default_scene_name
return user_scene
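# Usage sketch (dispatcher setup is an assumption): register the middleware on
# an aiogram Dispatcher so scene handlers run in post-processing, e.g.
# dp.middleware.setup(ScenesMiddleware())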
|
8,837 | f14ff29a1a76c2916cb211c476a56aaa5061bf71 | # -*- coding: utf-8 -*-
import sys
import setuptools
from distutils.core import setup
with open("README.md", "r") as fh:
long_description = fh.read()
def get_info():
init_file = 'PIKACHU/__init__.py'
with open(init_file, 'r') as f:
for line in f.readlines():
if "=" in line:
exec(compile(line, "", 'exec'))
return locals()['name'], locals()['author'], locals()['version']
NAME, AUTHOR, VERSION = get_info()
sys.dont_write_bytecode = True
setuptools.setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email="fufu.bluesand@gmail.com",
description="a PIKA based, Cuter and more Human rabbitmq queue Utility (´_ゝ`)",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smilefufu/PIKACHU",
data_files = [("", ["LICENSE"])],
packages=setuptools.find_packages(),
install_requires=[
"pika",
],
classifiers=(
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent'
),
)
|
8,838 | decd5d50025fc3b639be2f803d917ff313cf7219 | from collections import Counter
N = int(input())
lst = list(map(int, input().split()))
ans = []
for i in range(N):
ans.append(abs(i+1-lst[i]))
s = Counter(ans)
rst = []
for i in s:
rst.append([i, s[i]])
rst.sort(key=lambda x: x[0], reverse=True)
for i in rst:
if i[1] > 1:
print(i[0], i[1])
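# Worked example: N=3 with permutation 3 1 2 gives distances |1-3|, |2-1|,
# |3-2| = 2, 1, 1, so only "1 2" is printed (distances occurring once are
# suppressed).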
|
8,839 | 728f9402b3ce4b297be82b3ba1a17c4180ac7c0d | '''
Statistics models module. This module contains the database models for the
Statistics class and the StatisticsCategory class.
@author Hubert Ngu
@author Jason Hou
'''
from django.db import models
class Statistics(models.Model):
'''
Statistics model class. This represents a single tuple in the
statitics_generator_statistics table in the database.
'''
number_surveys = models.IntegerField()
number_listings = models.IntegerField()
number_buyer_surveys = models.IntegerField()
number_seller_surveys = models.IntegerField()
number_buyer_listings = models.IntegerField()
number_seller_listings = models.IntegerField()
average_transaction_amount = models.FloatField()
buyer_transaction_amount = models.FloatField()
seller_transaction_amount = models.FloatField()
successful_transaction_amount = models.FloatField()
average_transaction_time = models.IntegerField()
buyer_transaction_success_rate = models.FloatField()
seller_transaction_success_rate = models.FloatField()
total_transaction_success_rate = models.FloatField()
class StatisticsCategory(models.Model):
'''
StatisticsCategory model class. This represents a single tuple in the
statitics_generator_statisticscategory table in the database.
'''
statistics_id = models.IntegerField()
category = models.CharField(max_length=30)
survey_count = models.IntegerField()
buyer_count = models.IntegerField()
seller_count = models.IntegerField()
amount = models.IntegerField() |
8,840 | ae475dc95c6a099270cf65d4b471b4b430f02303 | """
Kernel density estimation plots for geochemical data.
"""
import copy
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from ...comp.codata import close
from ...util.log import Handle
from ...util.meta import get_additional_params, subkwargs
from ...util.plot.axes import add_colorbar, init_axes
from ...util.plot.density import (
get_axis_density_methods,
percentile_contour_values_from_meshz,
plot_Z_percentiles,
)
from ...util.plot.style import DEFAULT_CONT_COLORMAP
from .grid import DensityGrid
from .ternary import ternary_heatmap
logger = Handle(__name__)
def density(
arr,
ax=None,
logx=False,
logy=False,
bins=25,
mode="density",
extent=None,
contours=[],
percentiles=True,
relim=True,
cmap=DEFAULT_CONT_COLORMAP,
shading="auto",
vmin=0.0,
colorbar=False,
**kwargs
):
"""
Creates diagramatic representation of data density and/or frequency for either
binary diagrams (X-Y) or ternary plots.
Additional arguments are typically forwarded
to respective :mod:`matplotlib` functions
:func:`~matplotlib.pyplot.pcolormesh`,
:func:`~matplotlib.pyplot.hist2d`,
:func:`~matplotlib.pyplot.hexbin`,
:func:`~matplotlib.pyplot.contour`, and
:func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).
Parameters
----------
arr : :class:`numpy.ndarray`
Dataframe from which to draw data.
ax : :class:`matplotlib.axes.Axes`, `None`
The subplot to draw on.
logx : :class:`bool`, `False`
Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.
logy : :class:`bool`, `False`
Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.
bins : :class:`int`, 20
Number of bins used in the gridded functions (histograms, KDE evaluation grid).
mode : :class:`str`, 'density'
Different modes used here: ['density', 'hexbin', 'hist2d']
extent : :class:`list`
Predetermined extent of the grid for which to from the histogram/KDE. In the
general form (xmin, xmax, ymin, ymax).
contours : :class:`list`
Contours to add to the plot, where :code:`mode='density'` is used.
percentiles : :class:`bool`, `True`
Whether contours specified are to be converted to percentiles.
relim : :class:`bool`, :code:`True`
Whether to relimit the plot based on xmin, xmax values.
cmap : :class:`matplotlib.colors.Colormap`
Colormap for mapping surfaces.
vmin : :class:`float`, 0.
Minimum value for colormap.
shading : :class:`str`, 'auto'
Shading to apply to pcolormesh.
colorbar : :class:`bool`, False
Whether to append a linked colorbar to the generated mappable image.
{otherparams}
Returns
-------
:class:`matplotlib.axes.Axes`
Axes on which the densityplot is plotted.
.. seealso::
Functions:
:func:`matplotlib.pyplot.pcolormesh`
:func:`matplotlib.pyplot.hist2d`
:func:`matplotlib.pyplot.contourf`
Notes
-----
The default density estimates and derived contours are generated based on
kernel density estimates. Assumptions around e.g. 95% of points lying within
a 95% contour won't necessarily be valid for non-normally distributed data
    (instead, this represents the approximate 95th percentile on the kernel
    density estimate). Note that contours are currently only generated for
`mode="density"`; future updates may allow the use of a histogram
basis, which would give results closer to 95% data percentiles.
Todo
----
* Allow generation of contours from histogram data, rather than just
the kernel density estimate.
* Implement an option and filter to 'scatter' points below the minimum threshold
or maximum percentile contours.
"""
if (mode == "density") & np.isclose(vmin, 0.0): # if vmin is not specified
vmin = 0.02 # 2% max height | 98th percentile
if arr.shape[-1] == 3:
projection = "ternary"
else:
projection = None
ax = init_axes(ax=ax, projection=projection, **kwargs)
pcolor, contour, contourf = get_axis_density_methods(ax)
background_color = (*ax.patch.get_facecolor()[:-1], 0.0)
if cmap is not None:
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
cmap = copy.copy(cmap) # without this, it would modify the global cmap
cmap.set_under((1, 1, 1, 0))
if mode == "density":
cbarlabel = "Kernel Density Estimate"
else:
cbarlabel = "Frequency"
valid_rows = np.isfinite(arr).all(axis=-1)
if (mode in ["hexbin", "hist2d"]) and contours:
raise NotImplementedError(
"Contours are not currently implemented for 'hexbin' or 'hist2d' modes."
)
if (arr.size > 0) and valid_rows.any():
# Data can't be plotted if there's any nans, so we can exclude these
arr = arr[valid_rows]
if projection is None: # binary
x, y = arr.T
grid = DensityGrid(
x,
y,
bins=bins,
logx=logx,
logy=logy,
extent=extent,
**subkwargs(kwargs, DensityGrid)
)
if mode == "hexbin":
# extent values are exponents (i.e. 3 -> 10**3)
mappable = ax.hexbin(
x,
y,
gridsize=bins,
cmap=cmap,
extent=grid.get_hex_extent(),
xscale=["linear", "log"][logx],
yscale=["linear", "log"][logy],
**subkwargs(kwargs, ax.hexbin)
)
elif mode == "hist2d":
_, _, _, im = ax.hist2d(
x,
y,
bins=[grid.grid_xe, grid.grid_ye],
range=grid.get_range(),
cmap=cmap,
cmin=[0, 1][vmin > 0],
**subkwargs(kwargs, ax.hist2d)
)
mappable = im
elif mode == "density":
zei = grid.kdefrom(
arr,
xtransform=[lambda x: x, np.log][logx],
ytransform=[lambda y: y, np.log][logy],
mode="edges",
**subkwargs(kwargs, grid.kdefrom)
)
if percentiles: # 98th percentile
vmin = percentile_contour_values_from_meshz(zei, [1.0 - vmin])[1][0]
logger.debug(
"Updating `vmin` to percentile equiv: {:.2f}".format(vmin)
)
if not contours:
# pcolormesh using bin edges
mappable = pcolor(
grid.grid_xei,
grid.grid_yei,
zei,
cmap=cmap,
vmin=vmin,
shading=shading,
**subkwargs(kwargs, pcolor)
)
mappable.set_edgecolor(background_color)
mappable.set_linestyle("None")
mappable.set_lw(0.0)
else:
mappable = _add_contours(
grid.grid_xei,
grid.grid_yei,
zi=zei.reshape(grid.grid_xei.shape),
ax=ax,
contours=contours,
percentiles=percentiles,
cmap=cmap,
vmin=vmin,
**kwargs
)
if relim and (extent is not None):
ax.axis(extent)
elif projection == "ternary": # ternary
if shading == "auto":
shading = "flat" # auto cant' be passed to tripcolor
# zeros make nans in this case, due to the heatmap calculations
arr[~(arr > 0).all(axis=1), :] = np.nan
arr = close(arr)
if mode == "hexbin":
raise NotImplementedError
# density, histogram etc parsed here
coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)
if percentiles: # 98th percentile
vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1][0]
logger.debug("Updating `vmin` to percentile equiv: {:.2f}".format(vmin))
# remove coords where H==0, as ax.tripcolor can't deal with variable alpha :'(
fltr = (zi != 0) & (zi >= vmin)
coords = coords[fltr.flatten(), :]
zi = zi[fltr]
if not contours:
tri_poly_collection = pcolor(
*coords.T,
zi.flatten(),
cmap=cmap,
vmin=vmin,
shading=shading,
**subkwargs(kwargs, pcolor)
)
mappable = tri_poly_collection
else:
mappable = _add_contours(
*coords.T,
zi=zi.flatten(),
ax=ax,
contours=contours,
percentiles=percentiles,
cmap=cmap,
vmin=vmin,
**kwargs
)
ax.set_aspect("equal")
else:
        if arr.ndim not in [0, 1, 2]:
raise NotImplementedError
if colorbar:
cbkwargs = kwargs.copy()
cbkwargs["label"] = cbarlabel
add_colorbar(mappable, **cbkwargs)
return ax
def _add_contours(
*coords,
zi=None,
ax=None,
contours=[],
cmap=DEFAULT_CONT_COLORMAP,
vmin=0.0,
extent=None,
**kwargs
):
"""
Add density-based contours to a plot.
"""
# get the contour levels
percentiles = kwargs.pop("percentiles", True)
levels = contours or kwargs.get("levels", None)
pcolor, contour, contourf = get_axis_density_methods(ax)
if percentiles and not isinstance(levels, int):
# plot individual percentile contours
_cs = plot_Z_percentiles(
*coords,
zi=zi,
ax=ax,
percentiles=levels,
extent=extent,
cmap=cmap,
**kwargs
)
mappable = _cs
else:
# plot interval contours
if levels is None:
levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())
elif isinstance(levels, int):
levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())
else:
raise NotImplementedError
# filled contours
mappable = contourf(
*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs
)
# contours
contour(
*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs
)
return mappable
_add_additional_parameters = True
density.__doc__ = density.__doc__.format(
otherparams=[
"",
get_additional_params(
density,
plt.pcolormesh,
plt.hist2d,
plt.hexbin,
plt.contour,
plt.contourf,
header="Other Parameters",
indent=4,
subsections=True,
),
][_add_additional_parameters]
)
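# Usage sketch (the data below is an illustrative assumption):
# import numpy as np
# ax = density(np.random.randn(500, 2), bins=40, contours=[0.5, 0.95])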
|
8,841 | 96086885e5353f3b4b3277c1daf4ee74831c3b73 | from kivy.uix.boxlayout import BoxLayout
from kivy.graphics import *
from kivy.clock import Clock
from kivy.properties import StringProperty, BooleanProperty
from kivy.uix.popup import Popup
import time
from math import sin, pi
from kivy.lang import Builder
from ui.custom_widgets import I18NPopup, I18NLabel
Builder.load_file('ui/peachy_widgets.kv')
class TouchyLabel(I18NLabel):
is_on = BooleanProperty(False)
def on_touch_down(self, touch):
if touch.is_triple_tap:
self.is_on = not self.is_on
class I18NHelpPopup(I18NPopup):
text_source = StringProperty()
class Dripper(BoxLayout):
def __init__(self, **kwargs):
super(Dripper, self).__init__(**kwargs)
self.index = 0.0
self.sections = 20
self.section_height = 1
self.lasttime = time.time()
Clock.schedule_once(self.redraw)
self.drip_history = []
self.count = 0
def update(self, data):
self.drip_history = data['drip_history']
self.count = data['drips']
def update_parts(self, drips, history):
self.drip_history = history
self.count = drips
def redraw(self, key):
self.index += (time.time() - self.lasttime) * self.sections
self.lasttime = time.time()
if self.index > self.section_height * 2:
self.index = 0
self.draw()
Clock.schedule_once(self.redraw, 1.0 / 30.0)
def on_height(self, instance, value):
self.section_height = self.height / self.sections
def draw(self):
self.canvas.clear()
top = time.time()
bottom = top - self.sections
self.canvas.add(Color(0.99, 0.99, 0.6, 1.0))
self.canvas.add(Rectangle(pos=self.pos, size=self.size))
for (index, drip) in zip(range(len(self.drip_history), 0, -1), self.drip_history):
if drip > bottom:
self.canvas.add(Color(0.35, 0.4, 1.0, 1.0))
y = ((drip - bottom) / self.sections) * self.height
s = sin((self.count - index) / (2 * pi))
self.canvas.add(Ellipse(pos=(self.x + abs(self.width / 2.0 * s), y), size=(self.width / 5.0, 5)))
class LaserWarningPopup(I18NPopup):
text_source = StringProperty()
accepted = StringProperty(None)
def __init__(self, **kwargs):
super(LaserWarningPopup, self).__init__(**kwargs)
def is_safe(self):
        # compare by value; "is" against a string literal is unreliable
        return self.accepted == "True"
|
8,842 | 4ca4d4bd684802b056417be4ee3d7d10e8f5dc85 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from .. import _utilities
import typing
# Export this package's modules as members:
from .authority import *
from .ca_pool import *
from .ca_pool_iam_binding import *
from .ca_pool_iam_member import *
from .ca_pool_iam_policy import *
from .certificate import *
from .certificate_template import *
from .certificate_template_iam_binding import *
from .certificate_template_iam_member import *
from .certificate_template_iam_policy import *
from .get_authority import *
from .get_ca_pool_iam_policy import *
from .get_certificate_template_iam_policy import *
from ._inputs import *
from . import outputs
|
8,843 | e553da92b1bb5dfaa0fb7c702f5be4f66201c75b | # coding: UTF-8
import fileinput
import io
from locale import str
import os
__author__ = 'lidong'
def getDirList( p ):
p = p.replace( "/","\\")
if p[ -1] != "\\":
p = p+"\\"
a = os.listdir( p )
for x in a:
if(os.path.isfile( p + x )):
a, b = os.path.splitext( p + x )
if(0<b.find("bak")):
print (p + x)
os.remove( p + x)
elif(os.path.isdir( p + x )): #.svn
if(0<( p + x ).find(".svn")):
for (p,d,f) in os.walk( p + x):
if p.find('.svn')>0:
print (p + x)
os.popen('rd /s /q %s'%p)
else :
getDirList(p + x)
def createFile( f ):
if(os.path.isfile(f)):
a_file = io.open( f, encoding='utf-8')
print(a_file.readline())
else :
return
while True:
print ( getDirList( "D:\project" ) )
|
8,844 | ca7b0553e55e1c5e6cd23139a158101e72456a50 | from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth.models import User, Group
class UserTests(APITestCase):
def test_user_list(self):
# must be rejected without validation
response = self.client.get('/api/users/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
# must be success
user = User.objects.create(username='user', email='user@example.com', password='user123', is_staff=True)
self.client.force_authenticate(user=user)
response = self.client.get('/api/users/', {}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
actual = response.data['results'][0]
self.assertEqual(actual['username'], user.username)
self.assertEqual(actual['email'], user.email)
|
8,845 | 252d6b381af09dbafb1d10c188eb154e53213033 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 15 06:50:48 2018
@author: Tony
"""
import glob
import pandas as pd
path = r'C:\Users\Tony\Downloads\daily_dataset\daily_dataset' # use your path
frame = pd.DataFrame()
list_ = []
def aggSumFn(path,grpByCol):
allFiles = glob.glob(path + "/*.csv")
for file_ in allFiles:
df = pd.read_csv(file_,index_col=None, header=0)
list_.append(df)
frame = pd.concat(list_)
frame[grpByCol] = pd.to_datetime(frame['day'], format='%Y-%m-%d')
frame=frame.resample('W-Mon', on=grpByCol)['energy_sum'].sum().reset_index().sort_values(by=grpByCol)
frame.columns=['week','total_consumption']
frame.to_csv(r'C:\Users\Tony\Downloads\daily_dataset\summary\weekly_dataset_summary.csv')
print('completed')
aggSumFn(path,'day')
#
|
8,846 | 118380f58cd173d2de5572a1591766e38ca4a7f8 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
# ...
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MONGODB_DB = 'project1'
MONGODB_HOST = 'mongodb'
MONGODB_PORT = 27017
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess' |
8,847 | 53cf2dfe3319c39ca6f1dc890eea578fae654b5b | # Evolutionary Trees contains algorithms and methods used in determining phylogenetic inheritance of various species.
# Main algos UPGMA and CLUSTALW
from dataclasses import dataclass
import FormattingET
@dataclass
class Node:
    age: int
    num: int
    label: str
    alignment: list
    # NB: this hand-written __init__ takes precedence over the dataclass-generated one
    def __init__(self, child1=None, child2=None):
        self.child1 = child1
        self.child2 = child2
#UPGMA algos
def initializeMatrix(m, n):
mtx = [[0 for x in range(n)] for y in range(m)]
return mtx
def initializeClusters(t):
numNodes = len(t)
numLeaves = (numNodes + 1) / 2
clusters = [0]*int(numLeaves)
for i in range(int(numLeaves)):
clusters[i] = t[i]
return clusters
def initializeTree(speciesNames):
numLeaves = len(speciesNames)
t = [Node]*(2*numLeaves - 1)
for i in range(len(t)):
vx = Node()
if i < numLeaves:
vx.label = speciesNames[i]
else:
vx.label = "Ancestor species" + str(i)
vx.num = i
t[i] = vx
return t
def countLeaves(v: Node):
if v.child1 is None or v.child2 is None:
return 1
return countLeaves(v.child1) + countLeaves(v.child2)
def delClusters(clusters, row, col):
del clusters[col]
del clusters[row]
return clusters
def findMinElement(mtx):
minRow = 0
minCol = 1
minElement = mtx[0][1]
for row in range(0, len(mtx)):
for col in range(row+1, len(mtx)):
if mtx[row][col] < minElement:
minRow = row
minCol = col
minElement = mtx[row][col]
return minRow, minCol, minElement
def delRowCol(mtx, row, col):
del mtx[col]
del mtx[row]
for i in range(len(mtx)):
del mtx[i][col]
del mtx[i][row]
return mtx
def addRowCol(mtx, clusters, row, col):
newRow = [0]*(len(mtx) + 1)
for i in range(len(newRow) - 1):
if i != row and i != col:
size1 = countLeaves(clusters[row])
size2 = countLeaves(clusters[col])
avg = (size1*mtx[row][i] + size2*mtx[i][col]) / (size1 + size2)
newRow[i] = avg
mtx.append(newRow)
for i in range(len(newRow) - 1):
mtx[i].append(newRow[i])
return mtx
def upgma(mtx, speciesNames):
tree = initializeTree(speciesNames)
clusters = initializeClusters(tree)
numLeaves = len(mtx)
for i in range(numLeaves, 2*numLeaves - 1):
        row, col, minElement = findMinElement(mtx)  # avoid shadowing the built-in min
        tree[i].age = minElement / 2
        tree[i].child1 = clusters[row]
        tree[i].child2 = clusters[col]
mtx = addRowCol(mtx, clusters, row, col)
clusters.append(tree[i])
mtx = delRowCol(mtx, row, col)
clusters = delClusters(clusters, row, col)
return tree
#CLUSTALW algos
def sumPairScores(align1, align2, idx1, idx2, match, mismatch, gap):
alignment1 = ['']*len(align1)
for i in range(len(align1)):
alignment1[i] = align1[i][idx1]
alignment2 = [''] * len(align2)
for i in range(len(align2)):
alignment2[i] = align2[i][idx2]
score = 0.0
for char in alignment1:
for char2 in alignment2:
if char == '-' and char2 == '-':
continue
elif char == char2:
score += match
elif char != '-' and char2 != '-':
score -= mismatch
else:
score -= gap
return score
def generateScoreTable(align1, align2, match, mismatch, gap, supergap):
scoreTable = [[0 for j in range(len(align2[0]) + 1)] for i in range(len(align1[0]) + 1)]
for i in range(len(scoreTable)):
scoreTable[i][0] = i * (-supergap)
for i in range(len(scoreTable[0])):
scoreTable[0][i] = i * (-supergap)
for i in range(1, len(align1[0]) + 1):
for j in range(1, len(align2[0]) + 1):
up = scoreTable[i-1][j] - supergap
left = scoreTable[i][j-1] - supergap
diag = scoreTable[i-1][j-1] + sumPairScores(align1, align2, i-1, j-1, match, mismatch, gap)
scoreTable[i][j] = max(up, left, diag)
return scoreTable
def progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap):
numRows = len(align1[0]) + 1
numCols = len(align2[0]) + 1
backtrack = [['' for i in range(numCols)] for j in range(numRows)]
for i in range(1, numCols):
backtrack[0][i] = "LEFT"
for i in range(1, numRows):
backtrack[i][0] = "UP"
for i in range(1, numRows):
for j in range(1, numCols):
if (scoreTable[i][j] == scoreTable[i-1][j] - supergap):
backtrack[i][j] = "UP"
elif scoreTable[i][j] == scoreTable[i][j-1] - supergap:
backtrack[i][j] = "LEFT"
else:
backtrack[i][j] = "DIAG"
return backtrack
def backtracker(string, backtrack, orientation):
aligned = ""
row = len(backtrack) - 1
col = len(backtrack[0]) - 1
while(row != 0 or col != 0):
k = len(string)
if backtrack[row][col] == "UP":
if (orientation == "top"):
aligned = "-" + aligned
elif orientation == "side":
aligned = str(string[k - 1]) + aligned
string = string[:k - 1]
row -= 1
elif backtrack[row][col] == "LEFT":
if (orientation == "side"):
aligned = "-" + aligned
elif orientation == "top":
aligned = str(string[k-1]) + aligned
string = string[:k-1]
col -= 1
else:
aligned = str(string[k-1]) + aligned
string = string[:k-1]
row -= 1
col -= 1
return aligned
def outputProgressiveAlign(align1, align2, backtrack):
a = [[""] for i in range(len(align1) + len(align2))]
for i in range(len(align1)):
a[i] = backtracker(align1[i], backtrack, "side")
for j in range(len(align1), len(align2) + len(align1)):
a[j] = backtracker(align2[j - len(align1)], backtrack, "top")
return a
def progressiveAlign(align1, align2, match, mismatch, gap, supergap):
scoreTable = generateScoreTable(align1, align2, match, mismatch, gap, supergap)
backtrack = progressiveBacktrack(scoreTable, align1, align2, match, mismatch, gap, supergap)
opt = outputProgressiveAlign(align1, align2, backtrack)
return opt
def clustalw(guideTree, dnaStrings, match, mismatch, gap, supergap):
for i in range(len(dnaStrings)):
guideTree[i].alignment = [dnaStrings[i]]
for j in range(len(dnaStrings), len(guideTree)):
child1 = guideTree[j].child1
child2 = guideTree[j].child2
guideTree[j].alignment = progressiveAlign(child1.alignment, child2.alignment, match, mismatch, gap, supergap)
return guideTree[len(guideTree) - 1].alignment
#main
if __name__ == "__main__":
print("UPGMA Test")
mtx = [[0, 3, 4, 3], [3, 0, 4, 5], [4, 4, 0, 2], [3, 5, 2, 0]]
labels = ["H", "C", "W", "S"]
tree = upgma(mtx, labels)
print("CLUSTALW Test")
#cats = ["USA", "CHN", "ITA"]
mtxreturn = FormattingET.readMatrixFromFile("Datasets/Input/Test-Example/distance.mtx")
mtx1 = mtxreturn[0]
labels1 = mtxreturn[1]
t = upgma(mtx1, labels1)
match = 1.0
mismatch = 1.0
gap = 1.0
supergap = 6.0
dnaMap = FormattingET.readDNAStringsFromFile("Datasets/Input/Test-Example/RAW/toy-example.fasta")
keyvalues = FormattingET.getKeyValues(dnaMap)
newLabels = keyvalues[0]
newDnaStrings = keyvalues[1]
dnaStrings = FormattingET.rearrangeStrings(labels1, newLabels, newDnaStrings)
align = clustalw(t, dnaStrings, match, mismatch, gap, supergap)
FormattingET.writeAlignmentToFile(align, labels1, "Datasets/Output/Test-Example", "toy.aln")
print(align)
|
8,848 | 65bb3743ca569c295d85016c82c4f6f043778d3f | from django.contrib import admin
from .models import Recipe, Ingredient, ChosenIngredient, timezone
# Register your models here.
admin.site.register(Ingredient)
admin.site.site_header = "Chef's Apprentice Admin"
admin.site.site_title = "Chef's Apprentice Admin Portal"
admin.site.index_title = "Welcome to Chef's Apprentice Admin Portal"
class ChosenIngredientInLine(admin.TabularInline):
model = ChosenIngredient
# defines what is shown on the Recipe display in the admin page
class RecipeAdmin(admin.ModelAdmin):
list_display = ("title", "visible", "author")
actions = ["make_visible", "make_hidden", "delete_selected"]
exclude = ('date_posted', 'ingredients')
inlines = [
ChosenIngredientInLine,
]
class Meta:
model = Recipe
    # functions that register make_visible and make_hidden as actions on the admin page
def make_visible(self, request, queryset):
queryset.update(visible=True)
queryset.update(date_posted=timezone.now())
def make_hidden(self, request, queryset):
queryset.update(visible=False)
# expose these models on the admin page
admin.site.register(Recipe, RecipeAdmin)
|
8,849 | 72bbbe78db746febc9a36a676e0fa2d97bf5e81e | """ Crie um programa onde o usuario possa digitar sete valores numericos e cadastre-os em uma lisa unicaque mantenha
separados os valores pares e impares. No final, mostre os valores ares e impares em ordem crescente """
n = [[],[]]
for c in range(0,7):
    num = int(input(f'Enter value {c+1}: '))
res = num % 2
if res == 0:
n[0].append(num)
else:
n[1].append(num)
n[0].sort()
n[1].sort()
print(f'Even numbers: {n[0]}')
print(f'Odd numbers: {n[1]}')
8,850 | 81f49c55edff7678e9d1745e39a8370e2c31c9ea | """
___________________________________________________
| _____ _____ _ _ _ |
| | __ \ | __ (_) | | | |
| | |__) |__ _ __ __ _ _ _| |__) || | ___ | |_ |
| | ___/ _ \ '_ \ / _` | | | | ___/ | |/ _ \| __| |
| | | | __/ | | | (_| | |_| | | | | | (_) | |_ |
| |_| \___|_| |_|\__, |\__,_|_| |_|_|\___/ \__| |
| __/ | |
| GNU/Linux based |___/ Multi-Rotor UAV Autopilot |
|___________________________________________________|
Movement Activity Class
Copyright (C) 2014 Tobias Simon, Integrated Communication Systems Group, TU Ilmenau
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. """
from math import hypot
from time import sleep
from util.geomath import LinearInterpolation
from numpy import array, zeros
from pilot_pb2 import *
from activity import Activity, StabMixIn
from util.geomath import gps_add_meters, gps_meters_offset
from util.srtm import SrtmElevMap
_srtm_elev_map = SrtmElevMap()
class MoveActivity(Activity, StabMixIn):
Z_SPEED_MAX = 2
SRTM_SAFETY_ALT = 20
def __init__(self, icarus):
Activity.__init__(self, icarus)
self.canceled = False
def run(self):
# shortcut identifiers:
arg = self.icarus.arg
move_data = arg.move_data
pilot = self.icarus.pilot
params = pilot.params
fsm = self.icarus.fsm
prev_setp_rel = self.icarus.setpoints
start_gps = (params.start_lat, params.start_lon)
prev_setp_gps = gps_add_meters(start_gps, prev_setp_rel[0 : 2])
# calculate target x, y, z and move
coord = [None, None, None] # x, y, z setpoints
if arg.glob:
# set global lat, lon postion:
glob_sp = [None, None, None]
for i in xrange(3):
name = 'p%d' % i
if move_data.HasField(name):
glob_sp[i] = getattr(move_data, name)
print 'p0, p1, p2 = ', glob_sp
if arg.rel:
print 'glob, rel'
# interpret lat, lon, alt as relative
# covert previous x and y setpoints to rad, using start_lat, start_lon:
gps = list(prev_setp_gps)
                for i in range(0, 2):
                    if glob_sp[i] is not None:
                        gps[i] += glob_sp[i]
# convert from wsg84 to relative:
coord[0 : 2] = gps_meters_offset(start_gps, gps)
# add z value:
coord[2] = prev_setp_rel[2]
                if glob_sp[2] is not None:
                    coord[2] += glob_sp[2]
else:
print 'glob, abs'
# interpret lat, lon, alt as absolute
                for i in range(0, 2):
                    if glob_sp[i] is None:
                        glob_sp[i] = prev_setp_gps[i]
print start_gps, glob_sp[0 : 2]
coord[0 : 2] = gps_meters_offset(start_gps, glob_sp[0 : 2])
                if glob_sp[2] is not None:
                    coord[2] = glob_sp[2] - params.start_alt
else:
coord[2] = prev_setp_rel[2]
else:
# local position update:
for i in xrange(3):
name = 'p%d' % i
if move_data.HasField(name):
if arg.rel:
print 'local, rel'
# relative local coordinate:
coord[i] = prev_setp_rel[i] + getattr(move_data, name)
else:
print 'local, abs'
# absolute local coordinate:
coord[i] = getattr(move_data, name)
else:
coord[i] = prev_setp_rel[i]
print 'coord output:', coord
self.icarus.setpoints = coord
# set position
pilot.set_ctrl_param(POS_E, coord[0])
pilot.set_ctrl_param(POS_N, coord[1])
"""
# did the altitude change?:
if coord[2] != prev_setp_rel[2]:
# set up linear z interpolation between start and destination points:
dist = hypot(prev_setp_rel[0] - coord[0], prev_setp_rel[1] - coord[1])
z_interp = LinearInterpolation(0.0, start_z, dist, coord[2])
# update z setpoint linearly between starting position and destination:
target_dist = hypot(pilot.mon[5], pilot.mon[6])
while target_dist > self.LAT_STAB_EPSILON:
sleep(1)
if self.canceled:
pilot.set_ctrl_param(POS_N, pilot.mon[0])
pilot.set_ctrl_param(POS_E, pilot.mon[1])
pilot.set_ctrl_param(POS_U, pilot.mon[2])
self.stabilize()
return # not going into hovering state
z = z_interp(dist - target_dist)
# check elevation map:
        srtm_alt = 1000.0 #_srtm_elev_map.lookup(lat, lon) - params.start_alt
        if z < srtm_alt + self.SRTM_SAFETY_ALT:
            z = srtm_alt + self.SRTM_SAFETY_ALT
pilot.set_ctrl_param(POS_Z, z)
"""
self.stabilize()
if not self.canceled:
fsm.handle('done')
def _cancel(self):
self.canceled = True
|
8,851 | 52426ec670dd5ca522c7fb0b659e3a42b16ff326 | #!/usr/bin/python
f = open('/etc/passwd','r')
users_and_ids = []
for line in f:
u,_,id,_ = line.split(':',3)
users_and_ids.append((u,int(id)))
users_and_ids.sort(key = lambda pair:pair[1])
for usr, uid in users_and_ids:  # tuples are (username, uid); the old names were swapped and shadowed built-in id
    print usr, uid
|
8,852 | 806bdb75eed91d1429d8473a50c136b58a736147 | """
Visualizes the predictions of a GQCNN on a dataset: TP, TN, FP, FN.
Author: Vishal Satish
"""
import copy
import logging
import numpy as np
import os
import sys
from random import shuffle
import autolab_core.utils as utils
from autolab_core import YamlConfig, Point
from perception import BinaryImage, ColorImage, DepthImage, GdImage, GrayscaleImage, RgbdImage, RenderMode
from gqcnn import Grasp2D, GQCNN, ClassificationResult, InputDataMode, ImageMode, ImageFileTemplates
from gqcnn import Visualizer as vis2d
import IPython
class GQCNNPredictionVisualizer(object):
""" Class to visualize predictions of GQCNN on a specified dataset. Visualizes TP, TN, FP, FN. """
def __init__(self, config):
"""
Parameters
----------
config : dict
dictionary of configuration parameters
"""
# setup config
self.cfg = config
# setup for visualization
self._setup()
def visualize(self):
""" Visualize predictions """
logging.info('Visualizing ' + self.datapoint_type)
# iterate through shuffled file indices
for i in self.indices:
im_filename = self.im_filenames[i]
pose_filename = self.pose_filenames[i]
label_filename = self.label_filenames[i]
logging.info('Loading Image File: ' + im_filename + ' Pose File: ' + pose_filename + ' Label File: ' + label_filename)
# load tensors from files
metric_tensor = np.load(os.path.join(self.data_dir, label_filename))['arr_0']
label_tensor = 1 * (metric_tensor > self.metric_thresh)
image_tensor = np.load(os.path.join(self.data_dir, im_filename))['arr_0']
hand_poses_tensor = np.load(os.path.join(self.data_dir, pose_filename))['arr_0']
pose_tensor = self._read_pose_data(hand_poses_tensor, self.input_data_mode)
# score with neural network
pred_p_success_tensor = self._gqcnn.predict(image_tensor, pose_tensor)
# compute results
classification_result = ClassificationResult([pred_p_success_tensor],
[label_tensor])
logging.info('Error rate on files: %.3f' %(classification_result.error_rate))
logging.info('Precision on files: %.3f' %(classification_result.precision))
logging.info('Recall on files: %.3f' %(classification_result.recall))
mispred_ind = classification_result.mispredicted_indices()
correct_ind = classification_result.correct_indices()
# IPython.embed()
if self.datapoint_type == 'true_positive' or self.datapoint_type == 'true_negative':
vis_ind = correct_ind
else:
vis_ind = mispred_ind
num_visualized = 0
# visualize
for ind in vis_ind:
# limit the number of sampled datapoints displayed per object
if num_visualized >= self.samples_per_object:
break
num_visualized += 1
# don't visualize the datapoints that we don't want
if self.datapoint_type == 'true_positive':
if classification_result.labels[ind] == 0:
continue
elif self.datapoint_type == 'true_negative':
if classification_result.labels[ind] == 1:
continue
elif self.datapoint_type == 'false_positive':
if classification_result.labels[ind] == 0:
continue
elif self.datapoint_type == 'false_negative':
if classification_result.labels[ind] == 1:
continue
logging.info('Datapoint %d of files for %s' %(ind, im_filename))
logging.info('Depth: %.3f' %(hand_poses_tensor[ind, 2]))
data = image_tensor[ind,...]
if self.display_image_type == RenderMode.SEGMASK:
image = BinaryImage(data)
elif self.display_image_type == RenderMode.GRAYSCALE:
image = GrayscaleImage(data)
elif self.display_image_type == RenderMode.COLOR:
image = ColorImage(data)
elif self.display_image_type == RenderMode.DEPTH:
image = DepthImage(data)
elif self.display_image_type == RenderMode.RGBD:
image = RgbdImage(data)
elif self.display_image_type == RenderMode.GD:
image = GdImage(data)
vis2d.figure()
if self.display_image_type == RenderMode.RGBD:
vis2d.subplot(1,2,1)
vis2d.imshow(image.color)
grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)
grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)
vis2d.grasp(grasp)
vis2d.subplot(1,2,2)
vis2d.imshow(image.depth)
vis2d.grasp(grasp)
elif self.display_image_type == RenderMode.GD:
vis2d.subplot(1,2,1)
vis2d.imshow(image.gray)
grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)
grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)
vis2d.grasp(grasp)
vis2d.subplot(1,2,2)
vis2d.imshow(image.depth)
vis2d.grasp(grasp)
else:
vis2d.imshow(image)
grasp = Grasp2D(Point(image.center, 'img'), 0, hand_poses_tensor[ind, 2], self.gripper_width_m)
grasp.camera_intr = grasp.camera_intr.resize(1.0 / 3.0)
vis2d.grasp(grasp)
vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(ind,
classification_result.pred_probs[ind,1],
classification_result.labels[ind]))
vis2d.show()
# cleanup
self._cleanup()
def _cleanup(self):
""" Close GQCNN TF session"""
self._gqcnn.close_session()
def _setup(self):
""" Setup for visualization """
# setup logger
logging.getLogger().setLevel(logging.INFO)
logging.info('Setting up for visualization.')
#### read config params ###
# dataset directory
self.data_dir = self.cfg['dataset_dir']
# visualization params
self.display_image_type = self.cfg['display_image_type']
self.font_size = self.cfg['font_size']
self.samples_per_object = self.cfg['samples_per_object']
# analysis params
self.datapoint_type = self.cfg['datapoint_type']
self.image_mode = self.cfg['image_mode']
self.input_data_mode = self.cfg['data_format']
self.target_metric_name = self.cfg['metric_name']
self.metric_thresh = self.cfg['metric_thresh']
self.gripper_width_m = self.cfg['gripper_width_m']
# setup data filenames
self._setup_data_filenames()
# setup shuffled file indices
self._compute_indices()
# load gqcnn
logging.info('Loading GQ-CNN')
self.model_dir = self.cfg['model_dir']
self._gqcnn = GQCNN.load(self.model_dir)
self._gqcnn.open_session()
def _setup_data_filenames(self):
""" Setup image and pose data filenames, subsample files, check validity of filenames/image mode """
# read in filenames of training data(poses, images, labels)
logging.info('Reading filenames')
all_filenames = os.listdir(self.data_dir)
if self.image_mode== ImageMode.BINARY:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tensor_template) > -1]
elif self.image_mode== ImageMode.DEPTH:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tensor_template) > -1]
elif self.image_mode== ImageMode.BINARY_TF:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.binary_im_tf_tensor_template) > -1]
elif self.image_mode== ImageMode.COLOR_TF:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.color_im_tf_tensor_template) > -1]
elif self.image_mode== ImageMode.GRAY_TF:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.gray_im_tf_tensor_template) > -1]
elif self.image_mode== ImageMode.DEPTH_TF:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_tensor_template) > -1]
elif self.image_mode== ImageMode.DEPTH_TF_TABLE:
self.im_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.depth_im_tf_table_tensor_template) > -1]
else:
raise ValueError('Image mode %s not supported.' %(self.image_mode))
self.pose_filenames = [f for f in all_filenames if f.find(ImageFileTemplates.hand_poses_template) > -1]
self.label_filenames = [f for f in all_filenames if f.find(self.target_metric_name) > -1]
self.im_filenames.sort(key = lambda x: int(x[-9:-4]))
self.pose_filenames.sort(key = lambda x: int(x[-9:-4]))
self.label_filenames.sort(key = lambda x: int(x[-9:-4]))
# check that all file categories were found
        if len(self.im_filenames) == 0 or len(self.pose_filenames) == 0 or len(self.label_filenames) == 0:
raise ValueError('1 or more required training files could not be found')
def _compute_indices(self):
""" Generate random file index so visualization starts from a
different random file everytime """
self.indices = np.arange(len(self.im_filenames))
np.random.shuffle(self.indices)
def _read_pose_data(self, pose_arr, input_data_mode):
""" Read the pose data and slice it according to the specified input_data_mode
Parameters
----------
pose_arr: :obj:`ndArray`
full pose data array read in from file
input_data_mode: :obj:`InputDataMode`
enum for input data mode, see optimizer_constants.py for all
possible input data modes
Returns
-------
:obj:`ndArray`
sliced pose_data corresponding to input data mode
"""
if input_data_mode == InputDataMode.TF_IMAGE:
return pose_arr[:,2:3]
elif input_data_mode == InputDataMode.TF_IMAGE_PERSPECTIVE:
return np.c_[pose_arr[:,2:3], pose_arr[:,4:6]]
elif input_data_mode == InputDataMode.RAW_IMAGE:
return pose_arr[:,:4]
elif input_data_mode == InputDataMode.RAW_IMAGE_PERSPECTIVE:
return pose_arr[:,:6]
elif input_data_mode == InputDataMode.REGRASPING:
# depth, approach angle, and delta angle for reorientation
return np.c_[pose_arr[:,2:3], pose_arr[:,4:5], pose_arr[:,6:7]]
else:
raise ValueError('Input data mode %s not supported' %(input_data_mode))
|
8,853 | 06339e9cd506f147d03c54aee82473e233b4ec2e | from .routes import generate_routes |
8,854 | 5f50b20bd044471ebb8e1350d1a75a250b255d8f | # ********************************************************************************** #
# #
# Project: Data Frame Explorer #
# Author: Pawel Rosikiewicz #
# Contact: prosikiewicz(a)gmail.com #
# #
# License: MIT License #
# Copyright (C) 2021.01.30 Pawel Rosikiewicz #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# #
# ********************************************************************************** #
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import random
import glob
import re
import os
import seaborn as sns
from IPython.display import display
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_string_dtype
# Function, ............................................................................
def find_and_display_patter_in_series(*, series, pattern):
"I used that function when i don't remeber full name of a given column"
res = series.loc[series.str.contains(pattern)]
return res
# Function, ...........................................................................................
def load_csv(*, path, filename, sep="\t", verbose=True):
"""
    Loads a csv file into a pandas DataFrame, based on pandas.read_csv();
    prints an error message if the file or directory is not found.
    Parameters/Input
    _________________ _______________________________________________________________________________
    * path            full path to the directory
    * filename        full csv file name
    * sep             separator, "\t" by default
    * verbose         bool, True by default; displays df.head(3) and df.shape
                      whenever the function is called.
Returns
_________________ _______________________________________________________________________________
* DataFrame by Pandas
"""
os.chdir(path)
if len(glob.glob(filename))==1:
df = pd.read_csv(filename, sep=sep, low_memory=False)
# display example,
if verbose==True:
display(df.head(3))
print(df.shape)
else:
pass
# return,
return df
else:
if verbose==True:
print(f"""ERROR :csv file {filename}, was not found in: \n {path}""")
else:
pass
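# A minimal, hypothetical usage sketch for load_csv (the path and file name below are
# illustrative placeholders, not files that ship with this module):
def _demo_load_csv():
    # loads /tmp/data.csv with a comma separator, without printing the preview
    return load_csv(path="/tmp", filename="data.csv", sep=",", verbose=False)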
# Function, ............................................................................
def find_patter_in_series(*, s, pat, tolist=True):
'''
    I use this function when I don't remember the full name of a given column
'''
res = s.loc[s.str.contains(pat)]
if tolist==True:
return res.values.tolist()
else:
return res
# Function, ...........................................................................................
def format_to_datetime(*, data, pattern_list, timezone='UTC', unixtime=False, dt_format='%Y-%m-%d %H:%M:%S', verbose=False):
'''
    formats columns in df to datetime dtype, and sets all times to UTC;
    works with unix time units, i.e. seconds since 1970;
    columns in df are found using the full column name or keywords in the column name
'''
assert type(data)==pd.DataFrame, "please provide data in pandas dataframe format"
if isinstance(pattern_list, str):
pattern_list = [pattern_list]
else:
pass
for pat in pattern_list:
# find column names using provided patterns or their full names,
columns_with_potential_datetime_obj = list(find_and_display_patter_in_series(series=pd.Series(data.columns), pattern=pat))
# replace
for i in columns_with_potential_datetime_obj:
# keep example of old cell
before_formatting = str(data.loc[0, i])
# convert to one format
if unixtime==True:
                s = pd.to_datetime(data.loc[:, i], errors="coerce", unit='s').copy()  # format= cannot be combined with unit='s'; the result is the same
data.loc[:, i] = s
if timezone!=None:
data.loc[:, i] = data.loc[:, i].dt.tz_localize(timezone)
else:
pass
else:
s = pd.to_datetime(data.loc[:, i], errors="coerce",format=dt_format).copy()
data.loc[:, i] = s
if timezone!=None:
data.loc[:, i] = data.loc[:, i].dt.tz_convert(timezone)
else:
pass
# info
if verbose==True:
print(f"date time formatted in: {i}")
print(f" - {data.loc[:, i].isnull().sum()} NaN were instroduced by coerce")
print(f" - Example: {before_formatting} -->> {str(data.loc[0, i])}", end="\n")
else:
pass
return data
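# A small illustrative sketch with assumed toy data (not part of the original module):
# columns are matched by the substring "time", and unix seconds become UTC datetimes.
def _demo_format_to_datetime():
    df = pd.DataFrame({"event_time": [0, 86400], "value": [1, 2]})
    # 0 and 86400 seconds correspond to 1970-01-01 and 1970-01-02, localized to UTC
    return format_to_datetime(data=df, pattern_list="time", unixtime=True, verbose=False)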
# Function, ...........................................................................................
def replace_text(*,df ,pat="", colnames="all", fillna=np.nan, verbose=True):
"""
    searches strings for a given pattern and replaces each match with a new value (fillna), e.g. np.nan
    Parameters/Input
    _________________ _______________________________________________________________________________
    * df              Pandas DataFrame
    * pat             "", str literal, used by pd.Series.str.contains()
    * colnames        default "all", or a list of selected column names in df
    * fillna          default np.nan, or str literal
                      - the value placed wherever the searched pattern is found in df
    Returns
    _________________ _______________________________________________________________________________
    * DataFrame       DataFrame.copy() with new values,
    * display messages: number of replaced strings in each column, and examples of replaced values
"""
# for older version,
searched_pattern = pat
col_names = colnames
# check col_names with values to replace,
if col_names=="all":
sel_col_names = list(df.columns)
else:
sel_col_names = col_names
# display message header,
if verbose==True:
print(f"""\nReplacing Text in {len(sel_col_names)} columns: {sel_col_names}\n""")
if verbose==False:
pass
    # exchange the searched pattern in each column separately,
for i, col_name in enumerate(sel_col_names):
        # .. test that the column really holds string values; otherwise it may be float (all NaN) and no action will be taken
if is_string_dtype(df[col_name]):
try:
                # .... find positions with the given pattern and select three examples to display for the user,
positions_to_replace = df[col_name].str.contains(searched_pattern, na=False).values# arr
examples_to_display = [str(x) for x in list(df.loc[list(positions_to_replace), col_name].str[0:20].values.tolist()[0:3])]
                # .... replace positions, and find examples of unchanged positions,
df.loc[list(positions_to_replace), col_name] = [fillna]*positions_to_replace.sum()
examples_of_positions_that_were_not_replaced = [str(x) for x in list(df.loc[list(positions_to_replace==False), col_name].str[0:20].values.tolist()[0:3])]
                # .... display info,
if verbose==True:
perc_of_replaced_pos_in_col = "".join([str(positions_to_replace.sum()/df.shape[0]*100),"%"])
print(f"{i} - {col_name} - - {positions_to_replace.sum()} positions out of {df.shape[0]}, were replaced with {fillna}, ie. {perc_of_replaced_pos_in_col}")
print(f" - three examples of replaced postions: {'; '.join(examples_to_display)}", end="\n")
print(f" - three examples of unchanged postions: {'; '.join(examples_of_positions_that_were_not_replaced)}", end="\n\n")
                    # the second print shows the first three examples of exchanged values,
else:
pass
except:
if verbose==True:
print(f"{i} - {col_name} - - probably only missing data datected, Values were not replaced! \n")
else:
pass
else:
if verbose==True:
print(f"{i} - {col_name} - - is not of string type, Values were not replaced! \n")
else:
pass
return df.copy()
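# A short hypothetical example: replace every cell containing "missing" with np.nan
# in each string column of a toy frame.
def _demo_replace_text():
    df = pd.DataFrame({"a": ["ok", "missing data", "fine"]})
    return replace_text(df=df, pat="missing", fillna=np.nan, verbose=False)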
# Function, ...........................................................................................
def replace_numeric_values(*, df, colnames="all", lower_limit="none", upper_limit="none", equal=False, replace_with=np.nan, verbose=True):
"""
    Replaces numerical values that fall outside the range
    predicted by the theoretical limits of a given variable,
    e.g. less than 0 for the weight of a product.
    Provides examples and counts of the replaced instances
Parameters/Input
_________________ _______________________________________________________________________________
* df : Pandas DataFrame
    * colnames        : list, exact column names of selected or all columns in df
* lower_limit : int,float,"none", if "none" no action is taken
* upper_limit : int,float,"none", if "none" no action is taken
* replace_with : str, np.nan, int, float
* equal : bool, if True, >= and <= values then limits will be replaced,
if False (default), > and < values then limits will be replaced,
Returns
_________________ _______________________________________________________________________________
* DataFrame DataFramne.copy() with new values,
    * display messages: number of replaced values in each column, and examples of replaced values
"""
cols_names = colnames
# .. check provided col_names,
if cols_names=="all":
cols = list(df.columns)
else:
cols = cols_names
# .. info, header,
if verbose==True:
print(f"""\n{"".join(["-"]*80)} \n Replacing Numerical Values in {len(cols)} columns""")
print(f" lower filter={lower_limit}, upper filter ={upper_limit}")
if equal==True:
print(f" Caution, equal=True, ie. values >= and <= then requested limits will be replaced")
print(f'{"".join(["-"]*80)}\n')
if verbose==False:
pass
# .. intelligent info,
total_count=[]
# .. count, to limit the number of displayed messages,
count = 0
# .. replace values and collect examples,
for i, j in enumerate(cols):
# ..... assume no values were replaced, so the messages work later,
info_lower_filter = 0
info_upper_filter = 0
# ..... test if the column is of the numeric type:
# from pandas.api.types import is_numeric_dtype
if is_numeric_dtype(df[j]):
# * replace values < or <= lower limit,
# - ----------------------------------
if lower_limit!="none":
if equal == True:
lower_filter = df.loc[:,j]<=lower_limit
if equal == False:
lower_filter = df.loc[:,j]<lower_limit
# info,
info_lower_filter=lower_filter.sum()
df.loc[list(lower_filter),j]=replace_with
# * replace values > or >= upper limit,
# - ----------------------------------
if upper_limit!="none":
if equal == True:
upper_filter = df.loc[:,j]>=upper_limit
if equal == False:
upper_filter = df.loc[:,j]>upper_limit
# info,
info_upper_filter=upper_filter.sum()
df.loc[list(upper_filter),j]=replace_with
# * find how many values were replaced, and add that to the total_count list
total_count.append(info_upper_filter+info_lower_filter)
# * display examples for 3 first columns with replaced values,
if verbose==True:
if info_upper_filter+info_lower_filter>0 and count <4:
print(f"eg: {i}, {j} : {info_lower_filter} values <{lower_limit}, ...{info_upper_filter} values <{upper_limit}")
else:
pass
# * add 1 to count, to limit the number of displayed examples,
count += 1
else:
if verbose==True:
print(f"{i, j} is not of numeric type, values were not replaced !")
else:
pass
    # .. additional message, if more than 3 columns had replaced values,
if verbose==True:
if len(total_count)>3 and pd.Series(total_count).sum()>0:
print(f". and {len(total_count)-3} other columns had in total {pd.Series(total_count).sum()} replaced values \n")
    # .. message in case no values were replaced at all,
if pd.Series(total_count).sum()==0:
print("No values were replaced in requested columns....")
else:
pass
# .. return,
return df.copy()
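# An illustrative sketch with toy data (not from the original module): replace
# implausible ages, i.e. values below 0 or above 120, with np.nan.
def _demo_replace_numeric_values():
    df = pd.DataFrame({"age": [-5, 30, 250]})
    return replace_numeric_values(df=df, lower_limit=0, upper_limit=120, verbose=False)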
# function, ...................................................
def drop_nan(df, method="any", row=True, verbose=True):
'''
    function to drop NaN from rows and columns, with thresholds
    . method
      . any        : rows/columns with any missing data are removed
      . all        : rows/columns consisting only of missing data are removed
      . int, >0    : keeps rows/columns with at least this number of non-missing values
      . float, >0  : as above, but given as a fraction
'''
assert type(df)==pd.DataFrame, "incorrect df dtype"
df = df.copy()
if verbose==True:
print(df.shape)
else:
pass
# set funtion for rows or columns,
if row==True:
shapeidx, dfaxis = 1, 0
else:
shapeidx, dfaxis = 0, 1
# use threshold or "all", or None for do nothing,
if method==None:
pass
elif isinstance(method, str):
df = df.dropna(how=method, axis=dfaxis) # removes rows with NaN in all columns
elif isinstance(method, int):
tr = method
if tr==0:
pass
else:
if tr>=df.shape[shapeidx]:
tr=df.shape[shapeidx]
else:
pass
df = df.dropna(thresh=tr, axis=dfaxis) # eg Keep only the rows with at least 2 non-NA value
elif isinstance(method, float):
tr = int(np.ceil(df.shape[shapeidx]*(method)))
if tr==0:
pass
else:
if tr>=df.shape[shapeidx]:
tr=df.shape[shapeidx]
else:
pass
df = df.dropna(thresh=tr, axis=dfaxis) # eg Keep only the rows with at least 2 non-NA value
else:
pass
# info and return
if verbose==True:
print(df.shape)
else:
pass
return df
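# A brief sketch of the threshold semantics on toy data: an int keeps rows with at
# least that many non-missing values; a float does the same as a fraction of columns.
def _demo_drop_nan():
    df = pd.DataFrame({"a": [1, np.nan], "b": [1, np.nan], "c": [1, 2]})
    df_int = drop_nan(df, method=2, row=True, verbose=False)     # keep rows with >= 2 non-NaN
    df_frac = drop_nan(df, method=0.5, row=True, verbose=False)  # keep rows with >= 50% non-NaN
    return df_int, df_frac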
# Function, ...........................................................................................
def drop_columns(*, df, columns_to_drop, verbose=True):
"""
    Small function to quickly remove columns from a df,
    by column names stored in a list
    - created to give info on the removed columns and to verify that the df is changed properly,
    - the function tolerates duplicated column names,
"""
assert type(df)==pd.DataFrame, "please provide df in pandas dataframe format"
df = df.copy()
    # keep only unique names, in case of accidental duplicates in the list,
columns_to_drop = list(pd.Series(columns_to_drop).unique())
# .. info, header,
if verbose==True:
print(f"""Removing {len(columns_to_drop)} columns from df""")
else:
pass
# remove columns one by one,
for i,j in enumerate(columns_to_drop):
try:
df.drop(columns=[j], axis=1, inplace=True)
if verbose==True:
print(f"{i} removing: {j}, ==> new df.shape: {df.shape}")
else:
pass
except:
if verbose==True:
print(f"{i} .... column: {j}, was not found in df, check if name is correct....")
else:
pass
return df
|
8,855 | 601ef4e1000348059dcfe8d34eec5f28368f2464 | /Users/alyssaliguori/anaconda3/lib/python3.7/tokenize.py |
8,856 | bbd5eb1f80843efdd2709aa19a65bf325a88f473 | # Developed by Lorenzo Mambretti, Justin Wang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/jtwwang/hanabi/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
#
import rl_env
import numpy as np
import os
import sys
import random
import getopt
import pickle
from agents.neuroEvo_agent import NeuroEvoAgent
from predictors.conv_pred import conv_pred
# To find local modules
sys.path.insert(0, os.path.join(os.getcwd(), 'agents'))
def model_crossover(weights1, weights2):
new_weights = []
assert len(weights1) == len(weights2)
if random.uniform(0, 1) > 0.3:
print("crossover")
for layer in range(len(weights1)):
# alternate odd and even layers
if layer % 2 == 0:
new_weights.append(weights1[layer])
else:
new_weights.append(weights2[layer])
else:
print("no crossover")
new_weights = weights1
return new_weights
def mutate_weights(weights):
for xi in range(len(weights)):
for yi in range(len(weights[xi])):
if random.uniform(0, 1) > 0.9:
change = random.uniform(-0.1, 0.1)
weights[xi][yi] += change
return weights
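# A tiny illustrative sketch on toy weight lists (not a real Keras model): cross over
# two parents layer-by-layer, then randomly perturb the child.
def _demo_evolve_toy_weights():
    w1 = [np.zeros((2, 2)), np.zeros(2)]
    w2 = [np.ones((2, 2)), np.ones(2)]
    child = model_crossover(w1, w2)  # even layers from w1, odd from w2 (or no crossover)
    child = mutate_weights(child)    # each row/element has a ~10% chance of a small shift
    return child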
def make_mutation(ix_to_mutate, best_ones):
p = np.sort(scores)[2:]
p = p / np.sum(p)
# select the weights from parents
randomA = np.random.choice(best_ones, p=p)
randomB = np.random.choice(best_ones, p=p)
while randomB == randomA:
randomB = np.random.choice(best_ones, p=p)
weights1 = weights[randomA]
weights2 = weights[randomB]
# generate new weights
new_weights = model_crossover(weights1, weights2)
new_weights = mutate_weights(new_weights)
# change the weights of the target agent
weights[ix_to_mutate] = new_weights
def run(ix, initialize=False):
# initialize env
env = rl_env.make('Hanabi-Full', num_players=flags['players'])
agent_config = {
'players': flags['players'],
'num_moves': env.num_moves(),
'observation_size': env.vectorized_observation_shape()[0],
'model_name': str(ix),
'initialize': initialize}
agent = NeuroEvoAgent(agent_config)
avg_reward = 0
avg_steps = 0
for eps in range(flags['num_episodes']):
obs = env.reset() # Observation of all players
done = False
agent_id = 0
while not done:
ob = obs['player_observations'][agent_id]
try:
action = agent.act(ob)
except ValueError:
                print('Something went wrong. Try to reinitialize the agents '
                      'pool by using --initialize True')
exit()
obs, reward, done, _ = env.step(action)
avg_reward += reward
avg_steps += 1
if done:
break
# change player
agent_id = (agent_id + 1) % flags['players']
n_eps = float(flags['num_episodes'])
avg_steps /= n_eps
avg_reward /= n_eps
agent.save(model_name=str(ix))
scores[ix] = avg_reward * 1000 + avg_steps
if __name__ == "__main__":
global flags, scores, weights
flags = {'players': 2,
'num_episodes': 100,
'initialize': False,
'models': 20,
'generations': 100}
options, arguments = getopt.getopt(sys.argv[1:], '',
['players=',
'num_episodes=',
'initialize=',
'models=',
'generations='])
if arguments:
sys.exit('usage: neuroEvo.py [options]\n'
'--players number of players in the game.\n'
'--num_episodes number of game episodes to run.\n'
                 '--initialize whether to re-initialize the weights '
                 'for all agents.\n')
for flag, value in options:
flag = flag[2:] # Strip leading --.
flags[flag] = type(flags[flag])(value)
# Initialize all models
current_pool = []
scores = np.zeros(flags['models'])
weights = {}
to_mutate = 0
# create one agent
agent = conv_pred("NeuroEvo_agent")
# load the file
filepath = os.path.join("model", "NeuroEvo_agent")
filepath = os.path.join(filepath, "scores.pickle")
if not flags['initialize']:
try:
scores = pickle.load(open(filepath, "rb"))
loaded = True
except IOError:
loaded = False
else:
loaded = False
print("Initialize")
# do an initial loop to evaluate all models
for i in range(flags['models']):
if flags['initialize'] or not loaded:
run(i, flags['initialize'])
agent.load(model_name=str(i))
weights[i] = agent.model.get_weights()
for gen in range(flags['generations']):
print("Generation %i " % gen)
# sort the results
ranking = np.argsort(scores)
print("best: %i with score %f" % (ranking[-1], scores[ranking[-1]]))
print("worst: %i with score %f" % (ranking[0], scores[ranking[0]]))
print("avg: %f" % (sum(scores)/flags['models']))
# divide worst from best
worst_ones = ranking[:2]
best_ones = ranking[2:]
# select the one to mutate and the one to use for the simulation
ix_to_mutate = worst_ones[to_mutate]
ix_to_simulate = worst_ones[1 - to_mutate]
run(ix_to_simulate)
make_mutation(ix_to_mutate, best_ones)
# update weights of mutated agent
agent.model.set_weights(weights[ix_to_mutate])
agent.save(model_name=str(ix_to_mutate))
# prepare for next generation
to_mutate = (to_mutate + 1) % 2
# save the rankings
pickle.dump(scores, open(filepath, "wb"))
print("Saved scores.")
|
8,857 | 0b4f070d30642449536118accffa371a89dd3075 | # views which respond to ajax requests
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.models import User
from social.models import Like, Post, Comment, Notification
from social.notifications import Notify
from social.forms import CommentForm
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.template import loader
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from social.collections import Collections
from watson import search as watson
c = Collections()
# NB: this module-level dict is shared by every view below; mutating it per request
# is a latent concurrency bug, kept here to preserve the original structure
data = {}
# like or unlike posts, kraks, users or comments
def like(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
# get notification data
if item_type == "post":
liked_object = Post.objects.get(id=item_id)
elif item_type == "comment":
liked_object = Comment.objects.get(id=item_id)
target = liked_object.author if item_type != "user" else liked_object
# user must be authenticated to like/unlike
if request.user.is_authenticated:
like = Like.objects.filter(item_id=item_id, item_type=item_type, user=request.user)
if like.exists():
# unlike
like.delete()
# delete notification
try:
Notification.objects.get(
actor_id=request.user.id,
actor_type="user",
verb="like",
object_id=liked_object.id,
object_type=item_type,
target_id=target.id,
target_type="user"
).delete()
except Notification.DoesNotExist:
pass
else:
# like
like = Like.objects.create(item_id=item_id, item_type=item_type, user=request.user)
# create notification
# NB: users should not be notified of their actions on objects they created
if like.user != target:
Notification.objects.create(
actor_id=request.user.id,
actor_type="user",
verb="like",
object_id=liked_object.id,
object_type=item_type,
target_id=target.id,
target_type="user"
)
data['auth'] = True
else: # anonymous user
data['auth'] = False
return JsonResponse(data)
# follow or unfollow users
def follow(request):
action = request.POST.get('action') # follow/unfollow
followed_user_id = request.POST.get('followedUserId')
followed_user = User.objects.get(id=followed_user_id)
# users cannot follow themselves
if followed_user == request.user:
return JsonResponse({})
# user must be authenticated to follow/unfollow
    if request.user.is_authenticated:  # property, not a method, in modern Django
if action == 'follow':
followed_user.profile.followers.add(request.user)
request.user.profile.following.add(followed_user)
# create notification
Notification.objects.create(
actor_id=request.user.id,
actor_type="user",
verb="follow",
object_id=followed_user.id,
object_type="user",
target_id=followed_user.id,
target_type="user"
)
elif action == 'unfollow':
followed_user.profile.followers.remove(request.user)
request.user.profile.following.remove(followed_user)
try:
Notification.objects.get(
actor_id=request.user.id,
actor_type="user",
verb="follow",
object_id=followed_user.id,
object_type="user",
target_id=followed_user.id,
target_type="user"
).delete()
except Notification.DoesNotExist:
pass
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def delete(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
item = Post.objects.get(id=item_id)
messages.success(request, "Post deleted successfully!")
# delete notifications associated with this post
try:
Notification.objects.filter(
object_id=item.id,
object_type="post"
).delete()
except Notification.DoesNotExist:
pass
elif item_type == 'comment':
item = Comment.objects.get(id=item_id)
messages.success(request, "Comment deleted successfully!")
# delete notifications associated with this comment
try:
Notification.objects.get(
object_id=item.id,
object_type="comment"
).delete()
except Notification.DoesNotExist:
pass
if item.author == request.user:
item.delete()
data['error'] = False
return JsonResponse(data)
def comment(request):
    if request.user.is_authenticated:  # property, not a method, in modern Django
        data['auth'] = True
form = CommentForm(request.POST)
if form.is_valid():
post_id = request.POST.get('post_id')
content = request.POST.get('content')
page = request.POST.get('page')
post = Post.objects.get(id=post_id)
comment = Comment.objects.create(content=content, post=post, author=request.user)
            show_comment_actions = page == "post"
comment_html = loader.render_to_string(
'social/partials/latest-comment.html', {
'comment': comment,
'current_user': request.user,
'show_comment_actions': show_comment_actions
},
)
data['comment_html'] = comment_html
data['errors'] = False
# create notification
if post.author != comment.author:
Notification.objects.create(
actor_id=request.user.id,
actor_type="user",
verb="comment",
object_id=comment.id,
object_type="comment",
target_id=post.author.id,
target_type="user"
)
else:
data['errors'] = form.errors
else:
data['auth'] = False
return JsonResponse(data)
def clear_image(request):
item_id = int(request.POST.get('itemId'))
item_type = request.POST.get('itemType')
if item_type == 'post':
Post.objects.get(id=item_id, author=request.user).featured_image.delete(save=True)
elif item_type == 'user' and item_id == request.user.id:
User.objects.get(id=item_id).profile.profile_photo.delete(save=True)
messages.success(request, 'Image successfully removed!')
return JsonResponse(data)
#### LAZY LOADING ####
######################
# META
def paginate_list(input_list, page, results_per_page=10):
paginator = Paginator(input_list, results_per_page)
# paginate
try:
output_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver 2nd page.
output_list = paginator.page(2)
except EmptyPage:
# If page is out of range (e.g. 9999), return empty list
output_list = []
# push to template
return output_list
def load_feeds(request):
page = request.POST.get('page')
posts = c.feed(request.user)
posts = paginate_list(posts, page, 15)
posts_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
return JsonResponse(data)
def load_user_lists(request):
user_list = request.POST.get('userList') # posts, following, followers, liked posts
user_id = request.POST.get('userId')
page = request.POST.get('page')
user = User.objects.get(id=user_id)
if user_list == 'posts':
posts = user.profile.get_posts(request.user)
posts = paginate_list(posts, page)
posts_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': posts, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
elif user_list == 'following':
following = list(reversed(user.profile.following.all()))
following = paginate_list(following, page)
following_html = loader.render_to_string(
'social/partials/users.html',
{'user': request.user, 'users': following, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = following.has_next()
data['list_html'] = following_html
elif user_list == 'followers':
followers = list(reversed(user.profile.followers.all()))
followers = paginate_list(followers, page)
followers_html = loader.render_to_string(
'social/partials/users.html',
{'user': request.user, 'users': followers, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = followers.has_next()
data['list_html'] = followers_html
elif user_list == 'liked':
liked_posts = c.liked(request.user)
liked_posts = paginate_list(liked_posts, page)
liked_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = liked_posts.has_next()
data['list_html'] = liked_html
return JsonResponse(data)
def load_comments(request):
post_id = request.POST.get('postId')
page = request.POST.get('page')
comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')
comments = paginate_list(comments, page)
comments_html = loader.render_to_string(
'social/partials/comments.html',
{'comments': comments, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = comments.has_next()
data['comments_html'] = comments_html
return JsonResponse(data)
def load_popular(request):
page = request.POST.get('page')
popular_posts = c.popular(request.user)
popular_posts = paginate_list(popular_posts, page, 15)
popular_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': popular_posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = popular_posts.has_next()
data['list_html'] = popular_html
return JsonResponse(data)
def load_users(request):
page = request.POST.get('page')
users = c.popular_users(request.user)
users = paginate_list(users, page, 15)
users_html = loader.render_to_string(
'social/partials/users.html',
{'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = users.has_next()
data['list_html'] = users_html
return JsonResponse(data)
def load_search_results(request):
q = request.POST.get('q')
page = request.POST.get('page')
results = watson.search(q)
results = paginate_list(results, page)
results_html = loader.render_to_string(
'social/partials/search-results.html',
{'results': results},
)
data['has_next'] = results.has_next()
data['results_html'] = results_html
return JsonResponse(data)
def load_notifications(request):
page = request.POST.get('page')
notifs = Notification.objects.filter(target_type="user", target_id=request.user.id).order_by('-created_at')
notifs = paginate_list(notifs, page)
notifications = []
for n in notifs:
notif = Notify(n)
notification = notif.get()
notifications.append({'message': notification, 'date': n.created_at})
        # mark unread notifications as read
        if not n.is_read:
            n.is_read = True
            n.save()
notifs_html = loader.render_to_string(
'social/partials/notifications.html',
{'notifications': notifications},
)
data['has_next'] = notifs.has_next()
data['notifs_html'] = notifs_html
return JsonResponse(data) |
8,858 | 4fc4bb81d47a33e4669df46033033fddeca6544e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 1 11:52:48 2022
@author: ccamargo
"""
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import os
# 1. get filelist
path = "/Volumes/LaCie_NIOZ/data/steric/data/"
path_to_original_files = path + "original/"
flist = [file for file in os.listdir(path_to_original_files) if file.endswith(".nc")]
path_to_regrided_files = path + "regrid_180x360/"
#%% 2. Regrid:
# for file in flist:
# fin=path_to_original_files+file
# fout=path_to_regrided_files+file
# command_list=str('cdo -L remapbil,r360x180 '+fin+' '+fout)
# _tmp=os.system(command_list)
#%% landmask
# NB: this ETOPO land mask is computed but immediately overwritten by the CRI-JPL mask below
ds = xr.open_dataset("/Volumes/LaCie_NIOZ/data/masks/ETOPO_mask.nc")
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.landmask)
ds = xr.open_dataset(
"/Volumes/LaCie_NIOZ/data/barystatic/masks/"
+ "LAND_MASK_CRI-JPL_180x360_conservative.nc"
)
ds = ds.where((ds.lat > -66) & (ds.lat < 66), np.nan)
mask = np.array(ds.mask)
mask[mask == 1] = np.nan
mask[mask == 0] = 1
# %% 3. get data
flist = [file for file in os.listdir(path_to_regrided_files) if file.endswith(".nc")]
datasets = []
for file in flist:
print(file)
name = file.split(".nc")[0]
ds = xr.open_dataset(path_to_regrided_files + file, decode_times=False)
timespan = [ds.timespan]
print(timespan)
ti, tf = timespan[0].split(" to ")
yf = int(tf.split("-")[0])
mf = int(tf.split("-")[1])
if mf == 12:
yf = yf + 1
mf = "01"
else:
mf = mf + 1
tf = "{}-{}-28".format(yf, str(mf).zfill(2))
if name == "Ishii":
ti = "1990-01-31T00:00:00.000000"
tf = "2019-01-31T00:00:00.000000"
print("correct time: {} to {}".format(ti, tf))
# tf = '{}-{}-{}'.format(time[-1].year,str(time[-1].month).zfill(2),time[-1].day +15)
time = np.arange(ti, tf, dtype="datetime64[M]")
ds["time"] = np.array(time)
da = ds["data"].rename("sla_" + name)
da.data = da.data * mask
da.data = da.data - np.array(
da.sel(time=slice("2005-01-01", "2016-01-01")).mean(dim="time")
)
datasets.append(da)
# print(da)
#%% merge datasets
ds = xr.merge(datasets)
#% % select since 1993
ds = ds.sel(time=slice("1993-01-01", ds.time[-1]))
#% % compute ENS mean
var = [
key
for key in ds.variables
if key.split("_")[0] == "sla" and len(key.split("_")) == 2
]
data = np.zeros((len(var), len(ds.time), len(ds.lat), len(ds.lon)))
data.fill(np.nan)
names = [v.split("_")[-1] for v in var]
for i, v in enumerate(var):
data[i] = np.array(ds[v])
da = xr.Dataset(
data_vars={"data": (("names", "time", "lat", "lon"), data)},
coords={"lat": ds.lat, "lon": ds.lon, "time": ds.time, "names": names},
)
# ds['sla_ens'] = (['time','lat','lon'],np.nanmean(datamu,axis=0))
ds["sla_ens"] = da.data.mean(dim="names")
ens = np.zeros((1, len(ds.time), len(ds.lat), len(ds.lon)))
ens.fill(np.nan)
ens[0] = np.array(ds.sla_ens)
data2 = np.vstack([data, ens])
names.append("ENS")
ds = ds.assign_coords({"names": names})
ds["SLA"] = (["names", "time", "lat", "lon"], data2)
ds.attrs["units"] = "meters"
ds.attrs["description"] = "Steric sea-level height (m)"
ds.attrs["time_mean"] = "Removed time mean from 2005-2015 (full years)"
ds.attrs["script"] = "SLB-steric.py"
#%% save
path_save = "/Volumes/LaCie_NIOZ/data/budget/"
ds.to_netcdf(path_save + "steric_upper.nc")
|
8,859 | d267bf82aee2eca29628fcd1d874a337adc1ae09 | import math
class Solution:
# @param {integer} n
# @param {integer} k
# @return {string}
def getPermutation(self, n, k):
res = ''
k -= 1
nums = [str(i) for i in range(1, n+1)]
while n > 0:
        tmp = math.factorial(n-1)
        res += nums[k/tmp]  # Python 2 integer division; a Python 3 port must use //
        del nums[k/tmp]
        k %= tmp
        n -= 1
return res
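# The class above relies on Python 2 semantics (integer division with /, print
# statements). A minimal Python 3 port of the same factorial number system idea,
# added here as an illustrative sketch:
class SolutionPy3:
    def getPermutation(self, n, k):
        res = ''
        k -= 1
        nums = [str(i) for i in range(1, n + 1)]
        while n > 0:
            tmp = math.factorial(n - 1)
            res += nums[k // tmp]  # floor division replaces Python 2's /
            del nums[k // tmp]
            k %= tmp
            n -= 1
        return res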
# class Solution:
# def f(self,n,k):
# if n==1 :
# return [0]
# else:
# count=1
# for i in range(1,n):
# count*=i
# begin=(k-1)/count
# plus=k%count
# return [begin]+self.f(n-1,plus)
#
# # @return a string
# def getPermutation(self, n, k):
# res=self.f(n,k)
# print res
# lists=range(1,n+1)
# strs=''
# for i in range(n):
# strs+=str(lists[res[i]])
# lists.pop(res[i])
# return strs
if __name__=="__main__":
a=Solution()
print a.getPermutation(3, 1),"123"
print a.getPermutation(2,2)
print a.getPermutation(3,2)
#https://leetcode.com/discuss/16064/an-iterative-solution-for-reference
#TLE
# class Solution:
# def f(self,lists):
# if lists==None:
# return None
# tmpres=[]
#
# for idx,item in enumerate(lists):
# tmp=[i for i in lists]
# tmp.pop(idx)
# res=self.f(tmp)
# if len(res)>0:
# for i in res:
# tmpres.append(str(item)+i)
# else:
# tmpres.append(str(item))
# return tmpres
#
# # @return a string
# def getPermutation(self, n, k):
# if n==1:
# return '1'
# count=1
# begin=0
# plus=0
# for i in range(1,n):
# count*=i
# begin+=k/count
# plus=k%count
#
# tmp=[i for i in range(1,n+1)]
# if begin>0:
# tmp.pop(begin-1)
#
# tmp=self.f(tmp)
# if begin>0:
# return str(begin)+tmp[plus-1]
# else:
# return tmp[plus-1]
# TLE
# # class Solution:
# # def f(self,lists):
# # if lists==None:
# # return None
# # tmpres=[]
# #
# # for idx,item in enumerate(lists):
# # tmp=[i for i in lists]
# # tmp.pop(idx)
# # res=self.f(tmp)
# # if len(res)>0:
# # for i in res:
# # tmpres.append(str(item)+i)
# # else:
# # tmpres.append(str(item))
# # return tmpres
# #
# # # @return a string
# # def getPermutation(self, n, k):
# # tmp=self.f(range(1,n+1))
# # return tmp[k-1]
# #
|
8,860 | ad5cdcfd9d7a3c07abcdcb701422f3c0fdc2b374 | from Bio import BiopythonWarning, SeqIO
from Bio.PDB import MMCIFParser, Dice, PDBParser
from Bio.SeqUtils import seq1
import time
import requests
import re
import warnings
warnings.simplefilter('ignore', BiopythonWarning)
def get_response(url):
    cnt = 20
    while cnt != 0:
        response = requests.get(url)  # re-issue the request on every attempt
        if response.status_code == 200:
            return response.content.decode()
        time.sleep(1)
        cnt -= 1
    raise IOError(f"Some issues with PDB now. Try again later...\n(URL: {url})")
def get_seq_names(path_to_fasta):
values = list(zip(*[(str(record.seq), record.id)
for record in SeqIO.parse(path_to_fasta, "fasta")]))
if len(values) == 0:
return []
else:
_, names = values
return names
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0]
if chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1("".join([residue.get_resname() for residue in self.chain]))
seq_from_pdb = re.search("^X*(.*?)X*$", seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace("-", "")
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[end_on_indices]
final_seq = \
[r.get_resname() for r in self.chain.get_residues()
if start <= r.get_id()[1] <= end]
if "UNK" in final_seq:
with open(out_file, "w") as f:
f.write("")
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
def __init__(self, pdb_id, chain_id, cif_dir, file_type="cif"):
self.pdb_id = pdb_id
self.chain_id = str(chain_id)
if file_type == "cif":
self.parser = MMCIFParser()
else:
self.parser = PDBParser()
self.structure = self.parser.get_structure(pdb_id, cif_dir + f"{pdb_id}.{file_type}")
self.chain = self.get_chain()
self.seq, self.indices = self.get_seq_from_pdb()
|
8,861 | b005f4657a1036044c2e6051207641fe621eb17e | # Constructor without arguments
class Demo:
def __init__(self):
print("\nThis is constructor")
obj = Demo()
# Constructor with arguments
class Demo2:
def __init__(self, number1, number2):
sumOfNumbers = number1 + number2
print(sumOfNumbers)
obj2 = Demo2(50,75) |
8,862 | 4f84cf80292e2764ca3e4da79858058850646527 | import json, requests, math, random
#import datagatherer
# Constants:
start_elo = 0 # Starting elo
decay_factor = 0.9 # Decay % between stages
k = 30 # k for elo change
d = 200 # Difference in elo for 75% expected WR
overall_weight = 0.60 # Weights for the different types of elo
maptype_weight = 0.20
mapname_weight = 0.20
teams = ['ATL','BOS','CDH','DAL','FLA','GZC','HZS','HOU','LDN','GLA','VAL','NYE','PAR','PHI','SFS','SEO','SHD','TOR','VAN','WAS']
maptypes = ['control','assault','hybrid','escort']
mapnames = ['Havana', 'temple-of-anubis', 'kings-row', 'hanamura', 'gibraltar', 'numbani', 'volskaya',
'hollywood', 'dorado', 'nepal', 'route-66', 'lijiang', 'ilios', 'eichenwalde', 'oasis',
'horizon-lunar-colony', 'junkertown', 'blizzard-world', 'rialto', 'busan', 'paris']
postseasonmappool = ['lijiang','ilios','busan','horizon-lunar-colony','temple-of-anubis','hanamura','numbani','eichenwalde',
'kings-row','dorado','gibraltar','rialto']
colorrequests = requests.get("https://api.overwatchleague.com/teams",timeout=10).text
colordata = json.loads(colorrequests)['competitors']
class EloCalculations:
def __init__(self):
self.teamcolors = {}
for teamdata in colordata:
c = teamdata['competitor']
self.teamcolors[c['abbreviatedName']]=["#"+c['primaryColor'],"#"+c['secondaryColor']]
self.matchdata = json.loads(open("data.json",'r').read())
self.overall_elos = {t:start_elo for t in teams}
self.maptype_elos = {t:{m:start_elo for m in maptypes} for t in teams}
self.mapname_elos = {t:{m:start_elo for m in mapnames} for t in teams}
self.elorecords = {t:[[],[],[],[]] for t in teams}
self.stage4played = {t:0 for t in teams}
self.map_draws = {m:[0,0] for m in mapnames}
self.standings = {t:{'w':0,'l':0,'d':0} for t in teams}
self.margins_of_victory = []
def makeCopy(self, season):
self.overall_elos = {t:season.overall_elos[t] for t in teams}
self.maptype_elos = {t:{m:season.maptype_elos[t][m] for m in maptypes} for t in teams}
self.mapname_elos = {t:{m:season.mapname_elos[t][m] for m in mapnames} for t in teams}
self.map_draws = {m:[season.map_draws[m][0],season.map_draws[m][1]] for m in mapnames}
self.margins_of_victory = [x for x in season.margins_of_victory]
self.standings = {t:{'w':season.standings[t]['w'],'l':season.standings[t]['l'],'d':season.standings[t]['d']} for t in teams}
def calculateElos(self):
def applyStageDecay():
for t in teams:
self.overall_elos[t]*=decay_factor
for m in mapnames:
self.mapname_elos[t][m]*=decay_factor
for m in maptypes:
self.maptype_elos[t][m]*=decay_factor
for i in range(4):
stage = self.matchdata['stages'][i]
applyStageDecay()
for t in teams: self.elorecords[t][i].append(self.overall_elos[t])
for match in stage['regular']+stage['playoffs']:
if not match['completed']: continue
t1, t2 = match['t1'], match['t2']
if i==3:
self.stage4played[t1]+=1
self.stage4played[t2]+=1
# Season Standing W/L
if match in stage['regular']:
if len([x for x in match['maps'] if x['result']=='t1'])>len([x for x in match['maps'] if x['result']=='t2']):
self.standings[t1]['w']+=1
self.standings[t2]['l']+=1
else:
self.standings[t1]['l']+=1
self.standings[t2]['w']+=1
for map in match['maps']:
t1_elo = (self.overall_elos[t1]*overall_weight +
self.mapname_elos[t1][map['mapname']]*mapname_weight +
self.maptype_elos[t1][map['maptype']]*maptype_weight)
t2_elo = (self.overall_elos[t2]*overall_weight +
self.mapname_elos[t2][map['mapname']]*mapname_weight +
self.maptype_elos[t2][map['maptype']]*maptype_weight)
exp_t1 = 1/(1+10**((t2_elo-t1_elo)/d)) # Expected Scores
exp_t2 = 1/(1+10**((t1_elo-t2_elo)/d))
act_t1 = 1 if map['result']=='t1' else 0 if map['result']=='t2' else 0.5 # Actual Scores
act_t2 = 1 if map['result']=='t2' else 0 if map['result']=='t1' else 0.5
self.map_draws[map['mapname']][0] += 1 if act_t1==0.5 else 0 # Draw %
self.map_draws[map['mapname']][1] += 1
if match in stage['regular']:
self.standings[t1]['d']+= 1 if map['result']=='t1' else -1 if map['result']=='t2' else 0 # Standings Differential
self.standings[t2]['d']+= 1 if map['result']=='t2' else -1 if map['result']=='t1' else 0
MoV = 1 # Margin of Victory
elo_dif = 0 # Elo Difference
if act_t1==1: # The team that won determines the margin of victory
MoV = (map['deaths'][t2]+1)/(map['deaths'][t1]+1)
elo_dif = t1_elo-t2_elo
elif act_t2==1:
MoV = (map['deaths'][t1]+1)/(map['deaths'][t2]+1)
elo_dif = t2_elo-t1_elo
else: # In case of a draw, the team with higher elo determines margin of "victory"
if t1_elo>t2_elo:
MoV = (map['deaths'][t2]+1)/(map['deaths'][t1]+1)
elo_dif = t1_elo-t2_elo
                        elif t2_elo>t1_elo:
MoV = (map['deaths'][t1]+1)/(map['deaths'][t2]+1)
elo_dif = t2_elo-t1_elo
self.margins_of_victory.append(MoV)
mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)
t1_change = k * (act_t1 - exp_t1) * mult
t2_change = k * (act_t2 - exp_t2) * mult
self.overall_elos[t1] += t1_change
self.maptype_elos[t1][map["maptype"]] += t1_change
self.mapname_elos[t1][map["mapname"]] += t1_change
self.overall_elos[t2] += t2_change
self.maptype_elos[t2][map["maptype"]] += t2_change
self.mapname_elos[t2][map["mapname"]] += t2_change
self.elorecords[t1][i].append(self.overall_elos[t1])
self.elorecords[t2][i].append(self.overall_elos[t2])
def getMapType(self,name):
types = {
**dict.fromkeys(['hanamura','horizon-lunar-colony','temple-of-anubis','volskaya','paris'],'assault'),
**dict.fromkeys(['dorado','junkertown','rialto','route-66','gibraltar','Havana'],'escort'),
**dict.fromkeys(['blizzard-world','eichenwalde','hollywood','kings-row','numbani'],'hybrid'),
**dict.fromkeys(['busan','ilios','lijiang','nepal','oasis'],'control')
}
return types[name]
def predictMatch(self,team1, team2, maps, loops = 10000):
results = {}
team1wins = 0
maptypes = list(map(self.getMapType,maps))
for x in range(loops):
team1score = 0
team2score = 0
for i in range(len(maps)):
drawchance = self.map_draws[maps[i]][0]/self.map_draws[maps[i]][1]
elo1 = (self.overall_elos[team1]*overall_weight +
self.mapname_elos[team1][maps[i]]*mapname_weight +
self.maptype_elos[team1][maptypes[i]]*maptype_weight)
elo2 = (self.overall_elos[team2]*overall_weight +
self.mapname_elos[team2][maps[i]]*mapname_weight +
self.maptype_elos[team2][maptypes[i]]*maptype_weight)
random_roll = random.random()
team1winchance = 1/(1+10**((elo2-elo1)/d))
#drawchance *= min(team1winchance,1-team1winchance)*2
if random_roll < team1winchance - drawchance/2: team1score +=1
elif random_roll < team1winchance + drawchance/2: pass
else: team2score +=1
if team1score==team2score:
map5 = random.choice([m for m in ['ilios','busan','lijiang'] if m not in maps])
elo1 = (self.overall_elos[team1]*overall_weight +
self.maptype_elos[team1]['control']*maptype_weight +
self.mapname_elos[team1][map5]*mapname_weight)
elo2 = (self.overall_elos[team2]*overall_weight +
self.maptype_elos[team2]['control']*maptype_weight +
self.mapname_elos[team2][map5]*mapname_weight)
if random.random()< 1/(1+10**((elo2-elo1)/d)): team1score+=1
else: team2score +=1
scoreline = "{}-{}".format(team1score,team2score)
if scoreline not in results: results[scoreline]=0
results[scoreline]+=1
if team1score>team2score: team1wins += 1
results = {s:results[s]/loops for s in results}
return results, team1wins/loops
def simulateSingleMatch(self, team1, team2, maps, type='regular', updateelos=True, firstto=4):
'''
Type can be regular, or playoffs.
It is assumed team1 is the higher seed.
'''
types = [self.getMapType(m) for m in maps]
score = [0,0]
def simulateMap(mapname,maptype):
elo1 = (self.overall_elos[team1]*overall_weight +
self.mapname_elos[team1][mapname]*mapname_weight +
self.maptype_elos[team1][maptype]*maptype_weight)
elo2 = (self.overall_elos[team2]*overall_weight +
self.mapname_elos[team2][mapname]*mapname_weight +
self.maptype_elos[team2][maptype]*maptype_weight)
random_roll = random.random()
team1winchance = 1/(1+10**((elo2-elo1)/d))
drawchance = self.map_draws[mapname][0]/self.map_draws[mapname][1] * min(team1winchance,1-team1winchance)*2
if random_roll < team1winchance - drawchance/2: act_t1, act_t2 = 1,0
elif random_roll < team1winchance + drawchance/2: act_t1, act_t2 = 0.5,0.5
else: act_t1, act_t2 = 0,1
if updateelos:
MoV = random.choice(self.margins_of_victory)
exp_t1 = 1/(1+10**((elo2-elo1)/d)) # Expected Scores
exp_t2 = 1/(1+10**((elo1-elo2)/d))
if act_t1==1: elo_dif = elo1-elo2
elif act_t2==1: elo_dif = elo2-elo1
else:
if elo1>elo2: elo_dif = elo1-elo2
                    elif elo2>elo1: elo_dif = elo2-elo1
else: elo_dif = 0
mult = math.log(1 + MoV) * 1 / (elo_dif * 0.001 + 1)
t1_change = k * (act_t1 - exp_t1) * mult
t2_change = k * (act_t2 - exp_t2) * mult
self.overall_elos[team1] += t1_change
self.maptype_elos[team1][maptype] += t1_change
self.mapname_elos[team1][mapname] += t1_change
self.overall_elos[team2] += t2_change
self.maptype_elos[team2][maptype] += t2_change
self.mapname_elos[team2][mapname] += t2_change
return round(act_t1),round(act_t2)
if type=='regular':
for i in range(len(maps)):
score1,score2 = simulateMap(maps[i],types[i])
score[0]+=score1
score[1]+=score2
if score[0]==score[1]:
map5 = random.choice([x for x in mapnames if self.getMapType(x)=='control' and x not in maps])
score1,score2 = simulateMap(map5,'control')
score[0]+=score1
score[1]+=score2
if score[0]>score[1]:
self.standings[team1]['w']+=1
self.standings[team2]['l']+=1
else:
self.standings[team1]['l']+=1
self.standings[team2]['w']+=1
self.standings[team1]['d']+=score[0]-score[1]
self.standings[team2]['d']+=score[1]-score[0]
if type=='playoffs':
mappreferences = {t:{mt:[x for x in postseasonmappool if self.getMapType(x)==mt] for mt in maptypes} for t in [team1,team2]}
for t in [team1,team2]:
for mt in maptypes:
mappreferences[t][mt].sort(key=lambda x:self.mapname_elos[t][x]-self.mapname_elos[{team1:team2,team2:team1}[t]][x],reverse=True)
mapprogression = ['control','hybrid','assault','escort']
scores = [0,0]
mnum = 0
played = []
picker = team1
while max(score)<firstto:
mtype = mapprogression[mnum%4]
mname = [m for m in mappreferences[picker][mtype] if m not in played][0]
played.append(mname)
mnum += 1
score1,score2 = simulateMap(mname,mtype)
if score1==1:
picker=team2
score[0]+=1
elif score2==1:
picker=team1
score[1]+=1
if score[0]>score[1]: return [team1,team2]
else: return [team2,team1]
return
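# Hypothetical driver (added for illustration; assumes data.json and the OWL
# team-colors API response are available at import time):
# season = EloCalculations()
# season.calculateElos()
# results, p_team1 = season.predictMatch('NYE', 'SFS', ['ilios', 'kings-row', 'hanamura', 'dorado'])
# print(results, p_team1)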
|
8,863 | de287d1bc644fdfd0f47bd8667580786b74444d0 | import math
class Solution(object):
def smallestGoodBase(self, n):
"""
:type n: str
:rtype: str
"""
# k is the base and the representation is
# m bits of 1
# We then have from math
# (k**m - 1) / (k-1) = n
# m = log_k (n * k - n + 1)
# m needs to be integer
# we know that k = 2 m will be largest
m_max = int(math.ceil(math.log(1 + int(n), 2)))
for m in range(m_max, 1, -1):
# solve high order equation
# k**m - nk + n - 1 = 0
# Find k using newton approach
res = self.solve_equation(m, int(n))
if res != False:
return str(res)
# k**m - nk + n - 1 = 0
# TODO: Why newton approach always work here.
# Hard to prove they are always monotonic
def solve_equation(self, m, n):
k_l, k_h = 2, n - 1
while k_l <= k_h:
            mid = (k_l + k_h) // 2  # integer midpoint (works in both Python 2 and 3)
val = mid ** m - n * mid + n - 1
if val == 0:
return mid
elif val < 0:
k_l = mid + 1
else:
k_h = mid - 1
return False
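# Minimal check (added for illustration): 13 = 111 in base 3 and 4681 = 11111 in base 8.
if __name__ == "__main__":
    print(Solution().smallestGoodBase("13"))    # expected "3"
    print(Solution().smallestGoodBase("4681"))  # expected "8"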
|
8,864 | 6a4a5eac1b736ee4f8587adba298571f90df1cf9 | from .queue_worker import QueueWorker
import threading
class WorkersOrchestrator:
@classmethod
def worker_func(cls, worker):
worker.start_consumption()
def run_orchestrator(self, num_of_workers):
worker_list = []
for i in range(num_of_workers):
worker_list.append(QueueWorker())
worker_threads = list()
for worker in worker_list:
x = threading.Thread(target=self.worker_func, args=(worker,))
worker_threads.append(x)
x.start()
|
8,865 | 61179dc734069017adaabd53804ed0102d9416e3 | from django.contrib.auth.models import User
from django.db import models
class Chat(models.Model):
category = models.CharField(unique=True, max_length=100)
def __str__(self):
return self.category
class ChatMessage(models.Model):
context = models.CharField(max_length=1000)
user = models.ForeignKey(User, on_delete=models.CASCADE)
chat = models.ForeignKey(Chat, on_delete=models.CASCADE)
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.context
|
8,866 | f5513bea4ca5f4c2ac80c4bf537a264a4052d1e9 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import random
a = random.sample(range(100), 10)
print("All items: {}".format(a))
it = iter(a) # call a.__iter__()
print("Num01: {}".format(next(it))) # call it.__next__()
print("Num02: {}".format(next(it)))
print("Num03: {}".format(it.__next__()))
it = iter(a)
i = 1
while True:
try:
x = next(it)
print("Num{:02d}: {}".format(i, x))
except StopIteration:
break
i += 1
class Node():
def __init__(self, value):
self._value = value
self._children = []
def __repr__(self):
return 'Node({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
return iter(self._children)
root = Node(0)
root.add_child(Node(1))
root.add_child(Node(2))
for x in root:
print(x)
class Node2():
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node2({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def __iter__(self):
self._idx = 0
        return self  # return self: this object is its own iterator, so it must implement __next__()
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
root = Node2(10)
root.add_child(Node2(11))
root.add_child(Node2(22))
for x in root:
print(x)
class Node3():
def __init__(self, value):
self._value = value
self._children = []
self._idx = 0
def __repr__(self):
return 'Node3({!r})'.format(self._value)
def add_child(self, node):
self._children.append(node)
def has_children(self):
return len(self._children) != 0
def __iter__(self):
self._idx = 0
        return self  # return self: this object is its own iterator, so it must implement __next__()
def __next__(self):
if self._idx < len(self._children):
idx = self._idx
self._idx += 1
return self._children[idx]
raise StopIteration
def recur_show(root):
print(root)
if root.has_children():
for node in root:
recur_show(node)
def recur_show2(root):
if root.has_children():
for node in root:
recur_show2(node)
print(root)
# 0
#
# 10 20 30
#
# 11 12 31
root = Node3(0)
c1 = Node3(10)
c2 = Node3(20)
c3 = Node3(30)
c11 = Node3(11)
c12 = Node3(12)
c31 = Node3(31)
root.add_child(c1)
root.add_child(c2)
root.add_child(c3)
c1.add_child(c11)
c1.add_child(c12)
c3.add_child(c31)
print("==================")
recur_show(root)
print("==================")
recur_show2(root)
|
8,867 | 67793c8851e7107c6566da4e0ca5d5ffcf6341ad | import csv
from functools import reduce
class Csvread:
def __init__(self, fpath):
self._path=fpath
with open (fpath) as file:
read_f=csv.reader(file)
print(read_f) #<_csv.reader object at 0x000002A53144DF40>
            self._sheet = list(read_f)[1:]  # materialize rows as a list, skipping the header row
def get_sheet(self):
return self._sheet
class Csvcalc:
def __init__(self, cont):
self._cont=cont
def row_count(self):
return len(self._cont)
def get_row (self, row_no):
return self._cont[row_no]
def col_count (self):
return len(self._cont[1])
def get_colum (self,no_col):
return list (x[no_col] for x in self._cont)
def sum_col (self,col_no):
return reduce(lambda x, y: x+y, self.get_colum(col_no))
    def mul_col(self, col_no):
        return reduce(lambda x, y: x*y, self.get_colum(col_no))
csv1= Csvread('./data.csv')
print(csv1) #<__main__.Csvread object at 0x000002A5312B4040>
|
8,868 | 67ac5d82bc37b67cfdae73b6667b73b70ed33cfb | '''
Paulie Jo Gonzalez
CS 4375 - os
Lab 0
Last modified: 02/14/2021
This code includes a reference to C code for my_getChar method provided by Dr. Freudenthal.
'''
from os import read
next_c = 0
limit = 0
buf = b''
def get_char():
    global next_c, limit, buf
    if next_c == limit:  # buffer exhausted, refill from stdin
        next_c = 0
        buf = read(0, 100)  # read up to 100 bytes
        limit = len(buf)
        if limit == 0:  # nothing left to read: EOF
            return ''
    ch = chr(buf[next_c])  # convert to char (from ASCII)
    next_c += 1
    return ch
def my_read_line():
global next_c, limit
line = ''
ch = get_char()
# get each char of line
while (ch != '\n'): # while char is not new line
line += ch # build line
ch = get_char()
if ch == '':
return line # EOF
next_c = 0 # reset next_c and limit after line is read
limit = 0
line += '\n'
return line
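# Example driver (added; mirrors the commented-out my_read_lines below):
# while True:
#     line = my_read_line()
#     if line == '':
#         break
#     print(line, end='')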
# def my_read_lines():
# num_lines = 0
# in_line = my_read_line() # read line
# while len(in_line):
# num_lines += 1
# print(f'###line {num_lines}: <{str(in_line)}> ###\n')
# in_line = my_read_lines()
# print(f'eof after {num_lines}\n')
|
8,869 | 62c28b5eb31b90191dfbab4456fc5373ba51bf64 | import pytest
import os
import pandas as pd
import numpy as np
import math
import scipy
from scipy import stats
from sklearn import metrics, linear_model
from gpmodel import gpkernel
from gpmodel import gpmodel
from gpmodel import gpmean
from gpmodel import chimera_tools
n = 200
d = 10
X = np.random.random(size=(n, d))
xa = X[[0]]
xb = X[[1]]
Xc = X[[2]]
class_Y = np.random.choice((1, -1), size=(n,))
alpha = 1e-1
func = gpmean.GPMean(linear_model.Lasso, alpha=alpha)
X_test = np.random.random(size=(5, d))
kernel = gpkernel.SEKernel()
cov = kernel.cov(X, X, hypers=(1.0, 0.5))
variances = np.random.random(size=(n, ))
Y = np.random.multivariate_normal(np.zeros(n), cov=cov)
Y += np.random.normal(0, 0.2, n)
def test_init():
model = gpmodel.GPRegressor(kernel)
assert np.allclose(model.mean_func.mean(X), np.zeros((len(X), )))
assert model.objective == model._log_ML
assert model.kernel == kernel
assert model.guesses is None
model = gpmodel.GPRegressor(kernel, guesses=(0.1, 0.1, 0.1))
assert model.guesses == (0.1, 0.1, 0.1)
def test_normalize():
model = gpmodel.GPRegressor(kernel)
m, s, normed = model._normalize(Y)
assert np.isclose(m, Y.mean())
assert np.isclose(s, Y.std())
assert np.allclose(normed, (Y - m) / s)
model.std = s
model.mean = m
assert np.allclose(Y, model.unnormalize(normed))
def test_K():
model = gpmodel.GPRegressor(kernel)
model.kernel.fit(X)
K, Ky = model._make_Ks((1, 1, 1))
assert np.allclose(K, kernel.cov(X, X))
assert np.allclose(Ky, K + np.diag(np.ones(len(K))))
model.variances = variances
K, Ky = model._make_Ks((1, 1))
assert np.allclose(K, kernel.cov(X, X))
assert np.allclose(Ky, K + np.diag(variances))
def test_ML():
model = gpmodel.GPRegressor(kernel)
model.kernel.fit(X)
model.normed_Y = model._normalize(Y)[2]
model._ell = len(Y)
hypers = np.random.random(size=(3,))
y_mat = model.normed_Y.reshape((n, 1))
K, Ky = model._make_Ks(hypers)
first = 0.5 * y_mat.T @ np.linalg.inv(Ky) @ y_mat
second = 0.5 * np.log(np.linalg.det(Ky))
third = model._ell / 2.0 * np.log(2 * np.pi)
actual = first + second + third
assert np.isclose(actual, model._log_ML(hypers))
def test_fit():
model = gpmodel.GPRegressor(kernel)
model.fit(X, Y)
assert model._n_hypers == kernel._n_hypers + 1
assert np.allclose(model.X, X)
assert np.allclose(model.Y, Y)
m, s, normed = model._normalize(Y)
assert np.allclose(model.normed_Y, normed)
assert np.isclose(m, model.mean)
assert np.isclose(s, model.std)
vn, s0, ell = model.hypers
K = kernel.cov(X, X, (s0, ell))
Ky = K + np.diag(vn * np.ones(len(K)))
ML = model._log_ML(model.hypers)
L = np.linalg.cholesky(Ky)
alpha = np.linalg.inv(Ky) @ normed.reshape((n, 1))
assert np.isclose(model.ML, ML)
assert np.allclose(model._K, K)
assert np.allclose(model._Ky, Ky)
assert np.allclose(model._L, L)
assert np.allclose(model._alpha, alpha)
def test_predict():
model = gpmodel.GPRegressor(kernel)
model.fit(X, Y)
h = model.hypers[1::]
m, s, normed = model._normalize(Y)
k_star = model.kernel.cov(X_test, X, hypers=h)
k_star_star = model.kernel.cov(X_test, X_test, hypers=h)
K = kernel.cov(X, X, h)
Ky = K + np.diag(model.hypers[0] * np.ones(len(K)))
means = k_star @ np.linalg.inv(Ky) @ normed.reshape(len(Y), 1)
means = means * s + m
var = k_star_star - k_star @ np.linalg.inv(Ky) @ k_star.T
var *= s ** 2
m, v = model.predict(X_test)
print(v)
print(var)
print(model.hypers[0])
assert (np.abs(v - var) < 1e-1).all()
assert np.allclose(means[:, 0], m, rtol=1.e-8, atol=1e-4)
def test_pickles():
model = gpmodel.GPRegressor(kernel)
model.fit(X, Y)
m1, v1 = model.predict(X_test)
model.dump('test.pkl')
new_model = gpmodel.GPRegressor.load('test.pkl')
os.remove('test.pkl')
m2, v2 = new_model.predict(X_test)
assert np.allclose(m1, m2)
assert np.allclose(v1, v2)
if __name__ == "__main__":
test_init()
test_normalize()
test_K()
test_ML()
test_fit()
test_predict()
test_pickles()
# To Do:
# Test LOO_res and LOO_log_p and fitting with LOO_log_p
# Test with mean functions
# Test with given variances
|
8,870 | d49aa03cd6b8ba94d68a1bc1e064f77fded65000 | from bs4 import BeautifulSoup
import requests,pymysql,random,time
import http.cookiejar
from multiprocessing import Pool,Lock
def get_proxies_ip():
db = pymysql.connect("localhost","root","xxx","xxx",charset='utf8')
cursor = db.cursor()
sql = "SELECT * FROM proxies_info;"
proxies_list = []
try:
cursor.execute(sql)
results = cursor.fetchall()
for row in results:
proxy_ip = row[1]
proxy_port = str(row[2])
proxies_list.append(proxy_ip+':'+proxy_port)
except:
db.rollback()
db.close()
porxite = {
'http':'http://'+random.choice(proxies_list)
}
return porxite
def get_headers():
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; QQBrowser/7.0.3698.400)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
"Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
return random.choice(USER_AGENTS)
def handle():
global lock,session,GuangCai_Company_file
r_file = '1.csv'
w_file = 'w1.csv'
lock = Lock()
GuangCai_Company_file = open(w_file,'w')
headers= {'User-Agent': get_headers(),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
'Host':'www.gldjc.com',
'Origin':'http://www.gldjc.com',
'Referer':'http://www.gldjc.com/login?hostUrl=http://www.gldjc.com/membercenter/toRenewOrderPage'}
login_data = {
'userName':'13296385392',
'password':'qazwsxedc'
}
login_url = 'http://www.gldjc.com/dologin'
    # Create a session so that requests from the same user are linked together; cookies are handled automatically until the session ends
session = requests.Session()
filename = 'cookie'
    # Create an LWPCookieJar instance, which can store cookies in Set-Cookie3 format.
    # (The MozillaCookieJar class saves them as a '.txt' file instead.)
session.cookies = http.cookiejar.LWPCookieJar(filename)
    # If a cookie already exists locally, there is no need to post the login data again
try:
session.cookies.load(filename=filename, ignore_discard=True)
except:
        print('Cookie not loaded!')
content = session.post(login_url,data=login_data,headers=headers)
# print(content.content)
    # Save the cookie locally
session.cookies.save(ignore_discard=True, ignore_expires=True)
info_tuple_list = []
with open(r_file,'r') as GuangCai_file:
for info in GuangCai_file.readlines():
firs_cate = info.split('\t')[0].strip()
secd_cate = info.split('\t')[1].strip()
thir_cate = info.split('\t')[2].strip()
cate_url = info.split('\t')[4].strip()
info_tuple_list.append((firs_cate,secd_cate,thir_cate,cate_url))
pool = Pool(1)
pool.map(get_info,info_tuple_list)
pool.close()
pool.join()
GuangCai_Company_file.close()
def get_info(info_tuple_list):
firs_cate = info_tuple_list[0].strip()
secd_cate = info_tuple_list[1].strip()
thir_cate = info_tuple_list[2].strip()
cate_url = info_tuple_list[3].strip()
time.sleep(2)
print(cate_url)
headers = {
'User-Agent': get_headers(),
}
try:
req = session.get(cate_url,allow_redirects=False,headers=headers,proxies=get_proxies_ip(),timeout=40)
req.encoding = 'utf-8'
# print(req.text)
soup = BeautifulSoup(req.text,'html.parser')
        # spu id of each detail page
for next_page_id in soup.select('#a_checkMore'):
spu_id = next_page_id['onclick'].split("'")[1]
lock.acquire()
GuangCai_Company_file.write(firs_cate+'\t'+secd_cate+'\t'+thir_cate+'\t'+cate_url+'\t'+spu_id+'\n')
GuangCai_Company_file.flush()
lock.release()
print(spu_id)
except Exception as e:
lock.acquire()
with open('error.csv','a') as error_fil:
error_fil.write(cate_url+'\n')
lock.release()
print(e)
handle()
# with open('tehx.html','r') as tehx_file:
# soup = BeautifulSoup(tehx_file.read(),'html.parser')
# for next_page_id in soup.select('#a_checkMore'):
# print(next_page_id['onclick'].split("'")[1])
|
8,871 | f410a77d4041514383110d9fd16f896178924d59 | # coding: UTF-8
import os
import sys
if len(sys.argv) == 3:
fname = sys.argv[1]
out_dir = sys.argv[2]
else:
print "usage: vcf_spliter <input file> <output dir>"
exit()
count = 0
if not os.path.exists(out_dir):
os.makedirs(out_dir)
with open(fname, 'r') as f:
for l in f:
if l.strip() == "BEGIN:VCARD":
count += 1
fw = open(os.path.join(out_dir, str(count)+'.vcf'), 'w')
fw.write(l)
elif l.strip() == "END:VCARD":
fw.write(l)
fw.close()
else:
fw.write(l) |
8,872 | 25550cbaf6e0e5bdbbe3852bb8cdc05ac300d315 | # Operator precedence
# As in math, Python operators have precedence: multiplication and division come before addition and subtraction
# Precedence can be looked up in the precedence table;
# the lower an operator sits in the table, the higher its precedence, and higher-precedence operators are evaluated first
# Operators of equal precedence are evaluated left to right
# It is enough to know that such a table exists; there is no need to memorize it
# In real code, when precedence is unclear, use parentheses to make the evaluation order explicit
a = 1 + 2 * 3
# Which is higher, and or or?
# If or had higher precedence, or the two operators had equal precedence,
# the or would be evaluated first and the result would be 3
# Since and has higher precedence, the and is evaluated first,
# so the result is 1
a = 1 or 2 and 3
# print(a)
# Logical operators (supplement)
# Logical comparison operators can be chained
result = 1 < 2 < 3 # equivalent to 1 < 2 and 2 < 3
result = 10 < 20 > 15
print(result) |
8,873 | 062b6133ba4de24f7eaf041e4b6c039501b47b9a | n_m_q=input().split(" ")
n=int(n_m_q[0])
m=int(n_m_q[1])
q=int(n_m_q[2])
dcc=[]
for i in range(n):
a=[]
dcc.append(a)
available=[]
for i in range(m):
x=input().split(" ")
a=int(x[0])
b=int(x[1])
available.append([a,b])
dcc[a-1].append(b)
dcc[b-1].append(a)
for i in range(q):
x=input().split(" ")
l=int(x[0])
r=int(x[1])
s=int(x[2])
t=int(x[3])
target=[]
target.append(s)
for j in range(l-1,r):
x=[]
for a in target:
x.append(a)
for y in dcc[a-1]:
if [a,y] in available:
if available.index([a,y])==j:
x.append(y)
if [y,a] in available:
if available.index([y,a])==j:
x.append(y)
target=x
print(target)
|
8,874 | 887ae9b7c629be679bf4f5fb4311c31bff605c73 | import os
import shutil
from tqdm import tqdm
from pathlib import Path
from eval_mead import PERCENT
DATAPATH = '../../../data/test'
# MEAD_DIR = 'mead'
MEAD_DIR = os.path.abspath('mead')
MEAD_DATA_PATH = f'{MEAD_DIR}/data'
MEAD_BIN = f'{MEAD_DIR}/bin'
MEAD_LIB = f'{MEAD_DIR}/lib'
MEAD_FORMATTING_ADDONS = f'{MEAD_BIN}/addons/formatting'
MEAD_DID = f'{MEAD_DIR}/did'
TARGET = 'MEAD_TEST'
DATA_DIR = os.path.join(MEAD_DATA_PATH, TARGET)
parse = True
if os.path.exists(DATA_DIR):
override = input('Data exist, override (delete and re-parse)? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(DATA_DIR)
else:
parse = False
os.makedirs(DATA_DIR, exist_ok=True)
cluster_file = os.path.join(DATA_DIR, 'MEAD_TEST.cluster')
config_file = os.path.join(DATA_DIR, 'MEAD_TEST.config')
CONFIG = f"""<?xml version='1.0' encoding='utf-8'?>
<MEAD-CONFIG LANG="ENG" TARGET="MEAD_TEST" CLUSTER-PATH="{DATA_DIR}" DOC-DIRECTORY="{DATA_DIR}/docsent">
<FEATURE-SET BASE-DIRECTORY="{DATA_DIR}/feature">
<FEATURE NAME="Position" SCRIPT="{MEAD_BIN}/feature-scripts/Position.pl" />
<FEATURE NAME="Length" SCRIPT="{MEAD_BIN}/feature-scripts/Length.pl" />
<FEATURE NAME="Centroid" SCRIPT="{MEAD_BIN}/feature-scripts/Centroid.pl enidf ENG" />
</FEATURE-SET>
<CLASSIFIER COMMAND-LINE="{MEAD_BIN}/default-classifier.pl Length 3 Centroid 4 Position 0" SYSTEM="MEADORIG" />
<COMPRESSION BASIS="sentences" PERCENT="1" />
</MEAD-CONFIG>
"""
if parse:
### Get raw text ###
with open(os.path.join(DATAPATH, 'test.txt.src'), 'r') as stream:
raw_papers = stream.readlines()
papers = [paper.strip().split('##SENT##') for paper in raw_papers]
# Setting Env. Var.
with open(os.path.join(MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'), 'r') as stream:
        print('Make sure you have changed the following line to the absolute path of',
              os.path.abspath(MEAD_DID))
print('line 18 of', os.path.join(
MEAD_FORMATTING_ADDONS, 'MEAD_ADDONS_UTIL.pm'))
print(stream.readlines()[17])
with open(os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'), 'r') as stream:
        print('Make sure you have changed the following line to the absolute path of',
              os.path.abspath(MEAD_DIR))
print('line 31 of', os.path.join(MEAD_LIB, 'MEAD', 'MEAD.pm'))
print(stream.readlines()[30])
print('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.system('export PERL5LIB=' + os.path.abspath(MEAD_FORMATTING_ADDONS))
os.environ['PERL5LIB'] = os.path.abspath(MEAD_FORMATTING_ADDONS)
# Write raw text, cluster file
# This stuff should be generated by text2cluster.pl
# cluster_lines = []
# cluster_lines.append("<?xml version = '1.0' encoding='utf-8'?>\n")
# cluster_lines.append("<CLUSTER LANG='ENG'>\n")
print('Converting src to raw text...')
for i, paper in tqdm(enumerate(papers), total=len(papers)):
# did = f'raw_text_{i+1}.txt'
did = f'{i+1}'
text_file = os.path.join(DATA_DIR, did)
with open(text_file, 'w') as stream:
# make sure the sent split are the same as our annotation
stream.write('\n'.join(paper))
# delete </ pattern or XML might break
# os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/<\///g"')
# https://stackoverflow.com/questions/8914435/awk-sed-how-to-remove-parentheses-in-simple-text-file
# os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/[><]//g"')
# https://validator.w3.org/feed/docs/error/SAXError.html
# https://www.w3.org/TR/REC-xml/#dt-chardata
print('Clean up stuff that might influence XML parsing...')
os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/</</g"')
os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/&/&/g"')
os.system(f'find {DATA_DIR} -type f | xargs sed -i "s/>/>/g"')
# cluster_lines.append(f"\t<D DID='{did}' />\n")
# cluster_lines.append('</CLUSTER>\n')
# Get docsent
# with open(cluster_file, 'w') as stream:
# stream.writelines(cluster_lines)
# Path(cluster_file).touch()
print('Create cluster and docsent files...')
os.system(
f'perl {MEAD_FORMATTING_ADDONS}/text2cluster.pl {DATA_DIR}')
if os.system(f'mv {DATA_DIR}/../{TARGET}.cluster {DATA_DIR}') != 0:
        print(
            f'MAKE SURE you have changed $dir/$dir.cluster to $dir.cluster in {MEAD_FORMATTING_ADDONS}/text2cluster.pl')
        print("Currently, it has a bug and can't create the file")
# Run config
# with open(config_file, 'w') as stream:
# stream.write(CONFIG)
# extract_file = os.path.join(DATA_DIR, f'{TARGET}.extract')
# os.system(
# f'cat {config_file} | {MEAD_BIN}/driver.pl > {extract_file}')
# https://askubuntu.com/questions/20414/find-and-replace-text-within-a-file-using-commands
os.system(
f'find {DATA_DIR} -name "*.cluster" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"')
os.system(
f'find {DATA_DIR} -name "*.docsent" | xargs sed -i "s/<?xml version=\'1.0\'?>/<?xml version=\'1.0\' encoding=\'utf-8\'?>/g"')
OUTPUT_PATH = '../output'
OUTPUT_DIR = os.path.join(OUTPUT_PATH, 'mead')
if os.path.exists(OUTPUT_DIR):
override = input('Result exist, do you want to re-run? (Y/n): ')
if override.lower() == 'y':
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR, exist_ok=True)
summary_file = os.path.join(OUTPUT_DIR, f'{TARGET}.summary')
extract_file = os.path.join(OUTPUT_DIR, f'{TARGET}.extract')
# compression basis is "sentence", and give PERCENT% summary
shared_parameters = f'-sentences -percent {PERCENT}'
# os.system(
# f'perl {MEAD_BIN}/mead.pl {shared_parameters} -summary -output {summary_file} {TARGET}')
os.system(
f'perl {MEAD_BIN}/mead.pl {shared_parameters} -extract -output {extract_file} {TARGET}')
|
8,875 | 74c60c9e37e4e13ed4c61f631c3426b685b5d38f | from django.conf.urls import patterns, include, url
from views.index import Index
from views.configuracoes import Configuracoes
from views.parametros import *
urlpatterns = patterns('',
url(r'^$', Index.as_view(), name='core_index'),
url(r'^configuracoes/', Configuracoes.as_view(), name='core.core_configurations'),
#Parametros
url(r'^parametros/data/$', ParametrosData.as_view(),name='core.list_json_parametro'),
url(r'^parametros/formulario/$', ParametrosCreateForm.as_view(),name='core.add_parametro'),
url(r'^parametros/(?P<pk>\d+)/$', ParametrosUpdateForm.as_view(),name='core.change_parametro'),
url(r'^parametros/remove/(?P<pk>\d+)/$', ParametrosDelete.as_view(),name='core.delete_parametro'),
url(r'^parametros/$', ParametrosList.as_view(), name='core.list_parametros'),
)
|
8,876 | a5c19ad60ac6312631273858cebaae944a2008ec | def contador_notas(multiplo, numero):
if(numero % multiplo == 0):
        notas = numero // multiplo  # integer division: whole number of notes
return notas
else:
return -1
entrada = int(input())
resultado = contador_notas(100, entrada)
if (resultado != -1):
print("{} nota(s) de R$ {}".format(resultado, 100)) |
8,877 | 905d8be76ef245a2b8fcfb3f806f8922d351ecf0 | import pickle
import numpy as np
import math
class AdaBoostClassifier:
'''A simple AdaBoost Classifier.'''
def __init__(self, weak_classifier, n_weakers_limit):
'''Initialize AdaBoostClassifier
Args:
weak_classifier: The class of weak classifier, which is recommend to be sklearn.tree.DecisionTreeClassifier.
n_weakers_limit: The maximum number of weak classifier the model can use.
'''
self.weakClassifier = weak_classifier
self.iteration = n_weakers_limit
def is_good_enough(self):
'''Optional'''
pass
def calculateError(self, y, predictY, weights):
"""
        Compute the weighted classification error.
        :param y: list of ground-truth labels
        :param predictY: list of predicted labels
        :param weights: list of sample weights
        :return: the weighted classification error
"""
error = 0
for i in range(len(y)):
if y[i] != predictY[i]:
error += weights[i]
return error
def fit(self,X,y):
'''Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1).
'''
row, col = X.shape
weightArray = [(1 / row)] * row
self.alphaList = []
self.finalClassifierList = []
for i in range(self.iteration):
clf = self.weakClassifier(max_depth=2)
clf.fit(X,y,weightArray)
predictY = clf.predict(X)
error = self.calculateError(y, predictY, weightArray)
if error > 0.5:
break
else:
self.finalClassifierList.append(clf)
alpha = 0.5 * math.log((1-error) / error)
self.alphaList.append(alpha)
aYH = alpha * y * predictY * (-1)
tempWeights = weightArray * np.exp(aYH)
tempSum = np.sum(tempWeights)
weightArray = tempWeights / tempSum
def predict_scores(self, X):
'''Calculate the weighted sum score of the whole base classifiers for given samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
Returns:
An one-dimension ndarray indicating the scores of differnt samples, which shape should be (n_samples,1).
'''
pass
def predict(self, X, threshold=0):
'''Predict the catagories for geven samples.
Args:
X: An ndarray indicating the samples to be predicted, which shape should be (n_samples,n_features).
threshold: The demarcation number of deviding the samples into two parts.
Returns:
An ndarray consists of predicted labels, which shape should be (n_samples,1).
'''
predictYList = []
for i in range(len(self.finalClassifierList)):
tempY = self.finalClassifierList[i].predict(X)
predictYList.append(tempY)
predicYArray = np.transpose(np.array(predictYList))
alphaArray = np.array(self.alphaList)
temp = predicYArray * alphaArray
predictY = np.sum(temp, axis = 1)
for i in range(len(predictY)):
if predictY[i] > threshold:
predictY[i] = 1
else:
predictY[i] = -1
return predictY
@staticmethod
def save(model, filename):
with open(filename, "wb") as f:
pickle.dump(model, f)
@staticmethod
def load(filename):
with open(filename, "rb") as f:
return pickle.load(f)
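# A minimal usage sketch (added; assumes sklearn is installed and that the
# labels in y_train are +1 / -1, as fit() expects):
# from sklearn.tree import DecisionTreeClassifier
# booster = AdaBoostClassifier(DecisionTreeClassifier, n_weakers_limit=20)
# booster.fit(X_train, y_train)
# y_pred = booster.predict(X_test)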
|
8,878 | c6d8b9faa610e817c449eee94d73c61cb62fa272 | print('test 123123')
|
8,879 | 92e7a7825b3f49424ec69196b69aee00bc84da68 | #!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Antirollback clock user space support.
This daemon serves several purposes:
1. Maintain a file containing the minimum time, and periodically
update its value.
2. At startup, write the minimum time to /proc/ar_clock.
The kernel will not allow the time to be set substantially
earlier than this value (there is a small amount of wiggle
room).
"""
__author__ = 'dgentry@google.com (Denton Gentry)'
import os
import pwd
import sys
import tempfile
import time
import options
optspec = """
antirollback [options...]
--
i,interval= seconds between updates [28800]
p,persist= path to persistent file [/fiber/config/ar_clock]
u,user= setuid to this user to run
"""
# Unit tests can override these.
BIRTHDAY = 1349064000.0 # 10/1/2012
BUILD_FILENAME = '/etc/softwaredate'
PROC_AR = '/proc/ar_clock'
PROC_UPTIME = '/proc/uptime'
SLEEP = time.sleep
TIMENOW = time.time
def GetPersistTime(ar_filename):
"""Return time stored in ar_filename, or 0.0 if it does not exist."""
try:
with open(ar_filename) as f:
return float(f.read())
except (IOError, ValueError):
return 0.0
def GetBuildDate(build_filename):
"""Return build_date in floating point seconds since epoch."""
try:
with open(build_filename) as f:
return float(f.readline())
except (IOError, ValueError):
return 0.0
def GetMonotime():
"""Return a monotonically increasing count of seconds."""
return float(open(PROC_UPTIME).read().split()[0])
def GetAntirollbackTime(ar_filename):
"""Return the appropriate antirollback time to use at startup."""
now = max(TIMENOW(), GetPersistTime(ar_filename),
GetBuildDate(BUILD_FILENAME), BIRTHDAY)
return now
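# (added note) the startup time is the newest of: current system time, the
# persisted antirollback time, the build date, and the hardcoded BIRTHDAY,
# so the clock can never start earlier than any previously observed time.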
def StoreAntirollback(now, ar_filename, kern_f):
"""Write time to /proc/ar_clock and the persistent file."""
print 'antirollback time now ' + str(now)
sys.stdout.flush()
kern_f.write(str(now))
kern_f.flush()
tmpdir = os.path.dirname(ar_filename)
with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:
f.write(str(now) + '\n')
f.flush()
os.fsync(f.fileno())
os.rename(f.name, ar_filename)
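  # (added note) write-to-temp + fsync + rename makes the update atomic:
  # a crash cannot leave a truncated timestamp file behind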
def LoopIterate(uptime, now, sleeptime, ar_filename, kern_f):
SLEEP(sleeptime)
new_uptime = GetMonotime()
now += (new_uptime - uptime)
uptime = new_uptime
now = max(now, TIMENOW())
StoreAntirollback(now=now, ar_filename=ar_filename, kern_f=kern_f)
return (uptime, now)
def main():
o = options.Options(optspec)
(opt, _, _) = o.parse(sys.argv[1:])
kern_f = open(PROC_AR, 'w')
# Drop privileges
if opt.user:
pd = pwd.getpwnam(opt.user)
os.setuid(pd.pw_uid)
uptime = GetMonotime()
now = GetAntirollbackTime(opt.persist)
StoreAntirollback(now=now, ar_filename=opt.persist, kern_f=kern_f)
while True:
(uptime, now) = LoopIterate(uptime=uptime, now=now,
sleeptime=opt.interval,
ar_filename=opt.persist,
kern_f=kern_f)
if __name__ == '__main__':
main()
|
8,880 | 6dafb60b79a389499ae2a0f17f9618426faf45a9 | def Return():
s = raw_input('Enter a s: ')
i = 0
s1 = ''
leng = len(s)
while i < leng:
if s[i] == s[i].lower():
s1 += s[i].upper()
else:
s1 += s[i].lower()
i += 1
return s1
if __name__ == '__main__':
print Return()
|
8,881 | 97fb2388777bcb459b9818495121fdf8318095ca | '''
check whether a word appears in a file
'''
# easier solution :
def findKeyInFile(word, filepath):
with open(filepath) as f:
for line in f.readlines():
if line.count(word) > 0:
return line
return None
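# Example (added): prints the first line containing 'error', or None if absent.
# print(findKeyInFile('error', 'app.log'))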
|
8,882 | b1622aa65422fcb69a16ad48a26fd9ed05b10382 | import pytest
from components import models
pytestmark = pytest.mark.django_db
def test_app_models():
assert models.ComponentsApp.allowed_subpage_models() == [
models.ComponentsApp,
models.BannerComponent,
]
def test_app_required_translatable_fields():
assert models.ComponentsApp.get_required_translatable_fields() == []
@pytest.mark.django_db
def test_set_slug(en_locale):
instance = models.ComponentsApp.objects.create(
title_en_gb='the app',
depth=2,
path='/thing',
)
assert instance.slug == models.ComponentsApp.slug_identity
|
8,883 | 5f490d6a3444b3b782eed5691c82ab7e4b2e55db | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common import action_chains, keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import unittest
from pprint import pprint
from bs4 import BeautifulSoup
import json
import jsonpickle
import xlrd
import requests
from pyvirtualdisplay import Display
# display = Display(visible=0, size=(800, 800))
# display.start()
class Verify_Idaho_Links(unittest.TestCase):
def test_LB_Maps(self):
testcounter = 0
driver = webdriver.Chrome()
# Idaho
urlID = 'http://crc-prod-id-wf-elb-382957924.us-west-2.elb.amazonaws.com/idlb/'
driver.get(urlID)
_inputs = driver.find_elements_by_xpath('//img')
for input in _inputs:
item = str(input.get_attribute('src'))
if 'https://maps.googleapis.com/maps/api' in item:
print input.get_attribute('src')
linkID = input.get_attribute('src')
#mapIdaho = driver.find_element_by_xpath("//*[@id='j_idt141']/img")
#linkID = mapIdaho.get_attribute('src')
rID = requests.get(linkID)
print rID.status_code
if rID.status_code != 200:
print 'LB Idaho Map Is Down'
# testcounter += 1
# Louisiana
urlLA = 'https://lb.511la.org/lalb/'
driver.get(urlLA)
time.sleep(1)
mapLA = driver.find_element_by_xpath('//*[@id="j_idt155"]/img')
linkLA = mapLA.get_attribute('src')
# test = driver.find_element_by_xpath("//*[text()[contains(.,'mapPanelContent')]]")
# print test
# "//*[contains(text(), 'Delete this route')]"
rLA = requests.get(linkLA)
print rLA.status_code
if rLA.status_code != 200:
            print 'LB Louisiana Map Is Down'
testcounter += 1
# Nebraska
urlNE = 'https://lb.511.nebraska.gov/nelb/'
driver.get(urlNE)
mapNE = driver.find_element_by_xpath('//*[@id="j_idt346"]/img')
linkNE = mapNE.get_attribute('src')
rNE = requests.get(linkNE)
print rNE.status_code
if rNE.status_code != 200:
print 'LB Nebraska Map Is Down'
testcounter += 1
# Iowa
urlIA = 'https://lb.511ia.org/ialb/'
driver.get(urlIA)
mapIA = driver.find_element_by_xpath('//*[@id="j_idt383"]/img')
linkIA = mapIA.get_attribute('src')
rIA = requests.get(linkIA)
print rIA.status_code
if rIA.status_code != 200:
print 'LB Iowa Map Is Down'
testcounter += 1
# Sacog
urlSACOG = 'http://sa.carsstage.org/salbweb/'
driver.get(urlSACOG)
mapSACOG = driver.find_element_by_xpath('//*[@id="j_idt122"]/img')
linkSACOG = mapSACOG.get_attribute('src')
rSACOG = requests.get(linkSACOG)
print rSACOG.status_code
if rSACOG.status_code != 200:
print 'LB Sacramento Map Is Down'
testcounter += 1
# Sandag
urlSAN = 'https://lbw.511sd.com/lbweb/'
driver.get(urlSAN)
mapSAN = driver.find_element_by_xpath('//*[@id="j_idt150"]/img')
linkSAN = mapSAN.get_attribute('src')
rSAN = requests.get(linkSAN)
print rSAN.status_code
if rSAN.status_code != 200:
            print 'LB San Diego Map Is Down'
testcounter += 1
# Minnesota
urlMN = 'https://lb.511mn.org/mnlb/'
driver.get(urlMN)
print driver.title
#imageWait = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, "//*[@id='j_idt369']/img")))
try:
mapMN = driver.find_element_by_xpath('//*[@id="j_idt166"]/img')
except:
try:
mapMN = driver.find_element_by_xpath('//*[@id="j_idt368"]/img')
except:
try:
mapMN = driver.find_element_by_xpath('//*[@id="j_idt365"]/img')
except:
pass
linkMN = mapMN.get_attribute('src')
rMN = requests.get(linkMN)
print rMN.status_code
        if rMN.status_code != 200:
print 'LB Minnesota Map Is Down'
testcounter += 1
driver.quit()
if testcounter > 0:
assert False
if __name__ == '__main__':
unittest.main() |
8,884 | 493b29433f0c3646e7f80fca2f656fc4a5256003 | from functools import wraps
class aws_retry:
"""retries the call (required for some cases where data is not consistent yet in AWS"""
def __init__(self, fields):
self.fields = fields # field to inject
def __call__(self, function):
pass
#code from aws_inject
# from osbot_aws.AWS_Config import AWS_Config
# @wraps(function) # makes __name__ work ok
# def wrapper(*args,**kwargs): # wrapper function
# for field in self.fields.split(','): # split value provided by comma
# if field == 'region' : kwargs[field] = AWS_Config().aws_session_region_name()
# if field == 'account_id': kwargs[field] = AWS_Config().aws_session_account_id()
# return function(*args,**kwargs)
#return wrapper |
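        # A hypothetical retry implementation (added sketch, not the original
        # design; the retry count and delay are assumed, and `fields` is left
        # unused because its intended role isn't defined yet):
        #
        # import time
        # @wraps(function)
        # def wrapper(*args, **kwargs):
        #     last_error = None
        #     for _ in range(5):                 # assumed number of attempts
        #         try:
        #             return function(*args, **kwargs)
        #         except Exception as error:     # data not yet consistent
        #             last_error = error
        #             time.sleep(1)              # assumed back-off delay
        #     raise last_error
        # return wrapper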
8,885 | 292c66bd5b7f56ee8c27cabff01cd97ff36a79dc | from django.contrib import admin
from .models import Wbs, Equipment_Type
class WbsAdmin(admin.ModelAdmin):
list_display = ('code','description','equipment_type')
list_filter = ('code','description','equipment_type')
readonly_fields = ('code','description')
class Equipment_TypeAdmin(admin.ModelAdmin):
list_display = ('type',)
list_filter = ('type',)
admin.site.register(Wbs,WbsAdmin)
admin.site.register(Equipment_Type,Equipment_TypeAdmin)
|
8,886 | 8b18f098080c3f5773aa04dffaff0639fe7fa74f | g=int(input())
num=0
while(g>0):
num=num+g
g=g-1
print(num)
|
8,887 | 62e0c3b6095a65a4508eddfa9c0a1cb31d6c917b | #OpenCV create samples commands
#opencv_createsamples -img watch5050.jpg -bg bg.txt -info info/info.lst -pngoutput info -maxxangle 0.5 -maxyangle 0.5 -maxzangle 0.5 -num 1950
#opencv_createsamples -info info/info.lst -num 1950 -w 20 -h 20 -vec positives.vec
#Training command
#opencv_traincascade -data data -vec positives.vec -bg bg.txt -numPos 1800 -numNeg 900 -numStages 10 -w 20 -h 20
|
8,888 | 1ba39cfc1187b0efc7fc7e905a15de8dc7f80e0d | from textmagic.rest import TextmagicRestClient
username = 'lucychibukhchyan'
api_key = 'sjbEMjfNrrglXY4zCFufIw9IPlZ3SA'
client = TextmagicRestClient(username, api_key)
message = client.message.create(phones="7206337812", text="wow i sent a text from python!!!!")
|
8,889 | dd91ba13177aefacc24ef4a004acae0bffafadf0 | #!/usr/bin/env conda-execute
# conda execute
# env:
# - python >=3
# - requests
# run_with: python
from configparser import NoOptionError
from configparser import SafeConfigParser
import argparse
import base64
import inspect
import ipaddress
import json
import logging
import logging.config
import os
import socket
import sys
import time
import requests
requests.packages.urllib3.disable_warnings()
""" McAfee ESM <=> ServiceNow
This script can be called as an alarm action on the McAfee ESM to send data
to ServiceNow via the API to create tickets. Optionally, ticket data is
transmitted back to the ESM via syslog and referenced as an event. The event
allows for contextual linking directly to the ticket from the ESM.
The script requires Python 3 and was tested with 3.5.2 for Windows and Linux.
Other modules, requests and configparser, are also required.
The script requires a config.ini file for the credentials. The filename and
path can be set from the command line.
An example config.ini is available at:
https://raw.githubusercontent.com/andywalden/mfe2snow/config.ini
Example:
$ python mfe2snow.py alarm="This is my alarm" severity="50"
This is intended to be called as an alarm action to Execute a Script. In the ESM,
go to System Properties | Profile Management | Remote Commands and add a profile for
"Create ServiceNow Ticket". The script can be called using any combination of fields and
values however 'alarm', 'eventdescription', 'severity', 'sourceip' and 'destip' are
mapped to ServiceNow fields. Remaining fields=values are mapped to SNOW field
"Additional Info".
This is an example of the script being called:
mfe2snow.py alarm="[$Alarm Name]" eventdescription="[$Rule Message]" severity="[$Average Severity]"
devicename="[$Device Name]" message_key="[$Event ID]" category="[$Normalized Rule]" sourceip="[$Source IP]"
destip="[$Destination IP]" sourceport="[$Source Port]" destport="[$Destination Port]" host="[$%HostID]"
domain="[$%DomainID]" command="[$%CommandID]" object="[$%ObjectID]" application="[$%AppID]"
deviceaction="[$%Device_Action]" targetuser="[$%UserIDDst]" threatcategory="[$%Threat_Category]"
threathandled="[$%Threat_Handled]" geosrc="[$Geolocation Source]" geodest="[$Geolocation Destination]"
The output is also written to a file that is overwritten each time the script is run.
Make sure the permissions on the config.ini file are secure as not to expose any credentials.
"""
__author__ = "Andy Walden"
__version__ = "1.2"
class Args(object):
"""
Handles any args and passes them back as a dict
"""
def __init__(self, args):
self.log_levels = ["quiet", "error", "warning", "info", "debug"]
self.formatter_class = argparse.RawDescriptionHelpFormatter
self.parser = argparse.ArgumentParser(
formatter_class=self.formatter_class,
description="Send McAfee ESM Alarm data to ServiceNow"
)
self.args = args
self.parser.add_argument("-v", "--version",
action="version",
help="Show version",
version="%(prog)s {}".format(__version__))
self.parser.add_argument("-l", "--level",
default=None, dest="level",
choices=self.log_levels, metavar='',
help="Logging output level. Default: warning")
self.parser.add_argument("-c", "--config",
default=None, dest="cfgfile", metavar='',
help="Path to config file. Default: config.ini")
self.parser.add_argument("fields", nargs='*', metavar='',
help="Key=Values for the query. Example: \n \
alarm=\"The milk has spilled\" sourceip=\"1.1.1.1\", destip=\"2.2.2.2\" \
The following keys are mapped to fields in SNOW: \
alarm - Description \
sourceip/destip - Node \
severity - Severity,
recordid = Message_Key")
self.pargs = self.parser.parse_args()
def get_args(self):
return self.pargs
class Config(object):
""" Creates object for provided configfile/section settings """
def __init__(self, filename, header):
config = SafeConfigParser()
cfgfile = config.read(filename)
if not cfgfile:
raise ValueError('Config file not found:', filename)
self.__dict__.update(config.items(header))
def logging_init():
filename = get_filename()
logfile = filename + ".log"
hostname = socket.gethostname()
formatter = logging.Formatter('%(asctime)s {} %(module)s: %(message)s'.format(hostname),
datefmt='%b %d %H:%M:%S')
logger = logging.getLogger()
fh = logging.FileHandler(logfile, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_filename():
filename = (inspect.getfile(inspect.currentframe()).split("\\", -1)[-1]).rsplit(".", 1)[0]
return filename
class Syslog(object):
"""
Open TCP socket using supplied server IP and port.
Returns socket or None on failure
"""
def __init__(self,
server,
port=514):
logging.debug("Function: open_socket: %s: %s", server, port)
self.server = server
self.port = int(port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.connect((self.server, self.port))
def send(self, data):
"""
Sends data to the established connection
"""
self.data = data
self.sock.sendall(data.encode())
logging.info("Syslog feedback sent")
class SNOW(object):
"""
Send to ServiceNow API
Initialize with host, user and passwd to create connection.
send() sends JSON query to SNOW.
"""
def __init__(self, host, user, passwd):
self.host = host
self.user = user
self.passwd = passwd
self.url = "https://" + host
self.auth_string = '{}'.format(base64.b64encode('{}:{}'
.format(user,passwd)
.encode('utf-8'))
.decode('ascii'))
self.headers = {'Authorization':'Basic '+ self.auth_string, 'Content-Type': 'application/json'}
def send(self, query_conf, uri_string):
"""
Sends URI method and JSON query string
Runs query and returns result object.
"""
self.query_conf = query_conf
self.uri_string = uri_string
result = requests.post(self.url + self.uri_string,
headers=self.headers,
data=query_conf, verify=False)
if result.status_code != 200:
logging.error("SNOW said: Status Code: %s, Headers: %s, \
Mesg: %s", result.status_code, result.headers,
result.json())
sys.exit(1)
return result
class Query(object):
"""
Returns JSON query from provided dict
"""
def __init__(self):
self.qconf = []
def create(self, **kwargs):
self.query_dict = kwargs
self.alarm = self.query_dict.pop('alarm', 'McAfee ESM Alarm')
self.node = self.query_dict.pop('node', '0.0.0.0')
self.severity = self.query_dict.pop('severity', '25')
self.id = self.query_dict.pop('id', "No key")
self.info = ", ".join(["=".join([key, str(val)])
for key, val in self.query_dict.items()])
self.qconf = {
"active" : "false",
"classification" : "1",
"description" : self.alarm,
"source" : "McAfee ESM",
"node" : self.node,
"type" : "Security" ,
"message_key" : "id",
"additional_info" : self.info,
"severity" : self.severity,
"state" : "Ready",
"sys_class_name" : "em_event",
"sys_created_by" : "mcafee.integration"
}
return(json.dumps(self.qconf))
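# A minimal usage sketch (hypothetical values): keys without a dedicated
# mapping are folded into 'additional_info' by Query.create().
#
# q = Query()
# payload = q.create(alarm='Port scan detected', node='10.0.0.5',
# severity='2', geosrc='US')
# # json.loads(payload)['description'] == 'Port scan detected'
# # json.loads(payload)['additional_info'] == 'geosrc=US'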
def main():
""" Main function """
# Process any command line args
args = Args(sys.argv)
pargs = args.get_args()
logging_init()
if pargs.level:
logging.getLogger().setLevel(getattr(logging, pargs.level.upper()))
try:
fields = dict(x.split('=', 1) for x in pargs.fields)
except ValueError:
logging.error("Invalid input. Format is field=value")
sys.exit(1)
configfile = pargs.cfgfile if pargs.cfgfile else 'config.ini'
try:
c = Config(configfile, "DEFAULT")
except ValueError:
logging.error("Config file not found: %s", configfile)
sys.exit(1)
# Strip empty values
fields = {k: v for k, v in fields.items() if v}
# Figure out which IP should be 'node'
destip = fields.get('destip', None)
sourceip = fields.get('sourceip', None)
if sourceip:
# 'homenet' (the list of internal subnets) is assumed to be defined earlier
fields['node'] = sourceip
for subnet in homenet:
if ipaddress.ip_address(sourceip) in ipaddress.ip_network(subnet):
fields['node'] = sourceip
break
elif destip and ipaddress.ip_address(destip) in ipaddress.ip_network(subnet):
fields['node'] = destip
break
# Check for severity in arguments. Map ESM severity (1-100) to SNOW (1-5)
s = int(fields.get('severity', 25))
if 90 <= s <= 100: fields['severity'] = 1 # Critical
elif 75 <= s <= 89: fields['severity'] = 2 # Major
elif 65 <= s <= 74: fields['severity'] = 3 # Minor
elif 50 <= s <= 64: fields['severity'] = 4 # Warning
elif 0 <= s <= 49: fields['severity'] = 5 # Info
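# Worked example: an ESM alarm with severity 80 falls in the 75-89 band and
# is sent to ServiceNow as severity 2 (Major); severity 25 maps to 5 (Info).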
try:
snowhost = SNOW(c.snowhost, c.snowuser, c.snowpass)
except AttributeError:
logging.error("%s is missing a required field.", configfile)
sys.exit(1)
new_ticket = Query()
new_ticket_q = new_ticket.create(**fields)
result = snowhost.send(new_ticket_q, '/api/now/table/em_event')
# Syslog feedback to ESM
try:
# Config stores settings as attributes, so missing optional settings
# surface as AttributeError rather than configparser's NoOptionError.
syslog_host = c.sysloghost
syslog_port = c.syslogport
syslog = Syslog(syslog_host, syslog_port)
syslog.send(result.text)
except AttributeError:
logging.debug("Syslog feedback disabled. Settings not detected.")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
logging.warning("Control-C Pressed, stopping...")
sys.exit()
|
8,890 | 28a0ae0492fb676044c1f9ced7a5a4819e99a8d9 | import math
import numpy as np
import cv2
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
if (__name__ == "__main__"):
cap = cv2.VideoCapture('dfd1.mp4')
mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)
count = 0
#list = ['video' + str(n) for n in range(100)]
while True:
list = []
ret, frame = cap.read()
ret1, frame1 = cap.read()
if not ret or not ret1: # stop cleanly when the video runs out of frames
break
fgmask = mog.apply(frame)
mask = np.zeros_like(frame1)
mask1 = np.zeros_like(frame1)
kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
dilation = cv2.dilate(closing, kernel, iterations=1)
canny = cv2.Canny(dilation, 100, 200)
cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
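# Note: the 3-value unpack above matches the OpenCV 3.x findContours API;
# OpenCV 4.x returns only (contours, hierarchy).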
cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)
cv2.imshow('mask', fgmask)
cv2.imshow('mask3', dilation)
cv2.imshow('mask15', canny)
cv2.imshow('mask4', frame)
cv2.imshow('mask8', frame[100:160, 220:550])
for i in range(len(contours)):
point = []
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(frame1, (int(x+w/2), int(y+h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 0), 3)
X = int(x+w/2)
Y = int(y+h/2)
distance = math.sqrt(X**2 + Y**2) # ** is exponentiation; ^ would be bitwise XOR
mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]
# compute the distance from (0,0) and append the point to the list
point.append(distance)
point.append(X)
point.append(Y)
list.append(point)
# remove duplicate coordinate values
if count == 0:
print("List has one List")
elif list[count][1] == list[count-1][1] and list[count][2] == list[count-1][2] :
a = list.pop()
count = count - 1
count = count + 1
count = 0
# sort ascending by distance from (0,0)
if not list:
print("empty")
else:
list.sort()
print(list)
'''
for i in range(len(list)):
if count == 0:
print("list 내용 한개")
else:
# compute distances between consecutive points in the sorted list
distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)
print(count)
print(list[count][1],list[count][2])
print(list[count-1][1],list[count-1][2])
print("거리 ",distance1)
count = count + 1
count = 0
'''
cv2.imshow('mask2', frame1)
print(' scene change')
cv2.imshow('mask7', mask)
k = cv2.waitKey(300) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows() |
8,891 | b838d2230cb3f3270e86807e875df4d3d55438cd | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 22:11:53 2020
@author: Rick
"""
total = 0 # renamed from 'sum' to avoid shadowing the builtin
with open('workRecord.txt') as fp:
for line in fp:
# lines ending in 'x<number>' contribute that number; all others count as 1
idx = line.rfind('x', len(line) - 8, len(line))
if idx >= 0:
total += float(line.rstrip()[idx + 1:])
else:
total += 1
print(total)
print(total * 3) |
8,892 | fd54bbfbc81aec371ad6c82bf402a5a3673a9f24 | # -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1383550959.0389481
_template_filename='templates/webapps/tool_shed/repository/browse_repository.mako'
_template_uri='/webapps/tool_shed/repository/browse_repository.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['stylesheets', 'javascripts']
# SOURCE LINE 7
def inherit(context):
if context.get('use_panels'):
return '/webapps/tool_shed/base_panels.mako'
else:
return '/base.mako'
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
# SOURCE LINE 2
ns = runtime.TemplateNamespace('__anon_0x88e2e50', context._clean_inheritance_tokens(), templateuri=u'/message.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x88e2e50')] = ns
# SOURCE LINE 4
ns = runtime.TemplateNamespace('__anon_0x7ee9750', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/common/common.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x7ee9750')] = ns
# SOURCE LINE 5
ns = runtime.TemplateNamespace('__anon_0x8a2fd90', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/repository/common.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x8a2fd90')] = ns
# SOURCE LINE 3
ns = runtime.TemplateNamespace('__anon_0x88e21d0', context._clean_inheritance_tokens(), templateuri=u'/webapps/tool_shed/common/repository_actions_menu.mako', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, '__anon_0x88e21d0')] = ns
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, (inherit(context)), _template_uri)
def render_body(context,**pageargs):
context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])
status = _import_ns.get('status', context.get('status', UNDEFINED))
render_clone_str = _import_ns.get('render_clone_str', context.get('render_clone_str', UNDEFINED))
render_repository_type_select_field = _import_ns.get('render_repository_type_select_field', context.get('render_repository_type_select_field', UNDEFINED))
render_msg = _import_ns.get('render_msg', context.get('render_msg', UNDEFINED))
repository = _import_ns.get('repository', context.get('repository', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
render_tool_shed_repository_actions = _import_ns.get('render_tool_shed_repository_actions', context.get('render_tool_shed_repository_actions', UNDEFINED))
is_malicious = _import_ns.get('is_malicious', context.get('is_malicious', UNDEFINED))
repository_type_select_field = _import_ns.get('repository_type_select_field', context.get('repository_type_select_field', UNDEFINED))
commit_message = _import_ns.get('commit_message', context.get('commit_message', UNDEFINED))
message = _import_ns.get('message', context.get('message', UNDEFINED))
trans = _import_ns.get('trans', context.get('trans', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 1
__M_writer(u'\n')
# SOURCE LINE 2
__M_writer(u'\n')
# SOURCE LINE 3
__M_writer(u'\n')
# SOURCE LINE 4
__M_writer(u'\n')
# SOURCE LINE 5
__M_writer(u'\n\n')
# SOURCE LINE 13
__M_writer(u'\n')
# SOURCE LINE 14
__M_writer(u'\n\n')
# SOURCE LINE 19
__M_writer(u'\n\n')
# SOURCE LINE 25
__M_writer(u'\n\n')
# SOURCE LINE 27
is_new = repository.is_new( trans.app )
can_push = trans.app.security_agent.can_push( trans.app, trans.user, repository )
can_download = not is_new and ( not is_malicious or can_push )
can_browse_contents = not is_new
__M_locals_builtin_stored = __M_locals_builtin()
__M_locals.update(__M_dict_builtin([(__M_key, __M_locals_builtin_stored[__M_key]) for __M_key in ['can_push','can_browse_contents','is_new','can_download'] if __M_key in __M_locals_builtin_stored]))
# SOURCE LINE 32
__M_writer(u'\n\n')
# SOURCE LINE 34
__M_writer(unicode(render_tool_shed_repository_actions( repository )))
__M_writer(u'\n\n')
# SOURCE LINE 36
if message:
# SOURCE LINE 37
__M_writer(u' ')
__M_writer(unicode(render_msg( message, status )))
__M_writer(u'\n')
pass
# SOURCE LINE 39
__M_writer(u'\n')
# SOURCE LINE 40
if can_browse_contents:
# SOURCE LINE 41
__M_writer(u' <div class="toolForm">\n <div class="toolFormTitle">Repository \'')
# SOURCE LINE 42
__M_writer(filters.html_escape(unicode(repository.name )))
__M_writer(u"' revision ")
__M_writer(filters.html_escape(unicode(repository.tip( trans.app ) )))
__M_writer(u' (repository tip)</div>\n')
# SOURCE LINE 43
if can_download:
# SOURCE LINE 44
__M_writer(u' <div class="form-row">\n <label>Clone this repository:</label>\n ')
# SOURCE LINE 46
__M_writer(unicode(render_clone_str( repository )))
__M_writer(u'\n </div>\n')
pass
# SOURCE LINE 49
__M_writer(u' <form name="repository_type">\n ')
# SOURCE LINE 50
__M_writer(unicode(render_repository_type_select_field( repository_type_select_field, render_help=False )))
__M_writer(u'\n </form>\n')
# SOURCE LINE 52
if can_push:
# SOURCE LINE 53
__M_writer(u' <form name="select_files_to_delete" id="select_files_to_delete" action="')
__M_writer(unicode(h.url_for( controller='repository', action='select_files_to_delete', id=trans.security.encode_id( repository.id ))))
__M_writer(u'" method="post" >\n <div class="form-row" >\n <label>Contents:</label>\n <div id="tree" >\n Loading...\n </div>\n <div class="toolParamHelp" style="clear: both;">\n Click on a file to display it\'s contents below. You may delete files from the repository by clicking the check box next to each file and clicking the <b>Delete selected files</b> button.\n </div>\n <input id="selected_files_to_delete" name="selected_files_to_delete" type="hidden" value=""/>\n </div>\n <div class="form-row">\n <label>Message:</label>\n <div class="form-row-input">\n')
# SOURCE LINE 67
if commit_message:
# SOURCE LINE 68
__M_writer(u' <textarea name="commit_message" rows="3" cols="35">')
__M_writer(filters.html_escape(unicode(commit_message )))
__M_writer(u'</textarea>\n')
# SOURCE LINE 69
else:
# SOURCE LINE 70
__M_writer(u' <textarea name="commit_message" rows="3" cols="35"></textarea>\n')
pass
# SOURCE LINE 72
__M_writer(u' </div>\n <div class="toolParamHelp" style="clear: both;">\n This is the commit message for the mercurial change set that will be created if you delete selected files.\n </div>\n <div style="clear: both"></div>\n </div>\n <div class="form-row">\n <input type="submit" name="select_files_to_delete_button" value="Delete selected files"/>\n </div>\n <div class="form-row">\n <div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>\n </div>\n </form>\n')
# SOURCE LINE 85
else:
# SOURCE LINE 86
__M_writer(u' <div class="toolFormBody">\n <div class="form-row" >\n <label>Contents:</label>\n <div id="tree" >\n Loading...\n </div>\n </div>\n <div class="form-row">\n <div id="file_contents" class="toolParamHelp" style="clear: both;background-color:#FAFAFA;"></div>\n </div>\n </div>\n')
pass
# SOURCE LINE 98
__M_writer(u' </div>\n <p/>\n')
pass
return ''
finally:
context.caller_stack._pop_frame()
def render_stylesheets(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])
h = _import_ns.get('h', context.get('h', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 16
__M_writer(u'\n ')
# SOURCE LINE 17
__M_writer(unicode(parent.stylesheets()))
__M_writer(u'\n ')
# SOURCE LINE 18
__M_writer(unicode(h.css( "jquery.rating", "dynatree_skin/ui.dynatree" )))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_javascripts(context):
context.caller_stack._push_frame()
try:
_import_ns = {}
_mako_get_namespace(context, '__anon_0x88e2e50')._populate(_import_ns, [u'render_msg'])
_mako_get_namespace(context, '__anon_0x7ee9750')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x8a2fd90')._populate(_import_ns, [u'*'])
_mako_get_namespace(context, '__anon_0x88e21d0')._populate(_import_ns, [u'render_tool_shed_repository_actions'])
common_javascripts = _import_ns.get('common_javascripts', context.get('common_javascripts', UNDEFINED))
h = _import_ns.get('h', context.get('h', UNDEFINED))
repository = _import_ns.get('repository', context.get('repository', UNDEFINED))
parent = _import_ns.get('parent', context.get('parent', UNDEFINED))
__M_writer = context.writer()
# SOURCE LINE 21
__M_writer(u'\n ')
# SOURCE LINE 22
__M_writer(unicode(parent.javascripts()))
__M_writer(u'\n ')
# SOURCE LINE 23
__M_writer(unicode(h.js( "libs/jquery/jquery.rating", "libs/jquery/jquery-ui", "libs/jquery/jquery.cookie", "libs/jquery/jquery.dynatree" )))
__M_writer(u'\n ')
# SOURCE LINE 24
__M_writer(unicode(common_javascripts(repository)))
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
|
8,893 | 89e5e82c073f7f87c00fc844c861c6c5cbe6a695 |
import smart_imports
smart_imports.all()
class LogicTests(utils_testcase.TestCase):
def setUp(self):
super(LogicTests, self).setUp()
game_logic.create_test_map()
self.account_1 = self.accounts_factory.create_account()
self.account_1_items = prototypes.AccountItemsPrototype.get_by_account_id(self.account_1.id)
self.collection_1 = prototypes.CollectionPrototype.create(caption='collection_1', description='description_1')
self.collection_2 = prototypes.CollectionPrototype.create(caption='collection_2', description='description_2', approved=True)
self.kit_1 = prototypes.KitPrototype.create(collection=self.collection_1, caption='kit_1', description='description_1')
self.kit_2 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_2', description='description_2', approved=True)
self.kit_3 = prototypes.KitPrototype.create(collection=self.collection_2, caption='kit_3', description='description_3', approved=True)
self.item_1_1 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_1', text='text_1_1', approved=False)
self.item_1_2 = prototypes.ItemPrototype.create(kit=self.kit_1, caption='item_1_2', text='text_1_2', approved=True)
self.item_2_1 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_1', text='text_2_1', approved=True)
self.item_2_2 = prototypes.ItemPrototype.create(kit=self.kit_2, caption='item_2_2', text='text_2_2', approved=False)
self.item_3_1 = prototypes.ItemPrototype.create(kit=self.kit_3, caption='item_3_1', text='text_3_1', approved=True)
def test_get_items_count(self):
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_all()),
(collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}), {self.collection_2.id: 2}))
def test_get_items_count__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_items_count(prototypes.ItemPrototype._db_filter(id__in=self.account_1_items.items_ids())),
(collections.Counter({self.kit_3.id: 1}), {self.collection_2.id: 1}))
def test_get_collections_statistics__no_account(self):
self.assertEqual(logic.get_collections_statistics(None),
{'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {},
'account_items_in_kits': {},
'total_items': 2,
'account_items': 0})
def test_get_collections_statistics__with_account(self):
self.account_1_items.add_item(self.item_3_1)
self.account_1_items.save()
self.assertEqual(logic.get_collections_statistics(self.account_1_items),
{'total_items_in_collections': {self.collection_2.id: 2},
'total_items_in_kits': collections.Counter({self.kit_2.id: 1, self.kit_3.id: 1}),
'account_items_in_collections': {self.collection_2.id: 1},
'account_items_in_kits': collections.Counter({self.kit_3.id: 1}),
'total_items': 2,
'account_items': 1})
|
8,894 | efed5c113e085e5b41d9169901c18c06111b9077 | from snake.snake import Snake
# Start application
if __name__ == '__main__':
s = Snake()
s.run() |
8,895 | 2d4680b63cdd05e89673c4bd6babda7ac6ebb588 | from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from crud.serializers import TodoListSerializer
from crud.models import TodoList
# Create your views here.
class TodoListViewSet(viewsets.ModelViewSet):
queryset = TodoList.objects.all()
serializer_class = TodoListSerializer
def destroy(self, request, pk=None):
# Override ModelViewSet.destroy: routers map HTTP DELETE to 'destroy',
# so a method named 'delete' on a ViewSet is never called.
instance = TodoList.objects.get(id=pk)
instance.delete()
return Response(status=204) |
8,896 | e4fb932c476ca0222a077a43499bf9164e1f27d0 | import configparser
config = configparser.ConfigParser()
config.read('config.ini')
settings=config['Settings']
colors=config['Colors']
import logging
logger = logging.getLogger(__name__)
logLevel = settings.getint('log-level')
oneLevelUp = 20 # note: logging levels step by 10, so this raises the threshold two levels
# Raise the transformers loggers' thresholds before the library is imported;
# named loggers keep the level set here once the module loads, silencing
# its output while the model is loading.
logging.getLogger("transformers.tokenization_utils").setLevel(logLevel+oneLevelUp)
logging.getLogger("transformers.modeling_utils").setLevel(logLevel+oneLevelUp)
logging.getLogger("transformers.configuration_utils").setLevel(logLevel+oneLevelUp)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logLevel+oneLevelUp
)
logger.setLevel(logLevel)
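# Worked example: with log-level = 10 (DEBUG) in config.ini, this module's
# logger emits DEBUG records, while the transformers loggers and the root
# handler threshold sit at 30 (WARNING), suppressing model-loading chatter.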
|
8,897 | 0738fc48bc367f1df75567ab97ce20d3e747dc18 | cassandra = {
'nodes': ['localhost'],
'keyspace': 'coffee'
}
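# A minimal connection sketch using this settings dict (assumes the DataStax
# cassandra-driver package; not part of this config file):
# from cassandra.cluster import Cluster
# session = Cluster(cassandra['nodes']).connect(cassandra['keyspace'])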
|
8,898 | 4b5794ff79371c2e49c5d2b621805b08c4ff7acb | from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
from ex.models import Teacher,Student,Group,Report,TeamEvaluation,PrivateLetter,ChatBoxIsOpen
from django.core import serializers
from rest_framework.views import APIView
from rest_framework.response import Response
from django.contrib.auth.hashers import make_password, check_password
# from plane.models import User, Student, LightList, Light, Score, Visit
# from plane.utils.jwt_auth import create_token, get_user_id
# from django.contrib.auth.hashers import make_password, check_password
# from rest_framework.authtoken.models import Token
# from django.contrib.auth import authenticate
import os
from ex.utils.jwt_auth import create_token, get_user_id
from ex.utils.extensions.auth import JwtQueryParamAuthentication
from django.db.models import Q
# Create your views here.
class getPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 1 # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverTea_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 2 # received
}
msgList2.append(data)
# msgList.sort()
# print(len(msgList1))
else:
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 1 # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverTea_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 2 # received
}
msgList2.append(data)
# msgList.sort()
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0,len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2+=1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1+=1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1+=1
else:
msgList.append(msgList2[i2])
i2+=1
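# The two-pointer pass above merges msgList1 (sent) and msgList2 (received),
# each assumed already in chronological order, into one time-sorted msgList.
# E.g. sent at [09:00, 11:00] and received at [10:00] merge to
# [09:00, 10:00, 11:00].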
# print(msgList)
data = {
'id': item.id,
'receiver': receiver,
'msgList': msgList,
'name': receiver + str(identity),
'identity': identity
}
data_list.append(data)
# print(data_list)
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class enterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderTea_id=user_id,receiverTea_id=receiver,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverTea_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverTea_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
privateLetter = PrivateLetter(senderTea_id=user_id,receiverStu_id=receiverStu_id,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverTea_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverTea_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '发布私信成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Get recent contacts
class getRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderTea_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != "":
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
# print(((receiver + str(identity)) not in dict))
# if (receiver + str(identity)) not in dict:
# dict[receiver + str(identity)] = '1'
data = {
# 'id': item.id,
'receiver': receiver,
'identity': identity # teacher: 0; student: 1
}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverTea_id=user_id):
if item.senderTea_id != None and item.senderTea_id != "":
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num
# print(((receiver + str(identity)) not in dict))
# if (receiver + str(identity)) not in dict:
# dict[receiver + str(identity)] = '1'
data = {
# 'id': item.id,
'receiver': receiver,
'identity': identity # teacher: 0; student: 1
}
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
for i in range(lenData - 1,-1,-1):
if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'
data_list1.append(data_list[i])
# lenData = len(data_list1)
# if lenData > 10:
# data_list1 = data_list1[lenData - 10:lenData]
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list1
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Close chat box
class closeChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()
if chatBoxIsOpen: # .first() returns None if no matching open chat box exists
chatBoxIsOpen.delete()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Open chat box
class openChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=user_id,receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Search for a contact
class searchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.GET['receiver']
identity = request.GET['identity']
# print(receiver,identity=='0')
# user = Teacher.objects.filter(id=username).first()
iden = 4
if identity == '0' and user_id == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {
'identity': iden,
'receiver': receiver
}
return Response({
'status': 200,
'msg': '返回成功',
'data': data
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class stuGetPrivateLetterListsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
# print(user_id,username)
data_list = []
for item in ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(isOpen=1)):
msgList = []
msgList1 = []
msgList2 = []
receiver = item.receiverTea_id
identity = 0
if item.receiverStu_id != None:
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
identity = 1
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=item.receiverStu_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 1 # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=item.receiverStu_id) & Q(receiverStu_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 2 # received
}
msgList2.append(data)
# msgList.sort()
# print(len(msgList1))
else:
for item2 in PrivateLetter.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 1 # sent
}
msgList1.append(data)
for item2 in PrivateLetter.objects.filter(Q(senderTea_id=receiver) & Q(receiverStu_id=user_id)):
data = {
'id': item2.id,
'message': item2.message,
'time': str(item2.time.strftime('%Y-%m-%d %H:%M:%S')),
'new': item2.new,
'Ienter': 2 # received
}
msgList2.append(data)
# msgList.sort()
len1 = len(msgList1)
len2 = len(msgList2)
i1 = 0
i2 = 0
for i in range(0,len1 + len2):
if i1 >= len1:
msgList.append(msgList2[i2])
i2+=1
elif i2 >= len2:
msgList.append(msgList1[i1])
i1+=1
elif msgList1[i1]['time'] < msgList2[i2]['time']:
msgList.append(msgList1[i1])
i1+=1
else:
msgList.append(msgList2[i2])
i2+=1
# print(msgList)
data = {
'id': item.id,
'receiver': receiver,
'msgList': msgList,
'name': receiver + str(identity),
'identity': identity
}
data_list.append(data)
# print(data_list)
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
class stuEnterPrivateLetterView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
# print(user_id,username)
receiver = request.data.get('receiver')
message = request.data.get('message')
identity = request.data.get('identity')
if identity == 0:
privateLetter = PrivateLetter(senderStu_id=user_id,receiverTea_id=receiver,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderTea_id=receiver)&Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderTea_id=receiver,receiverStu_id=user_id)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
privateLetter = PrivateLetter(senderStu_id=user_id,receiverStu_id=receiverStu_id,message=message)
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=receiverStu_id)&Q(receiverStu_id=user_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=receiverStu_id,receiverStu_id=user_id)
privateLetter.save()
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '发布私信成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Get recent contacts
class stuRecentContactsView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
data_list = []
for item in PrivateLetter.objects.filter(senderStu_id=user_id):
if item.receiverTea_id != None and item.receiverTea_id != "":
identity = 0
receiver = item.receiverTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.receiverStu_id).first().stu_num
data = {
'receiver': receiver,
'identity': identity # teacher: 0; student: 1
}
data_list.append(data)
for item in PrivateLetter.objects.filter(receiverStu_id=user_id):
if item.senderTea_id != None and item.senderTea_id != "":
identity = 0
receiver = item.senderTea_id
else:
identity = 1
receiver = Student.objects.filter(id=item.senderStu_id).first().stu_num
data = {
'receiver': receiver,
'identity': identity # teacher: 0; student: 1
}
data_list.append(data)
lenData = len(data_list)
dict = {}
data_list1 = []
for i in range(lenData - 1,-1,-1):
if (data_list[i]['receiver'] + str(data_list[i]['identity'])) not in dict:
dict[data_list[i]['receiver'] + str(data_list[i]['identity'])] = '1'
data_list1.append(data_list[i])
return Response({
'status': 200,
'msg': '返回成功',
'data': data_list1
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Close chat box
class stuCloseChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
iden = request.data.get('iden')
if iden == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()
else:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=Student.objects.filter(stu_num=receiver).first().id)).first()
if chatBoxIsOpen: # .first() returns None if no matching open chat box exists
chatBoxIsOpen.delete()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Open chat box
class stuOpenChatBoxView(APIView):
def post(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
receiver = request.data.get('receiver')
identity = request.data.get('identity')
if identity == 0:
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverTea_id=receiver)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverTea_id=receiver)
else:
receiverStu_id = Student.objects.filter(stu_num=receiver).first().id
chatBoxIsOpen = ChatBoxIsOpen.objects.filter(Q(senderStu_id=user_id) & Q(receiverStu_id=receiverStu_id)).first()
if not chatBoxIsOpen:
chatBoxIsOpen = ChatBoxIsOpen(senderStu_id=user_id,receiverStu_id=receiverStu_id)
chatBoxIsOpen.save()
return Response({
'status': 200,
'msg': '返回成功',
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
# Search for a contact
class stuSearchContactView(APIView):
def get(self, request, *args, **kwargs):
try:
try:
payload = JwtQueryParamAuthentication.authenticate(self, request)[0]
except Exception as e:
return Response({
'status': 403,
'msg': '未登录',
'err': e.args
})
user_id = payload['id']
username = payload['username']
receiver = request.GET['receiver']
identity = request.GET['identity']
# print(receiver,identity=='0')
# user = Teacher.objects.filter(id=username).first()
# 0: teacher, 1: student, 2: not searched yet, 3: self, 4: user does not exist
iden = 4
if identity == '1' and username == receiver:
iden = 3
elif identity == '0':
user = Teacher.objects.filter(id=receiver).first()
if not user:
iden = 4
else:
iden = 0
else:
user = Student.objects.filter(stu_num=receiver).first()
if not user:
iden = 4
else:
iden = 1
data = {
'identity': iden,
'receiver': receiver
}
return Response({
'status': 200,
'msg': '返回成功',
'data': data
})
except Exception as e:
return Response({
'status': 204,
'msg': '遇到了异常错误',
'err': e.args
})
|
8,899 | a98d03b169b59704b3b592cee0b59f5389fd77b3 | #! /usr/bin/env python3
import sys
def stage_merge_checksums(
old_survey=None,
survey=None,
brickname=None,
**kwargs):
'''
For debugging / special-case processing, read previous checksums, and update them with
current checksums values, then write out the result.
'''
from collections import OrderedDict
cfn = old_survey.find_file('checksums', brick=brickname)
print('Old checksums:', cfn)
checksums = OrderedDict()
with open(cfn, 'r') as f:
for line in f.readlines():
words = line.split()
fn = words[1]
if fn.startswith('*'):
fn = fn[1:]
hashcode = words[0]
checksums[fn] = hashcode
# produce per-brick checksum file.
with survey.write_output('checksums', brick=brickname, hashsum=False) as out:
f = open(out.fn, 'w')
# Update hashsums
for fn,hashsum in survey.output_file_hashes.items():
print('Updating checksum', fn, '=', hashsum)
checksums[fn] = hashsum
# Write outputs
for fn,hashsum in checksums.items():
f.write('%s *%s\n' % (hashsum, fn))
f.close()
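# The checksum lines written above use the coreutils md5sum/sha256sum format,
# e.g. (hash and filename illustrative):
# d41d8cd98f00b204e9800998ecf8427e *tractor/000/tractor-0001m002.fits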
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--old-output', required=True,
help='"Old" output directory to read old checksum file from.')
parser.add_argument('-b', '--brick', required=True,
help='Brick name to run')
parser.add_argument(
'-P', '--pickle', dest='pickle_pat',
help='Pickle filename pattern, default %(default)s',
default='pickles/runbrick-%(brick)s-%%(stage)s.pickle')
parser.add_argument('-n', '--no-write', dest='write', default=True,
action='store_false')
parser.add_argument('--survey-dir', type=str, default=None,
help='Override the $LEGACY_SURVEY_DIR environment variable')
parser.add_argument('-d', '--outdir', dest='output_dir',
help='Set output base directory, default "."')
opt = parser.parse_args()
optdict = vars(opt)
old_output_dir = optdict.pop('old_output')
from legacypipe.runbrick import get_runbrick_kwargs
survey, kwargs = get_runbrick_kwargs(**optdict)
if kwargs in [-1, 0]:
return kwargs
import logging
lvl = logging.INFO
logging.basicConfig(level=lvl, format='%(message)s', stream=sys.stdout)
# tractor logging is *soooo* chatty
logging.getLogger('tractor.engine').setLevel(lvl + 10)
from legacypipe.survey import LegacySurveyData
old_survey = LegacySurveyData(survey_dir=old_output_dir,
output_dir=old_output_dir)
kwargs.update(old_survey=old_survey)
brickname = optdict['brick']
from astrometry.util.stages import CallGlobalTime, runstage
prereqs = {
'outliers': None,
}
prereqs.update({
'merge_checksums': 'outliers'
})
pickle_pat = optdict['pickle_pat']
pickle_pat = pickle_pat % dict(brick=brickname)
stagefunc = CallGlobalTime('stage_%s', globals())
stage = 'merge_checksums'
R = runstage(stage, pickle_pat, stagefunc, prereqs=prereqs, force=[stage],
write=[], **kwargs)
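# Hypothetical invocation (script name, paths and brick name illustrative):
# python merge_checksums.py --old-output /data/old-run -b 0001m002 -d /data/new-run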
if __name__ == '__main__':
main()
|