index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
990,200 | 090a22514c4476415811b86530a294b86a1acae4 |
# Hyper-parameters for a goal-conditioned training run (environment names below).
learning_step = 0
total_learning_step = 200
learning_candidates = 128
learning_selected = 64 # batch_size
learning_up = 2
learning_down = 1
learning_rate = 0.0001
lambda_starter = 1
fixed_lambda = -1 # -1 starts \lambda-curriculum
# Environment families selectable via goal_type:
# FULL: FetchReach-v1, HandReach-v0, HandManipulateEggFull-v0
# ROTATION: HandManipulateBlockRotateXYZ-v0, HandManipulatePenRotate-v0
goal_type = "FULL"
|
990,201 | 7b9f2fe13aab2e800179bc32ef410996bcf53552 | import numpy as np
import tensorflow as tf
import pandas as pd
import div_tools as dt
import div_tools_np as dtnp
import matplotlib.pyplot as plt
from scipy.special import logit
import cPickle
from scipy.misc import imresize
from scipy.spatial.distance import pdist, squareform
def zeros(shape):
    """Trainable TF variable of the given shape, initialised to all zeros."""
    return tf.Variable(tf.zeros(shape))
def normal(shape, std_dev):
    """Trainable TF variable initialised from N(0, std_dev^2)."""
    return tf.Variable(tf.random_normal(shape, stddev=std_dev))
def logdet_tf(S):
    # Is this really not built in to TF??
    # log det of a symmetric positive-definite S via its Cholesky factor:
    # det(S) = det(L)^2, hence the factor of 2 on the diagonal log-sum.
    ld = 2.0 * tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(S))))
    return ld
def logdet_lower(L):
    # log |det| of a triangular matrix: sum of log-abs of its diagonal entries.
    ld = tf.reduce_sum(tf.log(tf.abs(tf.diag_part(L))))
    return ld
def lower_tf(X):
    # Better way to do this??
    # Strictly lower-triangular part of X: full lower band minus the diagonal band.
    return tf.matrix_band_part(X, -1, 0) - tf.matrix_band_part(X, 0, 0)
def learn_gauss_test(train_x, valid_x, batch_size=20):
    """Fit an affine map x -> x.W + b with lower-triangular W by minimising an
    MMD-to-Gaussian criterion with Adam (TensorFlow 1.x graph mode).

    train_x, valid_x : (N, D) arrays of identical shape.
    batch_size       : minibatch size for both the cost and the train metrics.
    Returns (X0, X_opt, train_hist, valid_hist): the transformed training data
    before and after optimisation, plus metric histories (lists of dicts)
    recorded every `iteration_break` iterations.
    """
    #sigma_list_obs = np.median(squareform(squareform(pdist(train_x)))) ** 2
    #sigma_list_obs = tf.Variable((sigma_list_obs,),
    #                             trainable=False, dtype="float")
    # Fixed kernel bandwidths for the MMD metrics (not trained).
    sigma_list_latent = tf.Variable((10.0,), trainable=False, dtype="float")
    sigma_list_obs = tf.Variable((100.0,), trainable=False, dtype="float")
    num_examples, D = train_x.shape
    assert(valid_x.shape == train_x.shape)  # Assume same for now
    train_x_tf = tf.Variable(train_x, trainable=False, dtype="float")
    valid_x_tf = tf.Variable(valid_x, trainable=False, dtype="float")
    # Better to initialize so too small??
    W_dummy = normal((D, D), 0.5 / D)
    # W_tf = lower_tf(W_dummy) + tf.eye(D)
    # Keep only the lower triangle (diagonal included) of the random init.
    W_tf = tf.matrix_band_part(W_dummy, -1, 0)
    b_tf = zeros((D,))
    x = tf.placeholder(dtype="float", shape=[batch_size, D])
    samples = tf.matmul(x, W_tf) + b_tf
    # cost of the network, and optimizer for the cost
    cost = tf.reduce_mean(dt.mmd_marg(samples, sigma_list_latent, unbiased=True))
    # Warning: we must change this if W is no longer lower-triangular!
    ldw = logdet_lower(W_tf)
    # cost = tf.reduce_mean(dt.nll(samples, ldw))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    #samples_full = tf.matmul(train_x_tf, W_tf) + b_tf
    samples_full = samples  # Just use subset for high mem case
    samples_valid = tf.matmul(valid_x_tf[:100, :], W_tf) + b_tf
    # Reference Gaussian draws in latent space, mapped back to observation
    # space through the inverse of the learned affine map.
    gen_latent = normal((D, D), 1.0)
    gen_obs = tf.matmul(gen_latent - b_tf, tf.matrix_inverse(W_tf))
    #gen_chk = tf.matmul(gen_obs, W_tf) + b_tf
    #gen_err = tf.reduce_max(tf.abs(gen_latent - gen_chk))
    metric_train = dt.run_all_metrics(train_x_tf, samples_full, ldw,
                                      gen_obs, gen_latent,
                                      sigma_list_obs, sigma_list_latent)
    metric_valid = dt.run_all_metrics(valid_x_tf, samples_valid, ldw,
                                      gen_obs, gen_latent,
                                      sigma_list_obs, sigma_list_latent)
    # initialize all the variables in the model
    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)
    # Snapshot of the transform before any optimisation.
    W0 = W_tf.eval(session=sess)
    b0 = b_tf.eval(session=sess)
    X0 = np.dot(train_x, W0) + b0[None, :]
    num_iterations = 5000
    iteration_break = 100
    train_hist = []
    valid_hist = []
    for i in xrange(num_iterations):
        batch_indices = np.random.choice(num_examples, size=batch_size,
                                         replace=False)
        batch_x = train_x[batch_indices]
        # print out the cost after every 'iteration_break' iterations
        if i % iteration_break == 0:
            curr_cost = sess.run(cost, feed_dict={x: batch_x})
            print 'Cost at iteration ' + str(i+1) + ': ' + str(curr_cost)
            # Re-calculate with np since TF sometimes has trouble here
            logdet_W = np.linalg.slogdet(W_tf.eval(session=sess))[1]
            train_hist.append({k: np.mean(v.eval(session=sess, feed_dict={x: batch_x}))
                               for k, v in metric_train.iteritems()})
            train_hist[-1]['nll_cmp'] = -logdet_W
            valid_hist.append({k: np.mean(v.eval(session=sess))
                               for k, v in metric_valid.iteritems()})
            valid_hist[-1]['nll_cmp'] = -logdet_W
        # optimize the network
        sess.run(optimizer, feed_dict={x: batch_x})
    W_opt = W_tf.eval(session=sess)
    b_opt = b_tf.eval(session=sess)
    X_opt = np.dot(train_x, W_opt) + b_opt[None, :]
    print W_opt
    print b_opt
    return X0, X_opt, train_hist, valid_hist
def load_data(source_file, digit=None, reshuffle=False):
    """Load a pickled (train, valid, test) dataset of (features, labels) pairs.

    source_file : path to the pickle (MNIST-style: three (X, y) tuples).
    digit       : None or 'all' keeps every class; otherwise keep only rows
                  whose label equals `digit`.
    reshuffle   : pool all three splits, shuffle, and re-split at the original
                  sizes, to guarantee the splits are iid.
    Returns (x_train, x_valid, x_test) feature matrices.
    """
    # Context manager closes the file even if unpickling raises (the original
    # open/load/close sequence leaked the handle on error).
    with open(source_file, 'rb') as f:
        data = cPickle.load(f)
    if digit is None or digit == 'all':
        x_train = data[0][0]
        x_valid = data[1][0]
        x_test = data[2][0]
    else:
        # data[i] = (features, labels); select rows of the requested class.
        x_train = data[0][0][data[0][1] == digit, :]
        x_valid = data[1][0][data[1][1] == digit, :]
        x_test = data[2][0][data[2][1] == digit, :]
    if reshuffle:  # To guarantee iid
        n_train, n_valid = x_train.shape[0], x_valid.shape[0]
        x_all = np.concatenate((x_train, x_valid, x_test), axis=0)
        # Multi-dimensional arrays are only shuffled along the first axis
        np.random.shuffle(x_all)
        x_train = x_all[:n_train, :]
        x_valid = x_all[n_train:n_train + n_valid, :]
        x_test = x_all[n_train + n_valid:, :]
    return x_train, x_valid, x_test
def down_sample(X, old_shape, new_shape=(10, 10), jitter=False, warp=False):
    """Down-sample flattened images row-wise from `old_shape` to `new_shape`.

    X      : (N, prod(old_shape)) array with values in [0, 1).
    jitter : add sub-level uniform noise (dequantisation) before rescaling.
    warp   : additionally map through logit (requires jitter so values stay
             strictly inside (0, 1)).
    Returns a (N, prod(new_shape)) array.
    """
    assert(X.shape[1] == np.prod(old_shape))
    assert(np.all(0.0 <= X) and np.all(X < 1.0))
    epsilon = 1.0    # one grey level: the gap between quantised pixel values
    v_range = 256.0  # number of grey levels, used for normalisation
    Y = np.zeros((X.shape[0], np.prod(new_shape)))
    for ii in xrange(X.shape[0]):
        X_sq = np.reshape(X[ii, :], old_shape)
        # NOTE(review): the asserts below rely on imresize returning integer
        # grey levels in [0, 255] — confirm for the pinned scipy version.
        Y[ii, :] = imresize(X_sq, size=new_shape).ravel()
    assert(np.all(0 <= Y) and np.all(Y <= 255))
    # Resized values must still be quantised at integer grey-level spacing.
    assert(epsilon <= np.min(np.diff(np.unique(Y.ravel()))))
    if jitter:
        # Dequantise: spread each integer level uniformly over [level, level+1).
        Y = Y + epsilon * np.random.rand(Y.shape[0], Y.shape[1])
    Y = Y / v_range  # at least normalize
    assert(np.all(0.0 <= Y) and np.all(Y < 1.0))
    if warp:
        assert(jitter)  # logit needs values strictly inside (0, 1)
        Y = logit(Y)
        assert(np.all(np.isfinite(Y)))
    return Y
if __name__ == '__main__':
    # Fixed seed so the shuffling/jitter below are reproducible.
    np.random.seed(57421100)
    down_sample_size = 10  # side length of the down-sampled square images
    digit = None           # None => use all digit classes
    # TODO take as argument
    source_file = '../data/mnist.pkl'
    x_train, x_valid, x_test = load_data(source_file, digit)
    if down_sample_size is not None:
        curr_shape = (28, 28)  # original MNIST image shape
        new_shape = (down_sample_size, down_sample_size)
        x_train = down_sample(x_train, curr_shape, new_shape, jitter=True)
        #x_valid = down_sample(x_valid, curr_shape, new_shape, jitter=True)
    # Fit on the first 10k images, validate on the next 10k.
    R = learn_gauss_test(x_train[:10000, :], x_train[10000:20000, :], batch_size=20)
    X0, X_opt, train_hist, valid_hist = R
    df_valid = pd.DataFrame(valid_hist)
    df_train = pd.DataFrame(train_hist)
|
990,202 | e1be1ed43a6c0b6c41fb86e1d2546ab50858009a | '''
Demo of Python built-in functions on a string
'''
str1 = '123456'
print(len(str1))  # length of the string
print(max(str1))  # largest character
print(min(str1))  # smallest character
# Summation:
# print(sum(str1))  # raises an error — string characters cannot be added directly
list1 = []
for ch in str1:
    list1.append(int(ch))
sum(list1)  # NOTE(review): the digit sum is computed but discarded (not printed)
990,203 | a06d04daee932555d46805d43e35aeaa7b2b78d1 | #!/usr/local/env python
# -*- coding: utf-8 -*-
'''
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
'''
class Solution:
    # @return a boolean
    def isValid(self, s):
        """Return True iff every bracket in s is matched and properly nested.

        Non-bracket characters are ignored; an empty string is valid.
        """
        closer_for = {')': '(', '}': '{', ']': '['}
        openers = ('(', '{', '[')
        stack = []
        for ch in s:
            if ch in openers:
                stack.append(ch)
            elif ch in closer_for:
                # A closer must match the most recent unmatched opener.
                if not stack or stack[-1] != closer_for[ch]:
                    return False
                stack.pop()
        # Valid only when every opener has been consumed.
        return not stack
# Smoke test: a single matched pair should print True.
t = Solution()
print t.isValid("()")
|
def sqlsentance():
    """Placeholder helper; prints the Chinese label for 'MySQL statement'."""
    print('mysql语句')
990,205 | 6852bfec968675c66945c8457d351ce330a2a63a | import os
import csv
# Resolve input/output paths relative to this script's directory.
dirname = os.path.dirname(__file__)
csvpath = os.path.join(dirname, 'PyBank_data.csv')
outputpath = os.path.join(dirname, 'PyBank_Output.txt')
# Accumulators updated while scanning the CSV rows.
totalAmount = 0           # sum of all profit/loss values
totalMonths = 0           # number of data rows
currentAmountChange = 0   # month-over-month change for the current row
nextAmount = 0            # previous row's amount (0 before the first row)
previousAmountChange = 0  # greatest increase seen so far
changeAmount = 0          # running sum of month-over-month changes
greatestMonth = ''        # month label of the greatest increase
lowestAmountChange = 0    # greatest decrease seen so far
lowestMonth = ''          # month label of the greatest decrease
output = ''
with open(csvpath, 'r') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    csvheader = next(csvreader)  # skip the header row
    csvdata = list(csvreader)
    for row in csvdata:
        # row[0] = month label, row[1] = profit/loss amount
        totalAmount = totalAmount + int(row[1])
        currentAmountChange = int(row[1]) - nextAmount
        nextAmount = int(row[1])
        currentMonth = row[0]
        # On the first row the "change" equals the raw amount, so this branch
        # excludes it from the average. NOTE(review): it *resets* the
        # accumulator rather than skipping the row — only harmless if the
        # condition fires on the first row alone (i.e. no mid-stream zeros).
        if(currentAmountChange == nextAmount):
            changeAmount = 0
        else:
            changeAmount = changeAmount + currentAmountChange
        # NOTE(review): the first row's raw value still participates in the
        # extrema below — confirm that is intended.
        if previousAmountChange < currentAmountChange:
            previousAmountChange = currentAmountChange
            greatestMonth = currentMonth
        if lowestAmountChange > currentAmountChange:
            lowestAmountChange = currentAmountChange
            lowestMonth = currentMonth
totalMonths = len(csvdata)
# Average over the number of month-to-month transitions (rows - 1).
averageChange = round(changeAmount/(totalMonths-1), 2)
output = (
    'Financial Analysis\n'
    '----------------------------\n'
    f'Total Months: {totalMonths}\n'
    f'Total: ${totalAmount}\n'
    f'Average Change: ${averageChange}\n'
    f'Greatest Increase in Profits: {greatestMonth} (${previousAmountChange})\n'
    f'Greatest Decrease in Profits: {lowestMonth} (${int(lowestAmountChange)})\n'
)
print(output)
# Persist the same report next to the input file.
with open(outputpath, 'w', newline='') as outputfile:
    outputfilewriter = outputfile.write(output)
|
990,206 | 2f6d3320ad95a3471c1edf1a328a296d9c79d20b | from collections import namedtuple
import numpy as np
__all__ = ["extract_options", "make_cube", "inf_cube", "incube",
"uniform", "gaussian",
"individual", "collective", "rotative", "mh"]
# Identity transform: default "do nothing" ascend/descend function for mh().
Id = lambda x, *vargs, **kvargs: x
MESSAGE = "Starting point outside of the domain."
# Result of mh(): reduced estimate `x`, full chain `walker`, per-step `objective`.
Roaming = namedtuple('Roaming', 'x walker objective')
def extract_options(options, prefix):
    """extract_options(dict(law=0, law_a=1, law_b=2, foo=3, foo_c=4), 'law') == {'a': 1, 'b': 2}

    Collect the entries of `options` whose key starts with `prefix` + '_',
    stripping that leading prefix from the key.
    """
    tag = prefix + '_'
    # str.replace would strip *every* occurrence of the tag (e.g. 'law_law_a'
    # -> 'a'); slice off only the leading prefix instead.
    return {k[len(tag):]: options[k] for k in options if k.startswith(tag)}
def make_cube(d, start=-np.inf, stop=np.inf):
    """Return a (d, 2) float array of identical per-dimension (lower, upper) bounds."""
    bounds = np.array([start, stop], dtype=float)
    return np.tile(bounds, (d, 1))
def inf_cube(d):
    """Unbounded d-dimensional domain: every coordinate in (-inf, inf)."""
    return make_cube(d, -np.inf, np.inf)
def incube(x, cube):  # whether in the open cube
    """Return True iff x lies strictly inside the open box `cube` ((d, 2) bounds)."""
    x = np.asarray(x)
    cube = np.asarray(cube)
    # np.alltrue was removed in NumPy 2.0; np.all is the supported spelling.
    return bool(np.all(cube[:, 0] < x) and np.all(x < cube[:, 1]))
def uniform(x, width=1, *vargs, seed=None, **kvargs):
    """Perturb every coordinate of x by an independent uniform draw in +/- width/2."""
    x = np.array(x)
    d = x.size
    w = np.full(d, width, dtype=float) if np.isscalar(width) else width
    if seed is not None:
        np.random.seed(seed)
    noise = np.random.rand(d) - 0.5
    return x + w * noise
def gaussian(x, sigma=1, *vargs, seed=None, **kvargs):
    """Perturb every coordinate of x with independent N(0, sigma^2) noise."""
    x = np.array(x)
    d = x.size
    scale = np.full(d, sigma, dtype=float) if np.isscalar(sigma) else sigma
    if seed is not None:
        np.random.seed(seed)
    return x + scale * np.random.randn(d)
def individual(x, cube=None, law=uniform, *vargs, **options):
    """Propose per-coordinate moves via `law`; coordinates leaving the open
    box are reset to their current value. Returns (proposal, None)."""
    x = np.array(x)
    d = x.size
    cube = inf_cube(d) if cube is None else np.array(cube)
    if not incube(x, cube):
        raise Exception(MESSAGE)
    proposal = law(x, **extract_options(options, 'law'))
    # Per-coordinate rejection: out-of-bounds entries keep their old value.
    outside = (proposal < cube[:, 0]) | (proposal > cube[:, 1])
    proposal[outside] = x[outside]
    return proposal, None
def collective(x, cube=None, cov=1, *vargs, seed=None, **kvargs):  # multivariate-normal distribution
    """Propose a joint multivariate-normal move of all coordinates at once;
    the whole proposal is rejected if it leaves the open box. Returns (y, None)."""
    x = np.array(x)
    d = x.size
    cube = inf_cube(d) if cube is None else np.array(cube)
    if not incube(x, cube):
        raise Exception(MESSAGE)
    # Scalars become an isotropic diagonal; 1-d vectors a diagonal covariance.
    cov = np.array(cov * np.ones(d) if np.isscalar(cov) else cov)
    if cov.ndim == 1:
        cov = np.diag(cov)
    if seed is not None:
        np.random.seed(seed)
    candidate = x + np.random.multivariate_normal(np.zeros(d), cov)
    if not incube(candidate, cube):
        candidate = x
    return candidate, None
def rotative(x, t=0, state=None, cube=None, law=uniform, *vargs, seed=None, **options):
    """Propose a move of a single coordinate, cycling through them by step index.

    Coordinate t % d is perturbed via `law`; the proposal is kept only if it
    stays strictly inside the box. Returns (x, state); note x is mutated in
    place, which is why the float copy below is required.
    """
    x = np.array(x, dtype='float64')  # caution: must specify the datatype, in-place operation below
    d = x.size
    if cube is None:
        cube = inf_cube(d)
    cube = np.array(cube)
    if not incube(x, cube):
        raise Exception(MESSAGE)
    # Seed only on the very first step so the rest of the sweep stays random.
    if seed is not None and t==0:
        np.random.seed(seed)
    r = t % d  # index of the coordinate updated at this step
    y = law([x[r]], **extract_options(options, 'law'))
    # Accept the single-coordinate proposal only when it stays inside the box.
    if cube[r, 0] < y < cube[r, 1]:
        x[r] = y
    return x, state
def mh(x, proba, cube=None, move='individual', ascdes=(Id, Id), picked=range(100, 1000, 1), redu=np.mean, seed=None, **options):
    """Metropolis-Hastings sampler over the open box `cube`.

    x       : starting point (must lie strictly inside `cube`).
    proba   : unnormalised target density, called as proba(point).
    move    : proposal kernel — a name among {'individual', 'collective',
              'rotative'} or a callable with the same signature.
    ascdes  : (ascend, descend) pair of transforms; proposals are made in the
              ascended space and mapped back through ascdes[1].
    picked  : chain indices whose states are reduced (via `redu`) into the
              point estimate; its last element fixes the chain length.
    options : 'move_*'-prefixed keywords forwarded to the proposal kernel.
    Returns Roaming(estimate, walker (N+1, d), objective (N+1,)).
    """
    x = np.array(x)
    d = len(x)
    if cube is None:
        cube = inf_cube(d)
    cube = np.array(cube)
    if not incube(x, cube):
        raise Exception(MESSAGE)
    # Resolve a kernel name to its function; callables pass through unchanged.
    dispatcher = dict(individual=individual, collective=collective, rotative=rotative)
    move = dispatcher.get(move, move)
    rng = np.random.RandomState(seed)
    N = picked[-1]  # run the chain just far enough to cover the last picked index
    walker = np.zeros((N+1, d))
    objective = np.zeros(N+1)
    walker[0, :] = x
    objective[0] = proba(x)
    # _x / _cube live in the transformed (ascended) space; x in the original.
    _x = ascdes[0](x)
    px = proba(x)
    _cube = np.apply_along_axis(ascdes[0], 0, cube)
    state = None  # opaque per-kernel state threaded through the proposals
    for t in range(N):
        _y, state2 = move(_x, t=t, state=state, cube=_cube, **extract_options(options, 'move'))
        y = ascdes[1](_y)
        py = proba(y)
        # Metropolis acceptance. NOTE(review): divides by px — assumes the
        # density never evaluates to exactly 0 along the chain.
        if rng.rand() < py / px:
            _x, x, px = _y, y, py
            state = state2
        walker[t+1, :] = x
        objective[t+1] = px
    return Roaming(redu(walker[picked, :], axis=0), walker, objective)
|
990,207 | 548abb9274610c0aa46703f47ed5f9eb58c5fdd7 | from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import pickle
import numpy as np
import scipy.spatial
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
# Pick GPU when available; `device` drives all tensor placement below.
use_cuda = torch.cuda.is_available()
device = ('cuda' if use_cuda else 'cpu')
print('Device : ', device)
class Lang:
    """Vocabulary for one language: word<->index maps plus usage counts.

    Indices 0 and 1 are reserved for the SOS and EOS markers.
    """
    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS
    def addSentence(self, sentence):
        """Register every space-separated token of `sentence`."""
        for token in sentence.split(' '):
            self.addWord(token)
    def addWord(self, word):
        """Add `word` to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            idx = self.n_words
            self.word2index[word] = idx
            self.word2count[word] = 1
            self.index2word[idx] = word
            self.n_words = idx + 1
# Maximum length of the sequences you are mapping
MAX_LENGTH = 20  # 50
MIN_LENGTH = 4  # 2
# Dull set (from RL-chatbot): generic replies penalised by the RL reward below.
dull_set = ["I don't know what you're talking about.", "I don't know.",
            "You don't know.", "You know what I mean.", "I know what you mean.",
            "You know what I'm saying.", "You don't know anything."]
# start of sentence and end of sentence indices
SOS_token = 0
EOS_token = 1


def _load_pickle(path):
    """Unpickle `path`, closing the file handle promptly (the previous
    pickle.load(open(...)) form leaked the handle)."""
    with open(path, "rb") as f:
        return pickle.load(f)


# Pre-built vocabularies and sentence pairs (lengths 4..20) from earlier runs.
input_lang = _load_pickle("saved_pickle/input_lang_4_20.p")
output_lang = _load_pickle("saved_pickle/output_lang_4_20.p")
pairs = _load_pickle("saved_pickle/pairs_4_20.p")
# input_lang.n_words, input_lang.word2index["froid"], input_lang.index2word[33]
# Notebook-style inspection line; its value is discarded when run as a script.
input_lang.n_words, output_lang.n_words, output_lang.word2index["?"]
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip accents: NFD-decompose, then drop combining-mark codepoints."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
def normalizeString(s):
    """Lowercase and de-accent `s`, space-isolate .!? marks, collapse every
    other non-letter run to a single space, and drop 'newlinechar' markers."""
    s = unicodeToAscii(s.lower().strip())
    s = re.sub(r"([.!?])", r" \1", s)
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    return re.sub("newlinechar", "", s)
def readLangs(lang1, lang2, reverse=False):
    """Read '../data/<lang1>-<lang2>.txt' (one tab-separated sentence pair per
    line), normalize both sides, and create empty Lang vocabularies.

    reverse swaps the pair order and the two Lang roles.
    Returns (input_lang, output_lang, pairs).
    """
    print("Reading lines...")
    # Read the file and split into lines
    lines = open('../data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').read().strip().split('\n')
    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)
    return input_lang, output_lang, pairs
def filterPair(p):
    '''
    Your Preferences here

    Keep a (source, reply) pair when the source fits under MAX_LENGTH tokens,
    the reply length is strictly between MIN_LENGTH and MAX_LENGTH, and the
    reply contains no URL.
    '''
    if "https://" in p[1]:
        return False
    src_len = len(p[0].split(' '))
    tgt_len = len(p[1].split(' '))
    return src_len < MAX_LENGTH and MIN_LENGTH < tgt_len < MAX_LENGTH
def filterPairs(pairs):
    """Drop every pair rejected by filterPair."""
    return list(filter(filterPair, pairs))
def prepareData(lang1, lang2, reverse=False, Filter=False):
    """Load sentence pairs via readLangs, optionally length-filter them, and
    populate both vocabularies. Returns (input_lang, output_lang, pairs)."""
    input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
    print("Read %s sentence pairs" % len(pairs))
    if Filter:
        pairs = filterPairs(pairs)
        print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs
# Rebuild vocabularies and length-filtered pairs from ../data/input-output.txt.
input_lang, output_lang, pairs = prepareData('input', 'output', reverse=False, Filter=True)
def indexesFromSentence(lang, sentence):
    """Map each space-separated token of `sentence` to its index in `lang`."""
    tokens = sentence.split(' ')
    return [lang.word2index[t] for t in tokens]
def variableFromSentence(lang, sentence):
    """Encode a sentence as a (len+1, 1) LongTensor Variable ending in EOS,
    moved to the active device when CUDA is in use."""
    idxs = indexesFromSentence(lang, sentence) + [EOS_token]
    result = Variable(torch.LongTensor(idxs).view(-1, 1))
    return result.to(device) if use_cuda else result
def variablesFromPair(pair, reverse):
    """Encode a (source, target) pair of sentences; swap the two when
    `reverse` is set (used to train the backward model)."""
    src = variableFromSentence(input_lang, pair[0])
    tgt = variableFromSentence(output_lang, pair[1])
    return (tgt, src) if reverse else (src, tgt)
import time
import math
def asMinutes(s):
    """Format a duration in seconds as 'Xm Ys'."""
    minutes, seconds = divmod(s, 60)
    return '%dm %ds' % (math.floor(minutes), seconds)
def timeSince(since, percent):
    """Return 'elapsed (- remaining)' given a start time and the fraction of
    work completed (0 < percent <= 1)."""
    elapsed = time.time() - since
    estimated_total = elapsed / (percent)
    remaining = estimated_total - elapsed
    return '%s (- %s)' % (asMinutes(elapsed), asMinutes(remaining))
import numpy as np
class EncoderRNN(nn.Module):
    """GRU encoder consuming one token id per forward() call, with a learnable
    initial hidden state."""
    def __init__(self, input_size, hidden_size, num_layers=3, bidirectional=False):
        """input_size: vocabulary size; hidden_size: embedding and GRU width."""
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, num_layers, bidirectional=bidirectional)
        if bidirectional:
            num_directions = 2
        else:
            num_directions = 1
        # make the initial hidden state learnable as well
        hidden0 = torch.zeros(self.num_layers * num_directions, 1, self.hidden_size)
        if use_cuda:
            hidden0 = hidden0.to(device)
        else:
            hidden0 = hidden0
        self.hidden0 = nn.Parameter(hidden0, requires_grad=True)
    def forward(self, input, hidden):
        """Encode one token id; returns (output, hidden) for a single (1, 1, H) step."""
        embedded = self.embedding(input).view(1, 1, -1)
        output = embedded
        output, hidden = self.gru(output, hidden)
        if self.bidirectional:
            output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]  # Sum bidirectional outputs
        return output, hidden
    def initHidden(self):
        """Return the learnable initial hidden state (moved to the device)."""
        if use_cuda:
            return self.hidden0.to(device)
        else:
            return self.hidden0
class AttnDecoderRNN(nn.Module):
    """GRU decoder attending over a fixed max_length window of encoder outputs,
    with dropout on the embeddings and a learnable initial hidden state."""
    def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH, num_layers=3):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.dropout_p = dropout_p
        self.max_length = max_length
        self.num_layers = num_layers
        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # Attention scores over encoder positions from [embedded; hidden].
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size, num_layers=num_layers)
        self.out = nn.Linear(self.hidden_size, self.output_size)
        # make the initial hidden state learnable as well
        hidden0 = torch.zeros(self.num_layers, 1, self.hidden_size)
        if use_cuda:
            hidden0 = hidden0.to(device)
        else:
            hidden0 = hidden0
        self.hidden0 = nn.Parameter(hidden0, requires_grad=True)
    def forward(self, input, hidden, encoder_outputs):
        """One decode step; returns (log-probs over the vocab, hidden, attn weights)."""
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # Weights computed from the current embedding and the first hidden layer.
        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))
        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)
        output = F.relu(output)
        output, hidden = self.gru(output, hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights
    def initHidden(self):
        """Return the learnable initial hidden state (moved to the device)."""
        if use_cuda:
            return self.hidden0.to(device)
        else:
            return self.hidden0
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer,
          criterion, max_length=MAX_LENGTH, teacher_forcing_ratio=0.5, bidirectional=False):
    """Run one seq2seq training step on a single (input, target) pair.

    Encodes the input token by token, decodes with scheduled teacher forcing,
    accumulates `criterion` over target tokens, then applies one optimizer
    step to both models. Returns the per-token loss value.
    """
    encoder_hidden = encoder.initHidden()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    input_length = input_variable.size()[0]
    target_length = target_variable.size()[0]
    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
    encoder_outputs = encoder_outputs.to(device) if use_cuda else encoder_outputs
    loss = 0
    # Feed the input one token at a time, collecting per-step outputs for attention.
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0][0]
    decoder_input = Variable(torch.LongTensor([[SOS_token]]))
    decoder_input = decoder_input.to(device) if use_cuda else decoder_input
    if bidirectional:
        # sum the bidirectional hidden states into num_layers long cause the decoder is not bidirectional
        encoder_hidden = encoder_hidden[:encoder.num_layers, :, :] + encoder_hidden[encoder.num_layers:, :, :]
    decoder_hidden = encoder_hidden
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_variable[di])
            decoder_input = target_variable[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            decoder_input = Variable(torch.LongTensor([[ni]]))
            decoder_input = decoder_input.to(device) if use_cuda else decoder_input
            loss += criterion(decoder_output, target_variable[di])
            if ni == EOS_token:
                break
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # NOTE(review): `.data[0]` is pre-0.4 torch scalar access; newer torch
    # would require loss.item() — confirm the pinned torch version.
    return loss.data[0] / target_length
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100,
               learning_rate=0.01, teacher_forcing_ratio=0.5, bidirectional=False, reverse=False):
    """Train encoder/decoder on n_iters randomly sampled pairs from the module
    global `pairs`, logging the average loss every `print_every` iterations.
    `reverse` swaps each pair (used to train the backward model)."""
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    # Pre-sample the whole training schedule up front.
    training_pairs = [variablesFromPair(random.choice(pairs), reverse)
                      for i in range(n_iters)]
    criterion = nn.NLLLoss()
    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_variable = training_pair[0]
        target_variable = training_pair[1]
        loss = train(input_variable, target_variable, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion,
                     teacher_forcing_ratio=teacher_forcing_ratio,
                     bidirectional=bidirectional)
        print_loss_total += loss
        plot_loss_total += loss
        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))
        if iter % plot_every == 0:
            # NOTE(review): plot_losses is accumulated but never returned or plotted here.
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0
# Model hyper-parameters for the forward (input -> reply) seq2seq.
hidden_size = 128  # 256
num_layers = 2  # 4
bidirectional = False  # True
encoder = EncoderRNN(input_lang.n_words, hidden_size, num_layers=num_layers, bidirectional=bidirectional)
attn_decoder = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1, num_layers=num_layers)
if use_cuda:
    encoder = encoder.to(device)
    attn_decoder = attn_decoder.to(device)
# Optionally resume from previously saved parameters:
# encoder.load_state_dict(torch.load("saved_params/encoder.pth"))
# attn_decoder.load_state_dict(torch.load("saved_params/attn_decoder.pth"))
# encoder.load_state_dict(torch.load("saved_params/encoder_2L_h128_uni.pth"))
# attn_decoder.load_state_dict(torch.load("saved_params/attn_decoder_2L_h128_uni.pth"))
trainIters(encoder, attn_decoder, n_iters=10000, print_every=1000,
           learning_rate=0.0001, teacher_forcing_ratio=0.75,
           bidirectional=bidirectional)  # last loss 4.3393, 16 secs per 100 iters, so ~ 22500 iters/hr
# If you want to save the results of your training
torch.save(encoder.state_dict(), "saved_params/encoder_2L_h128_uni.pth")
torch.save(attn_decoder.state_dict(), "saved_params/attn_decoder_2L_h128_uni.pth")
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH,
             bidirectional=bidirectional):
    """Greedy-decode a reply for `sentence`; returns (decoded_words, attentions).

    NOTE(review): the default for `bidirectional` is captured from the
    module-level variable at definition time, not at call time.
    """
    input_variable = variableFromSentence(input_lang, sentence)
    input_length = input_variable.size()[0]
    encoder_hidden = encoder.initHidden()
    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
    encoder_outputs = encoder_outputs.to(device) if use_cuda else encoder_outputs
    # Encode the input one token at a time.
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_variable[ei],
                                                 encoder_hidden)
        encoder_outputs[ei] = encoder_outputs[ei] + encoder_output[0][0]
    decoder_input = Variable(torch.LongTensor([[SOS_token]]))  # SOS
    decoder_input = decoder_input.to(device) if use_cuda else decoder_input
    if bidirectional:
        # sum the bidirectional hidden states into num_layers long cause the decoder is not bidirectional
        encoder_hidden = encoder_hidden[:encoder.num_layers, :, :] + encoder_hidden[encoder.num_layers:, :, :]
    decoder_hidden = encoder_hidden
    decoded_words = []
    decoder_attentions = torch.zeros(max_length, max_length)
    # Greedy decoding until EOS or max_length tokens.
    for di in range(max_length):
        decoder_output, decoder_hidden, decoder_attention = decoder(
            decoder_input, decoder_hidden, encoder_outputs)
        decoder_attentions[di] = decoder_attention.data
        topv, topi = decoder_output.data.topk(1)
        ni = topi[0][0]
        if ni == EOS_token:
            decoded_words.append('<EOS>')
            break
        else:
            decoded_words.append(output_lang.index2word[ni])
        decoder_input = Variable(torch.LongTensor([[ni]]))
        decoder_input = decoder_input.to(device) if use_cuda else decoder_input
    return decoded_words, decoder_attentions[:di + 1]
def evaluateRandomly(encoder, decoder, n=10, bidirectional=False):
    """Print model replies for n random pairs drawn from the global `pairs`."""
    for i in range(n):
        pair = random.choice(pairs)
        print('input from data >', pair[0])
        print('output from data=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0], bidirectional=bidirectional)
        output_sentence = ' '.join(output_words)
        print('bot response <', output_sentence)
        print('')
evaluateRandomly(encoder, attn_decoder, n=10, bidirectional=bidirectional)
# Backward model: maps replies back to inputs (used for the semantic-coherence
# reward during RL fine-tuning).
bidirectional = False  # True
hidden_size = 128  # 256
num_layers = 2  # 4
bidirectional = False  # True
backward_encoder = EncoderRNN(output_lang.n_words, hidden_size, num_layers=num_layers, bidirectional=bidirectional)
backward_attn_decoder = AttnDecoderRNN(hidden_size, input_lang.n_words, dropout_p=0.1, num_layers=num_layers)
if use_cuda:
    backward_encoder = backward_encoder.to(device)
    backward_attn_decoder = backward_attn_decoder.to(device)
# Optionally resume the backward model from saved parameters:
# backward_encoder.load_state_dict(torch.load("saved_params/backward_encoder_2L_h128_uni.pth"))
# backward_attn_decoder.load_state_dict(torch.load("saved_params/backward_attn_decoder_2L_h128_uni.pth"))
trainIters(backward_encoder, backward_attn_decoder, n_iters=10000, print_every=1000,
           learning_rate=0.0001, teacher_forcing_ratio=0.75,
           bidirectional=bidirectional, reverse=True)
# If you want to save the results of your training
torch.save(backward_encoder.state_dict(), "saved_params/backward_encoder_2L_h128_uni.pth")
torch.save(backward_attn_decoder.state_dict(), "saved_params/backward_attn_decoder_2L_h128_uni.pth")
# Fresh forward/backward model pair for the RL stage.
hidden_size = 128  # 256
num_layers = 2  # 4
bidirectional = False  # True
# Forward
forward_encoder = EncoderRNN(input_lang.n_words, hidden_size, num_layers=num_layers, bidirectional=bidirectional)
forward_attn_decoder = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1, num_layers=num_layers)
# Backward
backward_encoder = EncoderRNN(output_lang.n_words, hidden_size, num_layers=num_layers, bidirectional=bidirectional)
backward_attn_decoder = AttnDecoderRNN(hidden_size, input_lang.n_words, dropout_p=0.1, num_layers=num_layers)
if use_cuda:
    forward_encoder = forward_encoder.to(device)
    forward_attn_decoder = forward_attn_decoder.to(device)
    backward_encoder = backward_encoder.to(device)
    backward_attn_decoder = backward_attn_decoder.to(device)
# Forward
# forward_encoder.load_state_dict(torch.load("saved_params/encoder_2L_h128_uni.pth"))
# forward_attn_decoder.load_state_dict(torch.load("saved_params/attn_decoder_2L_h128_uni.pth"))
# backward_encoder.load_state_dict(torch.load("saved_params/backward_encoder_2L_h128_uni.pth"))
# backward_attn_decoder.load_state_dict(torch.load("saved_params/backward_attn_decoder_2L_h128_uni.pth"))
def RLStep(input_variable, target_variable, encoder, decoder, criterion, max_length=MAX_LENGTH,
           teacher_forcing_ratio=0.5, bidirectional=False):
    """One forward pass (no optimizer step) collecting loss and the greedy reply.

    Same flow as train(), but gradients are not applied here — the caller
    scales the returned loss by a reward before backprop.
    Returns (loss, target_length, response) where response holds the token ids
    generated in the non-teacher-forcing branch (empty otherwise).
    """
    encoder_hidden = encoder.initHidden()
    input_length = input_variable.size()[0]
    target_length = target_variable.size()[0]
    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))
    encoder_outputs = encoder_outputs.to(device) if use_cuda else encoder_outputs
    loss = 0
    response = []
    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0][0]
    decoder_input = Variable(torch.LongTensor([[SOS_token]]))
    decoder_input = decoder_input.to(device) if use_cuda else decoder_input
    if bidirectional:
        # sum the bidirectional hidden states into num_layers long cause the decoder is not bidirectional
        encoder_hidden = encoder_hidden[:encoder.num_layers, :, :] + encoder_hidden[encoder.num_layers:, :, :]
    decoder_hidden = encoder_hidden
    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_variable[di])
            decoder_input = target_variable[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            decoder_input = Variable(torch.LongTensor([[ni]]))
            decoder_input = decoder_input.to(device) if use_cuda else decoder_input
            loss += criterion(decoder_output, target_variable[di])
            # TODO: ni or decoder_output?
            response.append(ni)
            if ni == EOS_token:
                break
    return (loss, target_length, response)
def calculate_rewards(input_variable, target_variable,
                      forward_encoder, forward_decoder,
                      backward_encoder, backward_decoder,
                      criterion, dull_responses,
                      teacher_forcing_ratio, bidirectional):
    """Simulate a short self-conversation with the forward model and score it
    with three RL rewards: r1 ease of answering (penalise dull replies),
    r2 information flow (penalise repeating earlier turns), r3 semantic
    coherence (forward + backward likelihoods). Returns the mean episodic
    reward, or 0 when no episode completed.
    """
    ep_rewards = []
    # ep_num are used to bound the number of episodes
    # MAXIMUM ep = 10
    ep_num = 1
    responses = []
    ep_input = input_variable
    ep_target = target_variable
    while (ep_num <= 10):
        # First start with Forward model to generate the current response, given ep_input
        # ep_target is empty if ep_num > 1
        _, _, curr_response = RLStep(ep_input, ep_target,
                                     forward_encoder, forward_decoder,
                                     criterion,
                                     teacher_forcing_ratio=teacher_forcing_ratio,
                                     bidirectional=bidirectional)
        ## Break once we see (1) dull response, (2) the response is less than MIN_LENGTH, (3) repetition
        if (len(curr_response) < MIN_LENGTH):  # or (curr_response in responses) or (curr_response in dull_responses):
            break
        curr_response = Variable(torch.LongTensor(curr_response), requires_grad=False).view(-1, 1)
        curr_response = curr_response.to(device) if use_cuda else curr_response
        responses.append(curr_response)
        ## Ease of answering
        # Use the forward model to generate the log prob of generating dull response given ep_input.
        # Use the teacher_forcing_ratio = 1!
        r1 = 0
        for d in dull_responses:
            # ratio 1.1 > 1 forces the teacher-forcing branch unconditionally.
            forward_loss, forward_len, _ = RLStep(ep_input, d, forward_encoder, forward_decoder,
                                                  criterion,
                                                  teacher_forcing_ratio=1.1,
                                                  bidirectional=bidirectional)
            if forward_len > 0:
                # log (1/P(a|s)) = CE --> log(P(a | s)) = - CE
                r1 -= forward_loss / forward_len
        if len(dull_responses) > 0:
            r1 = r1 / len(dull_responses)
        ## Information flow
        # responses contains all the generated response by the forward model
        r2 = 0
        if (len(responses) > 2):
            # vec_a --> h_(i) = responses[-3]
            # vec_b --> h_(i+1)= responses[-1]
            vec_a = responses[-3].data
            vec_b = responses[-1].data
            # length of the two vector might not match
            min_length = min(len(vec_a), len(vec_b))
            vec_a = vec_a[:min_length]
            vec_b = vec_b[:min_length]
            cos_sim = 1 - scipy.spatial.distance.cosine(vec_a, vec_b)
            # -1 <= cos_sim <= 1
            # TODO: how to handle negative cos_sim?
            if cos_sim <= 0:
                r2 = - cos_sim
            else:
                r2 = - np.log(cos_sim)
        ## Semantic Coherence
        # Use the forward model to generate the log prob of generating curr_response given ep_input
        # Use the backward model to generate the log prob of generating ep_input given curr_response
        r3 = 0
        forward_loss, forward_len, _ = RLStep(ep_input, curr_response,
                                              forward_encoder, forward_decoder,
                                              criterion,
                                              teacher_forcing_ratio=teacher_forcing_ratio,
                                              bidirectional=bidirectional)
        backward_loss, backward_len, _ = RLStep(curr_response, ep_input,
                                                backward_encoder, backward_decoder,
                                                criterion,
                                                teacher_forcing_ratio=teacher_forcing_ratio,
                                                bidirectional=bidirectional)
        if forward_len > 0:
            r3 += forward_loss / forward_len
        if backward_len > 0:
            r3 += backward_loss / backward_len
        ## Add up all the three rewards
        rewards = 0.25 * r1 + 0.25 * r2 + 0.5 * r3
        ep_rewards.append(rewards)
        ## Set the next input
        ep_input = curr_response
        ## TODO: what's the limit of the length? and what should we put as the dummy target?
        ep_target = Variable(torch.LongTensor([0] * MAX_LENGTH), requires_grad=False).view(-1, 1)
        ep_target = ep_target.to(device) if use_cuda else ep_target
        # Turn off the teacher forcing ratio after first iteration (since we don't have a target anymore).
        teacher_forcing_ratio = 0
        ep_num += 1
    # Take the mean of the episodic rewards
    r = 0
    if len(ep_rewards) > 0:
        r = np.mean(ep_rewards)
    return r
def trainRLIters(forward_encoder, forward_decoder, backward_encoder, backward_decoder, dull_responses, n_iters,
                 print_every=1000, plot_every=100, learning_rate=0.01, teacher_forcing_ratio=0.5,
                 bidirectional=False):
    """Run n_iters REINFORCE-style updates on the forward seq2seq model.

    Each iteration computes the forward NLL loss on a random training pair,
    scales it by the reward from calculate_rewards (ease of answering,
    information flow, semantic coherence), and applies SGD to the forward
    encoder/decoder. The backward model's optimizers are created and zeroed
    but never stepped here -- presumably the backward model stays fixed
    during RL tuning; confirm.
    """
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every
    # Optimizer
    forward_encoder_optimizer = optim.SGD(forward_encoder.parameters(), lr=learning_rate)
    forward_decoder_optimizer = optim.SGD(forward_decoder.parameters(), lr=learning_rate)
    backward_encoder_optimizer = optim.SGD(backward_encoder.parameters(), lr=learning_rate)
    backward_decoder_optimizer = optim.SGD(backward_decoder.parameters(), lr=learning_rate)
    # Pre-sample the whole training schedule of (input, target) pairs up front.
    training_pairs = [variablesFromPair(random.choice(pairs), reverse=False)
                      for i in range(n_iters)]
    criterion = nn.NLLLoss()
    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_variable = training_pair[0]
        target_variable = training_pair[1]
        ## Manually zero out the optimizer
        forward_encoder_optimizer.zero_grad()
        forward_decoder_optimizer.zero_grad()
        backward_encoder_optimizer.zero_grad()
        backward_decoder_optimizer.zero_grad()
        # Forward
        forward_loss, forward_len, _ = RLStep(input_variable, target_variable,
                                              forward_encoder, forward_decoder,
                                              criterion,
                                              teacher_forcing_ratio=teacher_forcing_ratio,
                                              bidirectional=bidirectional)
        ## Calculate the reward
        reward = calculate_rewards(input_variable, target_variable,
                                   forward_encoder, forward_decoder,
                                   backward_encoder, backward_decoder,
                                   criterion, dull_responses,
                                   teacher_forcing_ratio, bidirectional)
        ## Update the forward seq2seq with its loss scaled by the reward
        loss = forward_loss * reward
        loss.backward()
        forward_encoder_optimizer.step()
        forward_decoder_optimizer.step()
        # NOTE(review): loss.data[0] is the pre-0.4 PyTorch idiom; on modern
        # PyTorch this must be loss.item() -- confirm target torch version.
        print_loss_total += (loss.data[0] / forward_len)
        plot_loss_total += (loss.data[0] / forward_len)
        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))
        if iter % plot_every == 0:
            # Accumulate a smoothed loss curve for later plotting.
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0
# Pre-encode the dull/generic responses used by the ease-of-answering reward.
dull_responses = [variableFromSentence(output_lang, d) for d in dull_set]
# NOTE(review): this overwrites the list just built, disabling the
# dull-response reward term entirely -- confirm whether this is intentional.
dull_responses = []
trainRLIters(forward_encoder, forward_attn_decoder, backward_encoder, backward_attn_decoder, dull_responses,
             n_iters=10000, print_every=100, learning_rate=0.0001,
             teacher_forcing_ratio=0.5, bidirectional=bidirectional)
# Persist only the RL-tuned forward model; the backward model is not saved.
torch.save(forward_encoder.state_dict(), "saved_params/rl_forward_encoder_2L_h128_uni.pth")
torch.save(forward_attn_decoder.state_dict(), "saved_params/rl_forward_attn_decoder_2L_h128_uni.pth")
evaluateRandomly(forward_encoder, forward_attn_decoder, n=10, bidirectional=bidirectional)
|
990,208 | f23f3f977d93e2077fc957386e37ede3341e2752 | # Tool to collect generated files for deployment
import argparse
import collections
import json
import os
import re
import subprocess
import sys
# Command line: one or more staticprep output files, plus optional extra
# files to deploy alongside them.
parser = argparse.ArgumentParser(description='Collect files for deployment')
parser.add_argument('staticprep_files', nargs='+', metavar='STATICPREP_FILES', help='files produced by staticprep')
# BUG FIX: default=[] -- with nargs='*' and no default, args.extradeploy is
# None when the flag is omitted, and the later `for extradeploy in
# args.extradeploy` loop and `+ args.extradeploy` concatenation raise TypeError.
parser.add_argument('--extradeploy', nargs='*', default=[], metavar='EXTRADEPLOY', help='additional files to deploy')
args = parser.parse_args()
# Deployment info. Key is a source file, values are additional generated files
deploy_files = {}
# Matches make --trace output lines of the form:
#   update target '<target>' due to: <dep1> <dep2> ...
make_dependency_re = re.compile('update target \'(.*)\' due to: (.*)')
# Filter out untracked files
def filter_untracked(files):
    """Return the subset of *files* that git tracks.

    `git ls-files --error-unmatch <path>` exits 0 only for tracked paths.
    """
    tracked = []
    for f in files:
        # BUG FIX: pass the argument vector directly instead of
        # ' '.join(...) with shell=True -- the old form broke on paths
        # containing spaces or shell metacharacters. subprocess.DEVNULL also
        # avoids leaking an open(os.devnull) file handle per call.
        git_process = subprocess.Popen(['git', 'ls-files', '--error-unmatch', f],
                                       stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
        git_process.wait()
        if git_process.returncode == 0:
            tracked.append(f)
    return tracked
# Recursively determine dependencies
def resolve_dependencies(dep_tree, target):
    """Return the transitive closure of *target*'s dependencies.

    *dep_tree* maps a name to an iterable of its direct dependencies;
    *target* itself is included in the result only if it is reachable
    from one of its own dependencies.
    """
    resolved = set()
    frontier = collections.deque(dep_tree.get(target, []))
    while frontier:
        dep = frontier.popleft()
        if dep not in resolved:
            resolved.add(dep)
            frontier.extend(dep_tree.get(dep, []))
    return resolved
# Collect all files the staticprep_files depend on using make
for prep_file_name in args.staticprep_files:
    dep_tree = {}
    # Dry-run make with --trace; each "update target ... due to: ..." line
    # reveals one edge of the dependency graph.
    make_process = subprocess.Popen(' '.join (['make', '-s', '-n', '-B', '--trace', prep_file_name]),
                                    shell=True, stdout=subprocess.PIPE)
    for line in make_process.stdout:
        line = line.decode('utf-8').strip()
        if line.startswith("Makefile:"):
            # Extract part after first ' '
            message = line.split(' ', 1)[1]
            dep_match = make_dependency_re.match(message)
            if dep_match:
                deps = set(dep_match.group(2).split())
                dep_tree.setdefault(dep_match.group(1), set()).update(deps)
    # Inject 'dependency' of extradeploy files on produced file
    # NOTE(review): args.extradeploy is None when --extradeploy is omitted
    # (nargs='*' with no default); this loop would then raise TypeError.
    for extradeploy in args.extradeploy:
        dep_tree.setdefault(extradeploy, set()).add(prep_file_name)
    # Read generated files, add as dependencies
    generated_files = {}
    with open(prep_file_name + '.gen') as prep_file_generated:
        generated_files = json.load(prep_file_generated)
    for generated, deps in generated_files.items():
        dep_tree.setdefault(generated, set()).update(deps)
    # Determine tracked files
    all_deps = set()
    for file, deps in dep_tree.items():
        all_deps = all_deps | deps
    tracked_deps = filter_untracked(all_deps)
    # Generate deployment info
    for to_deploy in list(generated_files.keys()) + args.extradeploy:
        deploy_dep = resolve_dependencies(dep_tree, to_deploy)
        # Keep only deps that git tracks; the lazy filter is consumed once
        # by extend() below.
        deploy_dep = filter(lambda f: f in tracked_deps, deploy_dep)
        deploy_files.setdefault(to_deploy, []).extend(deploy_dep)
# Write out json
json.dump(deploy_files, sys.stdout, indent=2)
|
990,209 | f420b6dc058a92f5999c56fb70e910e792e8bd48 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 18:12:19 2018
@author: 18665
"""
from saveSymbolList import filename
def get_all_symbol_detail():
    """Load and return the symbol list saved by saveSymbolList (JSON file)."""
    import json
    with open(filename, 'r') as fh:
        return json.load(fh)
def symbol_replace(symbol):
    """Insert an underscore before the quote currency of a trading pair.

    Priority: usd first, then eth (unless btc is also present), then btc.
    Returns None when the symbol contains none of the known quote currencies.
    Only the first occurrence of the quote string is rewritten.
    """
    if 'usd' in symbol:
        return symbol.replace('usd', '_usd')
    # usd is already excluded here, so no further usd check is needed.
    if 'eth' in symbol and 'btc' not in symbol:
        return symbol.replace('eth', '_eth')
    if 'btc' in symbol:
        return symbol.replace('btc', '_btc')
    return None
if __name__ == '__main__':
    # Build a mapping from each raw symbol name to its underscored form,
    # printing both along the way for inspection.
    symbolList = get_all_symbol_detail()
    dicts = {}
    for s in symbolList:
        print(s)
        ss = symbol_replace(s)
        print(ss)
        dicts[s] = ss
990,210 | 5ff53f2d59085c7e937e07fef4b256f0ac78d888 |
__author__ = 'Ловягин Даниил Анатольевич'
# Задача-1: Ввести ваше имя и возраст в отдельные переменные,
# вычесть из возраста 18 и вывести на экран в следующем виде:
# "Василий на 2 года/лет больше 18"
# по желанию сделать адаптивный вывод, то есть "на 5 лет больше", "на 3 года меньше" и.т.д.
# TODO: код пишем тут...
# Task 1: read name and age, report how far the age is from 18.
user_name = str(input('Введите Ваше Имя:'))
user_age = int(input('Введите Ваш возраст:'))
a = user_age - 18  # signed distance from 18
if user_age > 18:
    print(user_name, 'на', a, 'лет больше 18')
else:
    # NOTE(review): when age == 18 this prints "на 0 лет меньше 18" --
    # confirm whether an exact-18 message is wanted.
    print(user_name, 'на', abs(a), 'лет меньше 18')
# Задача-2: Исходные значения двух переменных запросить у пользователя.
# Поменять значения переменных местами. Вывести новые значения на экран.
# Подсказка:
# * постарайтесь сделать решение через дополнительную переменную
# или через арифметические действия
# Не нужно решать задачу так:
# print("a = ", b, "b = ", a) - это неправильное решение!
# TODO: код пишем тут...
# Task 2: swap two integers using arithmetic (no temporary variable).
a = int(input('Первое число'))
b = int(input('Второе число'))
a = a + b  # a now holds the sum
b = a - b  # b becomes the original a
a = a - b  # a becomes the original b
print('Новое первое число', a)
print('Новое второе число', b)
# Задача-3: Напишите программу, вычисляющую корни квадратного уравнения вида
# ax² + bx + c = 0.
# Коэффициенты уравнения вводятся пользователем.
# Для вычисления квадратного корня воспользуйтесь функцией sqrt() модуля math:
# import math
# math.sqrt(4) - вычисляет корень числа 4
# TODO: код пишем тут...
# Task 3: solve the quadratic equation ax^2 + bx + c = 0 for user-supplied
# coefficients, printing zero, one or two real roots.
from math import *
a = float(input('Введите число а - '))
b = float(input('Введите число b - '))
c = float(input('Введите число c - '))
d = b ** 2 - 4 * a * c  # discriminant
print('Дискриминант =', round(d, 2))
if d < 0:
    print('Корней нет')
elif d == 0:
    # BUG FIX: was `x = - b / 2 * a`, which computes (-b/2)*a due to
    # operator precedence; the repeated root is -b / (2a).
    x = -b / (2 * a)
    print('Корень один -', round(x, 2))
else:
    x1 = (-b + sqrt(d)) / (2 * a)
    x2 = (-b - sqrt(d)) / (2 * a)
    # BUG FIX: the second root was labelled 'x1=' as well.
    print('Два корня :', 'x1=', round(x1, 2), 'x2=', round(x2, 2))
|
990,211 | e5928fc7f8aafaaa490e6996df9e918898bef24b | # -*- coding: utf-8 -*-
'''
'''
class Json(object):
    """Placeholder for a JSON wrapper type; no behaviour implemented yet."""
    pass
990,212 | 6be1d9db7a19f27de362a10d02bf0914718aa585 | #!/usr/bin/env python
from ast import *
import ast, md5
import sys
import nodeTransformer
import os.path, os
import zipfile
from nodeTransformer import str2ast, AstVariable
from pyModule import *
from script2script.tools import echo
#TODO don't forget the With ast stuff
#TODO for the future, add a list of import to include (for the __import__('name') def)
#TODO for the future, add a list of import to not include (because they are in a if)
#TODO do a PythonModule for .zip files
#TODO do a test on pyc and pyo files, saying it don't take them
#Imports variations :
# import X => ok, have a variable = module object
# import X.Y => ok, have a variable = module object
# import X.Y as titi => ok, have a variable = module object
# import X as titi => ok, have a variable = module object
# from X import * => not ok, TODO when we will have dir()
# from X import a, b, c => ok, variabel = module + assignation
# from X import a as toto => ok, variabel = module + assignation
# X = import('X')
# X = __import__('X')
# from . import echo
# from .. import formats
# from ..filters import equalizer
from script2script.lang.python.ast2simple.parsePython import ast2str
class MoreImport(nodeTransformer.NodeTransformer):
    """
    Transform each import to be ready for transforming to Simple
    The visit function of MoreImport should have the '__main__' module
    """
    def __init__(self, paths=sys.path):
        """
        paths should not contain the '' path,
        it should be changed by the current path
        """
        self.paths = paths
        self.module = PythonModuleList()
        for path in paths:
            self.module.addModule(PythonModuleOnDisk(path))
        self.moduleResolver = SimpleModuleResolver(self.module)
        # name -> parsed module body (True while the module is being loaded,
        # to break import cycles)
        self.needed_modules = {}

    def addModule(self, name):
        """
        add a module to the module list
        if the module is already here, don't add it
        the name should be the absolute name of the module
        """
        if name in self.needed_modules: return
        self.needed_modules[name] = True  # avoid circular references
        module = self.moduleResolver.find(name)
        # BUG FIX: this local was previously named `ast`, which shadowed the
        # `ast` module for the whole function body and made `ast.parse` raise
        # UnboundLocalError before the assignment could run.
        tree = ast.parse(module.getContent(), module.getPath(), 'exec').body
        # NOTE(review): `self.getModule` is not defined in this class; it is
        # presumably provided by the base class (or should be `module`) --
        # confirm.
        self.needed_modules[name] = ImportOneModule(self.getModule(name), tree, self).getModuleBody()

    @echo
    def getAbsName(self, path, fromPath):
        # Resolve *path* relative to *fromPath*; return its dotted absolute
        # name, or None when the module cannot be found.
        res = self.moduleResolver.find(path, fromPath)
        return '.'.join(res.getNames()) if res else None

    def visit(self, node):
        # Entry point: node must be the '__main__' ast.Module.
        assert isinstance(node, ast.Module)
        module = PythonModuleStatic('', '', '__main__')
        node.body = ImportOneModule(module, node.body, self).getModuleBody()
        #do stuffs
        return node
class ImportOneModule(nodeTransformer.NodeTransformer):
    """
    Load a module body list and return it with :
     - __name__ and __file__ variable set
     - Module object construct and variable affectation
    Return a list, the first element is the module affectation :
    genModuleVar = type('Module', (), {})
    """
    def __init__(self, module, ast, moreImportObject):
        # module: the PythonModule being converted
        # ast: its body (list of statements)
        # moreImportObject: the owning MoreImport, used to resolve names
        self._module = module
        self._ast = ast
        self._moreImportObject = moreImportObject
        self._moduleVar = self.genVar('moduleObject')

    def getModuleBody(self):
        # Prepend __name__/__file__ assignments, rewrite imports, then wrap
        # everything so all top-level names are mirrored onto a module object.
        ast = [
            Assign([Name('__name__', Store())], Str(self._module.getName())),
            Assign([Name('__file__', Store())], Str(self._module.getPath() or '')),
        ] + self._ast
        #replace the import statements
        res = str2ast('from importlib import import_module') + self.visit(ast)
        #affect values to the module
        moduleAffectation = str2ast("moduleObject = type('Module', (), {})()", moduleObject = self._moduleVar.name)
        res = moduleAffectation + ModuleAffectation(self._moduleVar).visit(res)
        # Debug dump of the generated code (Python 2 print statement).
        print ast2str(res)
        return res

    def visit_Import(self, node):
        # Expand each alias of an `import a, b as c` statement separately.
        res = []
        for aliaselem in node.names:
            name, asname = aliaselem.name, aliaselem.asname
            if asname:
                # NOTE(review): genImportWithAsName ends in `pass` and so
                # returns None; `res += None` will raise TypeError -- this
                # branch looks unfinished.
                res += self.genImportWithAsName(name, asname)
            else:
                res += self.genImportWithoutAsName(name)
        return res

    def genImportWithAsName(self, name, asname):
        """
        Import, the imported element is the last of the list toto.titi.tutu => tutu
        """
        name = self._moreImportObject.getAbsName(name, '.'.join(self._module.getNames()))
        # TODO: unimplemented -- currently returns None (see visit_Import).
        pass

    def genImportWithoutAsName(self, name):
        """
        Import, the imported element is the first of the list toto.titi.tutu => toto
        """
        objectName = name.split('.')[0]
        absName = self._moreImportObject.getAbsName(name, '.'.join(self._module.getNames()))
        #if absName is None:
        #  return str2ast('raise ImportError("no module named %s")' % name)
        return str2ast("name = import_module('%s', None)" % absName, name = objectName)
class ModuleAffectation(nodeTransformer.NodeTransformer):
    """
    For each affectation in the ast,
    create a do another one for the variable name in the form
    toto = 'tutu'
    myVar.toto = toto
    It's used in the module part to have the module.var acess
    It's not used for import ast element
    """
    #TODO to complete
    def __init__(self, var):
        # NOTE(review): `NodeTransformer` here resolves via `from ast import *`
        # to ast.NodeTransformer, not to the nodeTransformer.NodeTransformer
        # base this class actually inherits from -- confirm which __init__ is
        # intended.
        NodeTransformer.__init__(self)
        self._var = var if isinstance(var, AstVariable) else AstVariable(var)

    def _genAffect(self, name):
        """
        generate affectation value
        """
        # Emits: <moduleVar>.<name> = <name>
        return [self._var.assign( ast.Name(name, Load()), name)]

    @staticmethod
    def getAllNames(eList):
        """
        Return all the names inside a tuple/list assignment
        Don't return object attribute assigment
        """
        if isinstance(eList, ast.Name): return [eList.id]
        if isinstance(eList, ast.List) or isinstance(eList, ast.Tuple):
            res = []
            for e in eList.elts:
                res += ModuleAffectation.getAllNames(e)
            return res
        #in attribute case
        return []

    def visit_Assign(self, node):
        # NOTE(review): the asserts restrict to a single Name target even
        # though getAllNames can handle tuple/list targets -- confirm intent.
        assert len(node.targets) == 1
        assert isinstance(node.targets[0], Name)
        names = []
        for t in node.targets:
            names += self.getAllNames(t)
        resAst = [node]
        for name in names:
            resAst += self._genAffect(name)
        return resAst

    def visit_ClassDef(self, node):
        # Mirror class definitions onto the module object as well.
        return [node] + self._genAffect(node.name)

    def visit_FunctionDef(self, node):
        # Mirror function definitions onto the module object as well.
        return [node] + self._genAffect(node.name)
#class NoMoreImport(nodeTransformer.NodeTransformer):
# init_code = """
# class Module(object): pass
#
# class DictModule(object):
# def __init__(self):
# self.content = {}
#
# def add(self, name, fct):
# self.content[name] = (False, fct)
#
# #TODO do it for recursive module
# def getModule(self, name):
# if name not in self.content:
# raise ImportError("No module named %s" % name)
#
# l, v = self.content[name]
# if l: return v #v is the module
#
# m = Module()
# self.content[name] = (True, m)
# v(m)
# return m
#
# dictModule = DictModule()
# __import__ = dictModule.getModule
# """
#
# def __init__(self, moduleRef, curPath=''):
# NodeTransformer.__init__(self)
#
# self.dict_imports = {} #import in a dict form
#
# self._moduleRef = moduleRef
# self._curPath = curPath
#
#
#
# def main_visit(self, node):
# #the node should be a module
# res = self.visit(node)
#
# dictModule_klass = self.genVar('DictModule')
# dictModule_inst = self.genVar('dictModule')
# before = str2ast(self.init_code, DictModule=dictModule_klass.name, dictModule=dictModule_inst.name)
#
#
# for k, (vname, vast) in self.dict_imports.iteritems():
# before += vast
# before += str2ast("dictModule.add('%s', moduleFctVar)" % k, dictModule = dictModule_inst.name, moduleFctVar=vname.name)
# pass
#
# node.body = before + node.body
# return node
#
#
# def genImport(self, module, fctName):
# """
# Generate the import function for this name
# The import function is in the form :
# def fctName(genVar_398_module):
# genVar_398_module.__file__ = "/my/module/file/path/toto/tutu"
# genVar_398_module.__name__ = "toto.tutu"
#
# It dont' return a module, just affection values to the parameter module
# """
#
# contentAst = ast.parse(module.getContent(), module.getPath(), 'exec').body
# moduleVar = self.genVar('module')
#
# contentAst = [
# moduleVar.assign(Str(module.getPath()), '__file__'),
# moduleVar.assign(Str('.'.join(module.getNames()[1:])), '__name__'), #TODO remove the [1:]
# ] + ModuleAffectation(moduleVar.name).visit(contentAst)
#
# arguments = ast.arguments([moduleVar.param()], None, None, [])
# return [FunctionDef(fctName, arguments , contentAst, [] )]
#
#
# def addImport(self, name):
# """
# from a name (and searching the reference of the current object)
# return an absolute name for the import.
# Put the module result into the inter dict
# """
#
# resModule = self._moduleRef.find(name, self._curPath)
#
# fctName = self.genVar('importfct')
#
# if resModule is None: #error case
# codeAst = str2ast("""
# def fctName(moduleVar):
# raise ImportError("No module named %s")
# """ % name, fctName = fctName.name)
#
# self.dict_imports[name] = (fctName, codeAst)
#
# return name
#
# newName = '.'.join(resModule.getNames()[1:]) #TODO remove the [1:]
# codeAst = self.genImport(resModule, fctName.name)
#
# self.dict_imports[newName] = (fctName, codeAst)
#
# return newName
#
#
# def visit_Import(self, node):
# res = []
#
# for alias in node.names:
# name = alias.name
# asname = alias.asname or name
#
# #2 cases
# # - import toto.tutu.titi as tralala => this is tralal = toto.tutu.titi => lo see after
# # - import toto.tutu.titi => toto = toto, toto.tutu = toto.tutu, toto.tutu.titi = toto.tutu.titi
# absName = self.addImport(name)
#
# res += [
# str2ast("asname = __import__('%s')" % absName, asname = asname)
# ]
#
# return res
#
#
#
#
#__EOF__
|
class Employee:
    """Minimal employee record with JSON-style (de)serialisation."""

    def __init__(self, username=""):
        self.username = username

    def json(self):
        """Return a JSON-serialisable dict representation of this employee."""
        return {"username": self.username}

    def __repr__(self):
        return str(self.json())

    @staticmethod
    def json_parse(json):
        """Rebuild an Employee from a dict produced by json()."""
        return Employee(username=json["username"])
|
990,214 | 7e6443b35948109196a32bc78c27a1f419ff3270 | cube = np.array([0.01, 0.01, 0.01]) |
990,215 | d3edc0ebaaf5f58d3280c5651a67e788967d6bac | from inventory import Inventory
class Player():
    """A player with a name, a current room and a running score.

    NOTE(review): Player does not inherit from Inventory, yet calls
    Inventory.__init__(self, ...) and other Inventory methods unbound-style.
    This only works if Inventory's methods merely set/read attributes on the
    object passed in -- consider `class Player(Inventory)` instead; confirm.
    """
    def __init__(self, name, room, score=0):
        self.name = name
        self.room = room
        self.score = score
        # Attach inventory state to this instance (see class note above).
        Inventory.__init__(self, self.name)

    def changeRoom(self, direction):
        # Move to the adjacent room in *direction* if one exists; otherwise
        # stay put and tell the player.
        next_room = self.room.getRoomInDirection(direction)
        if next_room == None:
            print(f"\nLocation unchanged. There is nothing {direction}.")
        else:
            self.room = next_room
            print(f'\nYou are currently: \n{self.room}')

    def lookRoom(self, direction):
        # Peek at the adjacent room without moving.
        next_room = self.room.getRoomInDirection(direction)
        if next_room == None:
            print(f"\nThere is nothing {direction}.")
        else:
            print(f'\nGoing {direction} will take you to: \n{next_room}')

    def playerItems(self):
        # Show inventory contents followed by the current score.
        Inventory.showItems(self, self.name)
        self.getScore()

    def addItem(self, newItem):
        # Pick up an item: its points are added to the score once, then
        # stripped from the item so they cannot be double-counted.
        newPoints = Inventory.addItem(self, newItem)
        self.score = self.score + newPoints
        Inventory.removePoints(self, newItem)
        Inventory.showItems(self, self.name)
        self.getScore()

    def dropItem(self, newItem):
        Inventory.dropItem(self, newItem)

    def getScore(self):
        print(f"\n Current Score: {self.score}")

    def getItem(self, name):
        return Inventory.getItem(self, name)
990,216 | 6c43494e84d1ce80807a9565cd11a125218a0c5c | import unittest
import numpy as np
import torch
from numpy.testing import assert_equal
from attack import class_ensemble
class TestUtils(unittest.TestCase):
    """Tests for attack.class_ensemble.

    Judging by the fixtures, class_ensemble appears to return the
    column-wise majority label of a predictions matrix -- confirm against
    the implementation.
    """
    def test_class_ensemble(self):
        # Round-trip through torch -> numpy to mirror real usage.
        a = torch.tensor([[3, 2, 1], [1, 2, 1], [3, 2, 2]])
        a = a.numpy()
        print(a.shape, a)
        a = np.array(a)  # NOTE(review): redundant -- `a` is already an ndarray
        idx2 = class_ensemble(a)
        print(idx2)
        assert_equal(idx2.numpy(), [3, 2, 1])

    def test_class_ensemble2(self):
        m = np.array([[0, 2, 1], [1, 1, 0], [1, 2, 0]])
        idx2 = class_ensemble(m)
        print(idx2)
        assert_equal(idx2.numpy(), [1, 2, 0])
if __name__ == '__main__':
unittest.main() |
990,217 | 2434f48368a348608edaf65da2d616bf96d025e8 | from collections import deque
import sys
input = sys.stdin.readline  # fast line reader for many test cases
for _ in range(int(input())):
    # n vertices (1-based in input), m undirected edges.
    n,m=map(int,input().split())
    nodes=[[] for _ in range(n)]
    for _ in range(m):
        a,b=map(int,input().split())
        nodes[a-1].append(b-1)
        nodes[b-1].append(a-1)
    # Traverse from vertex 0, counting rounds in c.
    # NOTE(review): q.pop() removes from the RIGHT of the deque, so each
    # round is not a true BFS level; if level-order distance is intended,
    # this should be q.popleft() -- confirm against the problem statement.
    q = deque([0])
    vstd=[False for _ in range(n)]
    c=0
    vstd[0]=True
    while q:
        for _ in range(len(q)):
            curr = q.pop()
            for i in nodes[curr]:
                if not vstd[i]:
                    vstd[i]=True
                    q.append(i)
        c+=1
    print(c)
|
990,218 | 2ac1a5d9aa2346538268065b138c8d854502d4b4 | from sklearn.metrics import mean_squared_error
from math import sqrt
class RMSEEvaluator:
    """Computes the RMSE between two matrices.

    An optional decoratee may pre-convert both inputs before scoring.
    """

    def __init__(self, decoratee=None):
        self._decoratee = decoratee

    def evaluate(self, matrixA, matrixB):
        """Return [(rmse, "", "")] for the two (possibly converted) matrices."""
        if self._decoratee:
            matrixA = self._decoratee.convert(matrixA)
            matrixB = self._decoratee.convert(matrixB)
        rmse = sqrt(mean_squared_error(matrixA, matrixB))
        return [(rmse, "", "")]
|
990,219 | d220eb94d502800f072588376bd2dc069773eaf9 | #!/usr/bin/env python2
# -*- coding: utf8 -*-
#
# Copyright (c) 2017 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Jesse Griffin <jesse@unfoldingword.org>
"""
This script was used to convert the MD tN files to TSV format.
Ran on 2018-05-31, see https://git.door43.org/unfoldingWord/en_tn/pulls/1064
"""
import re
import glob
import codecs
import string
import random
# DONE
# * Clean intro files
# * remove ## Links (vim)
# * sed -i 's/ #*$//' */*/intro.md
# * Rename intro files to 00-intro so they sort right
# * $ for x in `find * -type f -name 'intro.md'`; do mv $x ${x%%intro.md}00.md; done
id_list = []
linkre = re.compile(ur'\[\[(.*?)\]\]', re.UNICODE)
books_nums = {}
books = {
u'GEN': [ u'Genesis', '01' ],
u'EXO': [ u'Exodus', '02' ],
u'LEV': [ u'Leviticus', '03' ],
u'NUM': [ u'Numbers', '04' ],
u'DEU': [ u'Deuteronomy', '05' ],
u'JOS': [ u'Joshua', '06' ],
u'JDG': [ u'Judges', '07' ],
u'RUT': [ u'Ruth', '08' ],
u'1SA': [ u'1 Samuel', '09' ],
u'2SA': [ u'2 Samuel', '10' ],
u'1KI': [ u'1 Kings', '11' ],
u'2KI': [ u'2 Kings', '12' ],
u'1CH': [ u'1 Chronicles', '13' ],
u'2CH': [ u'2 Chronicles', '14' ],
u'EZR': [ u'Ezra', '15' ],
u'NEH': [ u'Nehemiah', '16' ],
u'EST': [ u'Esther', '17' ],
u'JOB': [ u'Job', '18' ],
u'PSA': [ u'Psalms', '19' ],
u'PRO': [ u'Proverbs', '20' ],
u'ECC': [ u'Ecclesiastes', '21' ],
u'SNG': [ u'Song of Solomon', '22' ],
u'ISA': [ u'Isaiah', '23' ],
u'JER': [ u'Jeremiah', '24' ],
u'LAM': [ u'Lamentations', '25' ],
u'EZK': [ u'Ezekiel', '26' ],
u'DAN': [ u'Daniel', '27' ],
u'HOS': [ u'Hosea', '28' ],
u'JOL': [ u'Joel', '29' ],
u'AMO': [ u'Amos', '30' ],
u'OBA': [ u'Obadiah', '31' ],
u'JON': [ u'Jonah', '32' ],
u'MIC': [ u'Micah', '33' ],
u'NAM': [ u'Nahum', '34' ],
u'HAB': [ u'Habakkuk', '35' ],
u'ZEP': [ u'Zephaniah', '36' ],
u'HAG': [ u'Haggai', '37' ],
u'ZEC': [ u'Zechariah', '38' ],
u'MAL': [ u'Malachi', '39' ],
u'MAT': [ u'Matthew', '41' ],
u'MRK': [ u'Mark', '42' ],
u'LUK': [ u'Luke', '43' ],
u'JHN': [ u'John', '44' ],
u'ACT': [ u'Acts', '45' ],
u'ROM': [ u'Romans', '46' ],
u'1CO': [ u'1 Corinthians', '47' ],
u'2CO': [ u'2 Corinthians', '48' ],
u'GAL': [ u'Galatians', '49' ],
u'EPH': [ u'Ephesians', '50' ],
u'PHP': [ u'Philippians', '51' ],
u'COL': [ u'Colossians', '52' ],
u'1TH': [ u'1 Thessalonians', '53' ],
u'2TH': [ u'2 Thessalonians', '54' ],
u'1TI': [ u'1 Timothy', '55' ],
u'2TI': [ u'2 Timothy', '56' ],
u'TIT': [ u'Titus', '57' ],
u'PHM': [ u'Philemon', '58' ],
u'HEB': [ u'Hebrews', '59' ],
u'JAS': [ u'James', '60' ],
u'1PE': [ u'1 Peter', '61' ],
u'2PE': [ u'2 Peter', '62' ],
u'1JN': [ u'1 John', '63' ],
u'2JN': [ u'2 John', '64' ],
u'3JN': [ u'3 John', '65' ],
u'JUD': [ u'Jude', '66' ],
u'REV': [ u'Revelation', '67' ],
}
def getOLQuote(b, c, v, glquote):
    '''Eventually, look at alignment data and return occurrence num and orig quote'''
    # Stub: occurrence u'0' signals "not found"; original-language quote empty.
    return [u'0', u'']
def getNoteID():
    """Return a unique 4-character alphanumeric note ID.

    Constraints: contains at least one digit and one letter, never contains
    '0' or 'o' (too easily confused), does not start with a digit, and is
    recorded in the module-level id_list so it is never handed out twice.
    """
    while True:
        candidate = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(4))
        has_digit = any(ch.isdigit() for ch in candidate)
        has_alpha = any(ch.isalpha() for ch in candidate)
        if not (has_digit and has_alpha):
            continue
        if '0' in candidate or 'o' in candidate:
            continue
        if candidate[0].isdigit():
            continue
        if candidate not in id_list:
            id_list.append(candidate)
            return candidate
def convertToList(f, tn_checks):
    '''Iterates through each verse file'''
    # f is '<book>/<chapter>/<verse>.md' (book already renumbered); append
    # that file's notes as TSV rows under tn_checks[<book code>].
    b, c, v = f.rstrip('.md').split('/')
    if c == u'00':
        # Chapter 00 holds book-level front matter.
        c = u'front'
    f = f.replace(b, books_nums[b], 1).lower()
    b = books_nums[b]
    if not b in tn_checks:
        # First row of each book is the TSV header.
        tn_checks[b] = [[u'Book', u'Chapter', u'Verse', u'ID', u'SupportReference',
                         u'OrigQuote', u'Occurrence', u'GLQuote', u'OccurrenceNote']]
    if v in ['00']:
        # This is an introduction, which has a different format than a regular note
        for line in codecs.open(f, 'r', encoding='utf-8').readlines():
            if line.startswith(u'# '):
                ID = getNoteID()
                # We'll use the intro text for the ref
                ref = line.replace(u'#', u'').strip()
                olquote = u''
                occurrence = u'0'
                glquote = u''
                note_text = line.strip()
                continue
            # This is the note text
            note_text += line.strip()
            note_text += u'<br>'
        # NOTE(review): only the values from the last '# ' heading survive
        # to this single append -- confirm an intro file has one heading.
        tn_checks[b].append([b, c, u'intro', ID, ref, olquote, occurrence, glquote, note_text])
        return tn_checks
    for line in codecs.open(f, 'r', encoding='utf-8').readlines():
        # This is the text snippet from the ULB
        if line.startswith(u'#'):
            ID = getNoteID()
            ref = u''
            glquote = line.strip(u'#').strip()
            occurrence, olquote = getOLQuote(b, c, v, glquote)
            continue
        # This is the note text (skips blank lines)
        if not line.startswith(u'\n'):
            note_text = line.strip()
            if u'/en/ta/' in note_text:
                if linkre.search(note_text):
                    # Use the last path segment of the tA link as the ref.
                    ref = linkre.search(note_text).group(1).split('/')[-1]
            try:
                tn_checks[b].append([b, c, v, ID, ref, olquote, occurrence, glquote, note_text])
            except UnboundLocalError:
                # Note text appeared before any '#' heading (ID never set).
                print b, c, v, line
    return tn_checks
def saveToTSV(tsv_file, data):
    '''Write *data* (a list of rows, each a list of unicode cells) to
    *tsv_file* as UTF-8 tab-separated values, one row per line.'''
    with codecs.open(tsv_file, 'w', encoding='utf-8') as writer:
        for item in data:
            # BUG FIX: cells were joined with a space; this script produces
            # .tsv files (see the module docstring), so join with a TAB.
            row = u'\t'.join(item)
            writer.write(u'{0}\n'.format(row))
if __name__ == "__main__":
tn_checks = {}
file_list = [x for x in glob.glob('*/*/*.md')]
numbered_list = []
for x in file_list:
b = x.split('/')[0]
bup = b.upper()
newbook = books[bup][1]
numbered_list.append(x.replace(b, newbook))
books_nums[newbook] = bup
numbered_list.sort()
for f in numbered_list:
tn_checks = convertToList(f, tn_checks)
for k,v in tn_checks.items():
tn_check_file = 'en_tn_{0}-{1}.tsv'.format(books[k][1], k)
saveToTSV(tn_check_file, v)
|
# Print the even members of the list in ascending order, comma-separated.
list1 = [1, 2, 5, 14, 20, 30, 12, 200, 69, 83]
list1.sort()
even_values = [value for value in list1 if value % 2 == 0]
for value in even_values:
    print(value, end = ", ")
|
990,221 | 25a75c6dd732264cf646f6045ce4e9df30140062 | from flask import Flask, render_template, request
# Nutrition-clinic site: each route simply renders its static template.
app = Flask(__name__)

@app.route('/')
def index():
    """Landing page."""
    return render_template('inicio.html')

@app.route('/contacto')
def contacto():
    """Contact page."""
    return render_template('contacto.html')

@app.route('/nutriologa')
def nutriologa():
    """Nutritionist experience/credentials page."""
    return render_template('nutriologaexperiencia.html')

@app.route('/cita')
def cita():
    """Appointment booking page."""
    return render_template('Reserva_cita.html')

@app.route('/alimentacionsana')
def alimentacionsana():
    """Healthy-eating page."""
    return render_template('alimentacionsana.html')

@app.route('/embarazo')
def embarazo():
    """Nutrition-during-pregnancy page."""
    return render_template('alimentacion_embarazada.html')

@app.route('/calculadora')
def calculadora():
    """Calorie calculator page."""
    return render_template('calculadora_calorias.html')

@app.route('/ejercicio')
def ejercicio():
    """Nutrition-for-exercise page."""
    return render_template('alimentacion_gym.html')

if __name__ == '__main__':
    # Debug server for local development only.
    app.run(debug=True)
|
990,222 | b00fd8f489db75003b45ee393989a5aec8cb6158 | # Generated by Django 3.2.6 on 2021-08-12 06:22
from django.db import migrations
class Migration(migrations.Migration):
    """Reset the `item` model's db_table back to Django's default name."""
    dependencies = [
        ("lists", "0001_initial"),
    ]
    operations = [
        migrations.AlterModelTable(
            name="item",
            table=None,  # None restores the auto-generated table name
        ),
    ]
|
990,223 | 6c6ae3d061766754ecde4f0ada127e39ec64853d | import autofit as af
import autolens as al
from test_autolens.integration.tests.interferometer import runner
test_type = "lens__source"
test_name = "lens_light_mass__source__hyper_bg"
data_name = "lens_light__source_smooth"
instrument = "sma"
def make_pipeline(name, folders, real_space_mask, search=af.DynestyStatic()):
    """Build a two-phase interferometer pipeline for a lens (light + mass)
    plus source fit, with hyper-galaxy / background-sky / background-noise
    extensions after each phase.

    NOTE(review): the mutable default `search=af.DynestyStatic()` is
    evaluated once at import time and shared across calls -- confirm this
    is intended.
    """
    # Phase 1: fit lens light + mass and source light from scratch.
    phase1 = al.PhaseInterferometer(
        phase_name="phase_1",
        folders=folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5,
                light=al.lp.SphericalDevVaucouleurs,
                mass=al.mp.EllipticalIsothermal,
            ),
            source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
        ),
        real_space_mask=real_space_mask,
        search=search,
    )
    phase1.search.const_efficiency_mode = True
    phase1.search.n_live_points = 60
    phase1.search.facc = 0.8
    phase1 = phase1.extend_with_multiple_hyper_phases(
        hyper_galaxies_search=True,
        include_background_sky=True,
        include_background_noise=True,
    )
    # Phase 2: refine the phase-1 model, carrying over hyper-galaxy and
    # background instances from the phase-1 hyper-combined result.
    phase2 = al.PhaseInterferometer(
        phase_name="phase_2",
        folders=folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5,
                light=phase1.result.model.galaxies.lens.light,
                mass=phase1.result.model.galaxies.lens.mass,
                hyper_galaxy=phase1.result.hyper_combined.instance.galaxies.lens.hyper_galaxy,
            ),
            source=al.GalaxyModel(
                redshift=1.0,
                light=phase1.result.model.galaxies.source.light,
                hyper_galaxy=phase1.result.hyper_combined.instance.galaxies.source.hyper_galaxy,
            ),
        ),
        hyper_image_sky=phase1.result.hyper_combined.instance.hyper_image_sky,
        hyper_background_noise=phase1.result.hyper_combined.instance.hyper_background_noise,
        real_space_mask=real_space_mask,
        search=search,
    )
    phase2.search.const_efficiency_mode = True
    phase2.search.n_live_points = 40
    phase2.search.facc = 0.8
    phase2 = phase2.extend_with_multiple_hyper_phases(
        hyper_galaxies_search=True,
        include_background_sky=True,
        include_background_noise=True,
    )
    return al.PipelineDataset(name, phase1, phase2)
if __name__ == "__main__":
import sys
runner.run(sys.modules[__name__])
|
990,224 | 7fac9762f26a47d0af72998999710da13dbdf67d | import random
# Draw 10 distinct numbers from [0, 100), keep the odd ones, then square them.
numbers = random.sample(range(100), 10)
print(numbers)
numbers_filtered = list(filter(lambda x: x % 2 != 0, numbers))
print('Numbers after filter(x % 2 != 0) : ', numbers_filtered)
# map() is lazy; it is materialised by list() in the print below.
numbers_squares = map(lambda x: x ** 2, numbers_filtered)
print('Squares of filter numbers : ', list(numbers_squares))
|
990,225 | 1e43d8d117c67f97d1ec2abcdd621dea44acd8ae |
def isAsendingOrder(n):
    """Return True if the decimal digits of n are in non-decreasing order."""
    str_n = str(n)
    # range() instead of xrange(): behaves identically here on Python 2 and
    # also runs on Python 3. Digit characters compare like their int values.
    for i in range(len(str_n) - 1):
        if str_n[i] > str_n[i + 1]:
            return False
    return True
def minimize(n):
    """One step toward the largest number <= n with non-decreasing digits.

    Finds the leftmost descent d[i] > d[i+1], decrements d[i] and sets every
    later digit to 9. Callers loop until isAsendingOrder(n) holds.
    """
    while True:
        # range() instead of xrange(): works on Python 2 and 3 alike.
        digits = [int(ch) for ch in str(n)]
        length = len(digits)
        split_idx = -1
        for i in range(length - 1):
            if digits[i] > digits[i + 1]:
                split_idx = i
                break
        if split_idx == -1:
            # Already non-decreasing: nothing to fix.
            break
        digits[split_idx] -= 1
        for i in range(split_idx + 1, length):
            digits[i] = 9
        # int() drops any leading zero produced by the decrement.
        n = int(''.join(str(d) for d in digits))
    return n
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
t = int(raw_input())  # read a line with a single integer
for i in xrange(1, t + 1):
    datas = raw_input().split(" ")  # read a list of integers, 2 in this case
    n = int(datas[0])
    # Repeatedly repair the leftmost digit descent until non-decreasing.
    while(True):
        if(isAsendingOrder(n)):
            break
        n = minimize(n)
    print "Case #{}: {}".format(i, n)
    # check out .format's specification for more formatting options
|
990,226 | 3b22bb8b491b8e484902b785dd29f4f754441fd4 | import base64
import random
from PositionSpiderProject.conf.user_agent import USER_AGENT_LIST
class RandomUserAgent(object):
    """Scrapy downloader middleware: pick a random User-Agent per request."""
    def process_request(self, request, spider):
        # USER_AGENT_LIST is imported from the project's conf module above.
        request.headers['User-Agent'] = random.choice(USER_AGENT_LIST)
class IPProxyOpenDownloadMiddleware(object):
    """Route each request through a random member of a fixed open-proxy pool.

    Pool from Zhima proxy (http://h.zhimaruanjian.com/getapi/)
    account 18210836561 fanjianhaiabc123;
    see https://pan.baidu.com/s/1U6KnIFOYhS9NT7iXd4t84g
    """
    PROXIES = ["http://122.6.198.245:4226", "http://114.217.233.220:4276", "http://111.72.109.116:4213"]

    def process_request(self, request, spider):
        chosen = random.choice(self.PROXIES)
        request.meta['proxy'] = chosen
        print(chosen)
class IPProxyExclusiveDownloadMiddleware(object):
    """Route every request through a single authenticated (exclusive) proxy.

    Exclusive proxy from kuaidaili (https://www.kuaidaili.com/pricing/#kps);
    see https://pan.baidu.com/s/1U6KnIFOYhS9NT7iXd4t84g
    """
    def process_request(self, request, spider):
        proxy = '121.199.6.124:16816'
        user_password = '970138074:rcdj35ur'
        request.meta['proxy'] = proxy
        # Basic proxy auth: base64("user:password"), decoded back to str.
        credentials = base64.b64encode(user_password.encode("utf-8")).decode("utf-8")
        request.headers["Proxy-Authorization"] = 'Basic ' + credentials
|
990,227 | 751dc91de36dac49958e9c9302cf377d74066d9b | /media/swaggyp1985/SSD1T/Software/miniconda3/lib/python3.7/ntpath.py |
990,228 | 8cd5bd927e13843aa4398354c7db324107b6f1c0 | # Generated by Django 3.1.5 on 2021-01-27 02:11
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: pluralise three field names on the Master model."""

    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='master',
            old_name='master_payment',
            new_name='master_payments',
        ),
        migrations.RenameField(
            model_name='master',
            old_name='master_region',
            new_name='master_regions',
        ),
        migrations.RenameField(
            model_name='master',
            old_name='review',
            new_name='reviews',
        ),
    ]
|
990,229 | ad4d327d3790928fca1208ea803c4532b3eef1e4 | ../../../../micropython-lib/urequests/urequests.py |
990,230 | 9fbd6ac358769bce7664576240cd1ee4d9d12bd9 | from django.apps import AppConfig
class ShowcenterConfig(AppConfig):
    """Django application configuration for the 'showcenter' app."""
    name = 'showcenter'
|
990,231 | a4a04fb7f9824cebc64a88f25688ccf4d1b1899f | #given a string of ascii characters, write a function that returns True if all the characters are unique, and false if they are not
# don't use any built in python functions
def is_unique(word):
    """Return True when every character of *word* appears exactly once.

    Falsy input (e.g. the empty string) yields None; a repeated character
    yields False. Uses a plain dict as the seen-set.
    """
    if not word:
        return None
    seen = {}
    for ch in word:
        if ch in seen:
            return False
        seen[ch] = True
    return True
# Sanity checks for the dict-based implementation.
assert is_unique('') == None
assert is_unique('abc') == True
assert is_unique('Aab') == True
assert is_unique('aba') == False
#now try doing it without any built in data structures (can use sort function for simplicity)
def is_unique_chars(word):
    """Return True when *word* has no repeated characters, via sorting.

    Falsy input yields None. After sorting, duplicates must be adjacent,
    so comparing each neighbouring pair suffices.
    """
    if not word:
        return None
    ordered = sorted(word)
    for left, right in zip(ordered, ordered[1:]):
        if left == right:
            return False
    return True
# Sanity checks for the sort-based implementation.
assert is_unique_chars('') == None
assert is_unique_chars('abc') == True
assert is_unique_chars('Aab') == True
assert is_unique_chars('aba') == False
#now try doing that without using a sort function
def unique_chars(word):
    """Return True when all characters of *word* are unique, using no extra
    data structures (O(n^2) pairwise comparison).

    Falsy input yields None; the first duplicate found yields False.
    """
    if not word:
        return None
    # Fix: removed the original's unreachable `if i >= len(word): break`
    # guard -- range(len(word)) can never produce such an index.
    for i in range(len(word)):
        for j in range(i + 1, len(word)):
            if word[i] == word[j]:
                return False
    return True
assert unique_chars('') == None
assert unique_chars('abc') == True
assert unique_chars('Aab') == True
assert unique_chars('aba') == False
print('passed')
990,232 | b8360a6914e1ca964e76d5a2b3a82406744149ea | #!/usr/bin/python
import MySQLdb
# connect
#db = MySQLdb.connect(host="10.201.148.115", user="root", passwd="em7admin",db="master")
# Open a connection to the EM7 'master' DB (note the non-default port 7706).
myDB = MySQLdb.connect(host="10.201.148.115",port=7706,user="root",passwd="em7admin",db="master")
cHandler = myDB.cursor()
cHandler.execute("SHOW DATABASES")
results = cHandler.fetchall()
# fetchall() yields 1-tuples; flatten them into a plain list of names.
list_databases = []
for items in results:
    list_databases.append(items[0])
print list_databases
# list_app_id.append(app_id[0].__int__())
#print list_app_id
990,233 | 119b57fde556da4e3acee140e30009d710eb945a | from fastapi import FastAPI, Header, HTTPException
from pydantic import BaseModel
import uvicorn
import requests
from datetime import datetime
import sqlite3
import json
class SkatUser(BaseModel):
    # Request body for SkatUser creation: the external user's id.
    UserId: int
class SkatYear(BaseModel):
    # Request body for SkatYear creation: display label and date range.
    label : str
    startDate: datetime
    endDate: datetime
class Tax(BaseModel):
    # Request body for /pay-taxes: payer id and pre-tax amount.
    UserId: int
    Amount: int
db = sqlite3.connect('./Skat/skat.sqlite')
app = FastAPI()
# SkatUser CRUD endpoints
@app.post("/SkatUser/create", status_code=201)
async def create_SkatUser(skatUser: SkatUser):
    """Create a SkatUser row for the given UserId unless one already exists."""
    # Check if UserID already exist
    #Read from one user with id
    queryGet = 'SELECT * FROM SkatUser WHERE UserId = ?'
    select = db.execute(queryGet, [skatUser.UserId]).fetchone()
    if(select == None):
        # Create SkatUser
        query = 'INSERT INTO SkatUser (UserId, IsActive) VALUES (?,?)'
        db.execute(query, [skatUser.UserId, 1])
        db.commit()
        return read_skatUserFunc(skatUser.UserId)
    if(len(select) > 0):
        # NOTE(review): 409 Conflict would describe "already exists" better
        # than 404; changing it would alter the public API, so only flagged.
        raise HTTPException(status_code=404, detail="The user does already exist!")
def read_skatUserFunc(skatUser_id: int):
    """Fetch one SkatUser row by UserId; raises HTTPException(404) when missing."""
    #Read from one user with id
    query = 'SELECT * FROM SkatUser WHERE UserId = ?'
    select = db.execute(query, [skatUser_id]).fetchone()
    if(select == None):
        raise HTTPException(status_code=404, detail="The user does not exist!")
    if(len(select) > 0):
        # NOTE(review): this builds a *set* of the four column values, which
        # drops duplicates and loses column order/names; a dict like the one
        # built in read_skatUsers is probably intended -- confirm before changing.
        skatUser = {select[0], select[1], select[2], select[3]}
        return skatUser
@app.get("/SkatUser/read/{skatUser_id}", status_code=200)
async def read_skatUser(skatUser_id: int):
    """Return one SkatUser by UserId (404 when missing).

    Bug fix: the path placeholder was '{SkatUser_id}', which does not match
    the 'skatUser_id' parameter, so FastAPI exposed the id as a query
    parameter instead of reading it from the URL path.
    """
    return read_skatUserFunc(skatUser_id)
@app.get("/SkatUser/readall", status_code=200)
async def read_skatUsers():
    """Return every SkatUser row as a list of JSON-serialisable dicts."""
    rows = db.execute('SELECT * FROM SkatUser')
    return [
        {'Id': r[0], 'UserId': r[1], 'CreatedAt': r[2], 'IsActive': r[3]}
        for r in rows
    ]
@app.put("/SkatUser/update", status_code=200)
async def update_skatUser(setActive: int, UserId: int):
    """Set the IsActive flag (must be 0 or 1) of an existing SkatUser."""
    #check if the SkatUser exist
    query = 'SELECT * FROM SkatUser WHERE UserId = ?'
    select = db.execute(query, [UserId]).fetchone()
    if(select == None):
        raise HTTPException(status_code=404, detail="The SkatUser does not exist!")
    if((setActive == 1) or (setActive ==0)):
        query2 = 'UPDATE SkatUser SET IsActive = ? WHERE UserId = ?'
        db.execute(query2, [setActive, UserId])
        db.commit()
        return read_skatUserFunc(UserId)
    else:
        # NOTE(review): 422/400 would fit an invalid flag value better than 404.
        raise HTTPException(status_code=404, detail="The is active can only be a 1 or 0!")
@app.delete("/skatUser/delete/{skatUser_id}", status_code=200)
async def delete_skatuUser(skatUser_id: int):
    """Delete one SkatUser row by primary key and return the removed values.

    Raises HTTPException(404) when no row with that Id exists.
    Bug fix: the path placeholder was '{skatUse_id}', which does not match
    the 'skatUser_id' parameter, so the id arrived as a query parameter
    instead of from the URL path.
    """
    #Read from one SkatUser with id
    query = 'SELECT * FROM SkatUser WHERE Id = ?'
    select = db.execute(query, [skatUser_id]).fetchone()
    if(select == None):
        raise HTTPException(status_code=404, detail="The address does not exist!")
    if(len(select) > 0):
        # NOTE(review): a set of column values loses order/names; kept as-is
        # for parity with read_skatUserFunc.
        skatUser = {select[0], select[1], select[2], select[3]}
        #Delete the skatUser
        query = 'DELETE FROM SkatUser WHERE Id = ?'
        db.execute(query, [skatUser_id])
        db.commit()
        return skatUser
# SkatYear CRUD endpoints
@app.post("/SkatYear/create", status_code=201)
async def create_SkatYear(skatYear: SkatYear):
    """Create a SkatYear and seed a zeroed SkatUserYear row for every SkatUser."""
    # Create SkatUser
    query = 'INSERT INTO SkatYear (Label, StartDate, EndDate) VALUES (?,?,?)'
    c = db.execute(query, [skatYear.label, skatYear.startDate, skatYear.endDate])
    skatYearId = c.lastrowid
    query = """SELECT Id, UserId FROM SkatUser"""
    users = db.execute(query)
    for row in users:
        # One unpaid, zero-amount entry per existing user for the new year.
        query2 = 'INSERT INTO SkatUserYear (SkatUserId, SkatYearId, UserId, isPaid, Amount) VALUES (?,?,?,?,?)'
        db.execute(query2, [row[0], skatYearId, row[1], 0, 0])
    db.commit()
    return read_skatYearFunc(skatYearId)
def read_skatYearFunc(skatYear_id: int):
    """Fetch one SkatYear row by Id; raises HTTPException(404) when missing."""
    #Read from one user with id
    query = 'SELECT * FROM SkatYear WHERE Id = ?'
    select = db.execute(query, [skatYear_id]).fetchone()
    if(select == None):
        # NOTE(review): message says "user" but this is a SkatYear lookup.
        raise HTTPException(status_code=404, detail="The user does not exist!")
    if(len(select) > 0):
        # NOTE(review): this builds a *set* of the six column values, which
        # drops duplicates and loses order/names; a dict (as in
        # read_skatYears) is probably intended -- confirm before changing.
        skatYear ={select[0], select[1], select[2], select[3], select[4], select[5]}
        return skatYear
@app.get("/SkatYear/read/{skatYear_id}", status_code=200)
async def read_skatYear(skatYear_id: int):
    """Return one SkatYear by Id (404 when missing).

    Bug fix: the path placeholder was '{SkatYear_id}', which does not match
    the 'skatYear_id' parameter, so FastAPI exposed the id as a query
    parameter instead of reading it from the URL path.
    """
    return read_skatYearFunc(skatYear_id)
@app.get("/SkatYear/readall", status_code=200)
async def read_skatYears():
    """Return every SkatYear row as a list of JSON-serialisable dicts."""
    rows = db.execute('SELECT * FROM SkatYear')
    return [
        {'Id': r[0], 'Label': r[1], 'CreatedAt': r[2], 'ModifiedAt': r[3],
         'StartAt': r[4], 'EndAt': r[5]}
        for r in rows
    ]
@app.put("/SkatYear/update", status_code=200)
async def update_skatYear(label: str, modifiedAt: datetime, id: int):
    """Rename a SkatYear and stamp its ModifiedAt; 404 when the Id is unknown."""
    #check if the address exist
    query = 'SELECT * FROM SkatYear WHERE Id = ?'
    select = db.execute(query, [id]).fetchone()
    if(select == None):
        # NOTE(review): message says "address" -- copy/paste leftover.
        raise HTTPException(status_code=404, detail="The address does not exist!")
    else:
        query = 'UPDATE skatYear SET Label = ?, ModifiedAt = ? WHERE Id = ?'
        db.execute(query, [label, modifiedAt, id])
        db.commit()
        return read_skatYearFunc(id)
@app.delete("/skatYear/delete/{skatYear_id}", status_code=200)
async def delete_skatYear(skatYear_id: int):
    """Delete one SkatYear by Id; SkatUserYear rows are removed by cascade.

    Raises HTTPException(404) when no row with that Id exists.
    Bug fix: the path placeholder was '{id}', which does not match the
    'skatYear_id' parameter, so the id arrived as a query parameter instead
    of from the URL path.
    """
    #Read from one user with id
    query = 'SELECT * FROM SkatYear WHERE Id = ?'
    select = db.execute(query, [skatYear_id]).fetchone()
    if(select == None):
        raise HTTPException(status_code=404, detail="The skatYear does not exist!")
    else:
        # NOTE(review): only four of the six columns are returned, as a set.
        skatUser = {select[0], select[1], select[2], select[3]}
        query = 'DELETE FROM SkatYear WHERE Id = ?'
        db.execute(query, [skatYear_id])
        db.commit()
        # Delete corresponding SkatUserYear - Cascade is doin' dis for us
        return skatUser
@app.post("/pay-taxes", status_code=200)
async def pay_taxes(tax: Tax):
    """Compute and record a user's tax for their latest year, then withdraw it.

    Flow: read the newest SkatUserYear row for the user; if still unpaid
    (Amount <= 0), call the tax-calculator service, persist the result, and
    ask the bank API to withdraw that amount.
    """
    #Check if user paid taxes (if SkatUserYear.Amount > 0) ? Why not IsPaid == true?
    #Call Tax Calculator - SkatUserYear.Amount = response.sum & IsPaid = true
    #Call BankAPI/subtractMoneyFromAccount - Body: UserId, Amount
    query = "SELECT Amount FROM SkatUserYear WHERE UserId = ? ORDER BY Id DESC;" #Order by DESC to get the latest entry
    c = db.execute(query, [tax.UserId])
    skatuseryear = c.fetchone() #Fetch latest row
    if(skatuseryear == None):
        raise HTTPException(status_code=404, detail="The skatYear does not exist!")
    if skatuseryear[0] <= 0:
        obj = {'money': tax.Amount}
        response = requests.post("http://localhost:7071/api/Skat_Tax_Calculator", data=json.dumps(obj))
        if response.status_code == 200:
            query2 = "UPDATE SkatUserYear SET Amount = ?, IsPaid = ? WHERE UserId = ?"
            db.execute(query2, [(response.json()['tax_money']), 1, tax.UserId])
            db.commit()
            obj2 = {'UserId': tax.UserId, 'Amount': response.json()['tax_money']}
            # NOTE(review): the bank response is never checked -- a failed
            # withdrawal still leaves the year marked as paid; confirm.
            response2 = requests.post("http://localhost:5003/withdrawal-money", data=json.dumps(obj2))
            return response.json()['tax_money']
    return "You have already paid"
#Start server with uvicorn
if __name__ == "__main__":
    # Serve the API on localhost:5002 when executed directly.
    uvicorn.run(app, host="127.0.0.1", port=5002, log_level="info")
990,234 | f9aa88c7a0bed94ee5b8c7aa5728faf161f5fad5 | """
Основной скрипт который запускает тесты и сохраняет графики
"""
from test import kernel_test, single_core_fibonacci
from grafic_test import grafic
from throttling import trottling
print("Start testing")
RESULT_FIBONACCI_TEST = single_core_fibonacci()
grafic(RESULT_FIBONACCI_TEST[1], "TEST_FIBONACCI")
trottling(RESULT_FIBONACCI_TEST[1], "TEST_FIBONACCI")
RESULT_KERNEL_TEST = kernel_test()
grafic(RESULT_KERNEL_TEST["get_kernel_test"][1], "TEST_GET_KERNEL")
grafic(RESULT_KERNEL_TEST["extract_kernel_test"][1], "TEST_EXTRACT_KERNEL")
grafic(RESULT_KERNEL_TEST["make_kernel_test"][1], "TEST_MAKE_KERNEL")
trottling(RESULT_KERNEL_TEST["get_kernel_test"][1], "TEST_GET_KERNEL")
trottling(RESULT_KERNEL_TEST["extract_kernel_test"][1], "TEST_EXTRACT_KERNEL")
trottling(RESULT_KERNEL_TEST["make_kernel_test"][1], "TEST_MAKE_KERNEL")
print("testing done")
|
990,235 | 69cab3d0e190970b53d88450d20465d4112bb828 | from django.conf.urls import url
from apps.hobbygroups.dashboard import views
# Catch-all route: every path under this app resolves to the dashboard index.
urlpatterns = [url(r"^.*$", views.index, name="hobbies_dashboard_index")]
|
990,236 | 5a52a4edcfd38cae62b0745cad7c982875d56949 | #!/usr/bin/env python
'''
This module computes weekly midday vapour pressure deficit (VPD) values
and thermal time from DWD (German Weather Service - http://www.dwd.de)
XML data
'''
import sys
import os
import re
import math
import datetime
from collections import defaultdict
import numpy as np
from lxml import etree
__author__ = 'Christian Schudoma'
__copyright__ = 'Copyright 2013-2014, Christian Schudoma'
__license__ = 'MIT'
__maintainer__ = 'Arne Neumann'
def trost2date(trost_date):
    """Convert a 'YYYY-MM-DD' date string into a ``datetime.date``."""
    parts = trost_date.split('-')
    return datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))
def date2trost(datetime_date):
    """Format a ``datetime.date`` back into the 'YYYY-MM-DD' TROST string."""
    # Month and day are zero-padded to two digits.
    return '%d-%02d-%02d' % (datetime_date.year,
                             datetime_date.month,
                             datetime_date.day)
def read_dwd_climate_data(climate_file, start_date='2011-04-11',
                          end_date='2011-09-02',
                          use_datetime=True):
    """
    Reads DWD Climate Data (tested on hourly temperatures, hourly rel.
    humidities) in the interval of (start_date (YYYY-MM-DD),
    end_date (YYYY-MM-DD)) from a DWD XML file containing data from a single
    weather station.
    Parameters
    ----------
    climate_file : str
        file name of a DWD XML climate file
    start_date : str
        start date in YYYY-MM-DD format (inclusive)
    end_date : str
        end date in YYYY-MM-DD format (inclusive)
    use_datetime : bool
        If True, keys include the HH:mm:ss time component
    Returns
    -------
    station_data : dict
        If use_datetime is True, maps ('YYYY-MM-DD', 'HH:mm:ss') tuples to a
        list of (exactly one) float. If use_datetime is False, maps a
        'YYYY-MM-DD' date string to a list of floats (one per measurement
        taken on that date).
    """
    start, end = trost2date(start_date), trost2date(end_date)
    station_data = defaultdict(list)
    tree = etree.parse(climate_file)
    stations = list(tree.iterfind('{http://www.unidart.eu/xsd}stationname'))
    # Bug fix: the assertion message referenced an undefined name 'dwd_file',
    # which would have raised NameError instead of the intended AssertionError.
    assert len(stations) == 1, "Can't handle multi-station file '{}'".format(climate_file)
    for station in stations:
        for datapoint in station.iterchildren():
            point_of_time = datetime.datetime.strptime(datapoint.attrib['date'],
                                                       "%Y-%m-%dT%H:%M:%SZ")
            value = float(datapoint.text)
            day = point_of_time.date()
            if day >= start and day <= end:
                if use_datetime:
                    key = (date2trost(day), point_of_time.time().isoformat())
                else:
                    key = date2trost(day)
                station_data[key].append(value)
    return station_data
def calc_VPD(t_celsius, rel_humidity):
    """
    Return the vapour pressure deficit (VPD) for a temperature (degrees
    Celsius) and a relative humidity given as a fraction in [0.0, 1.0].

    Saturation vapour pressure follows the Licor LI-6400 manual (pg 14-10)
    and Buck AL (1981), "New equations for computing vapor pressure and
    enhancement factor", J Appl Meteor 20:1527-1532.

    Alternative formulations (not used here) include the Wikipedia
    Vapour_Pressure_Deficit polynomial fit, the Ohio State AEX-804 fit, and
    the Clausius-Clapeyron form 6.11 * exp((2.5e6/461) * (1/273 - 1/(273+T))).
    """
    saturation_vp = 0.61365 * math.exp((17.502 * t_celsius) / (240.97 + t_celsius))
    air_vp = saturation_vp * rel_humidity
    # Equivalent to saturation_vp * (1 - rel_humidity).
    return saturation_vp - air_vp
def compute_weekly_midday_vpd(temperatures, relHumidity):
    """
    Computes the weekly midday (10am - 2pm) VPD.

    Both arguments map ('YYYY-MM-DD', 'HH:MM[:SS]') keys to one-element
    lists of measurements; only timepoints present in both are used.
    Returns dictionary {(year, week): average of the daily median midday VPDs}
    """
    # VPD per timepoint, restricted to timepoints present in both series.
    hourly = {timepoint: calc_VPD(temperatures[timepoint][0], relHumidity[timepoint][0])
              for timepoint in set(temperatures.keys()).intersection(set(relHumidity.keys()))}
    daily = {}
    midday = (datetime.datetime.strptime('10:00:00', '%H:%M:%S'),
              datetime.datetime.strptime('14:00:00', '%H:%M:%S'))
    for tp in hourly:
        # Bug fix: the original bare 'except:' swallowed every exception
        # (including KeyboardInterrupt); only the seconds-less time format
        # needs the fallback parse, which raises ValueError.
        try:
            hour = datetime.datetime.strptime(tp[1], '%H:%M:%S')
        except ValueError:
            hour = datetime.datetime.strptime(tp[1], '%H:%M')
        if midday[0] <= hour <= midday[1]:
            daily[tp[0]] = daily.get(tp[0], []) + [hourly[tp]]
    weekly = {}
    for k in sorted(daily):
        # Bucket each day's median midday VPD into its (%Y, %W) week.
        week = tuple(map(int, datetime.datetime.strftime(datetime.datetime.strptime(k, '%Y-%m-%d'), '%Y-%W').split('-')))
        weekly[week] = weekly.get(week, []) + [np.median(daily[k])]
    return {week: sum(weekly[week])/len(weekly[week]) for week in weekly}
def calc_heat_sum(tmin, tmax, tbase=6.0):
    """
    Daily thermal-time (heat sum) contribution.

    Defined as max(tx - tbase, 0) with tx = (tmin + tmax)/2, where the
    measured maximum temperature is capped at 30.0 degrees C.
    """
    capped_max = tmax if tmax < 30.0 else 30.0
    mean_temp = (tmin + capped_max) / 2.0
    excess = mean_temp - tbase
    return excess if excess > 0.0 else 0.0
def compute_heatsum_per_day(maxTemps, minTemps):
    """
    Cumulative heat sum per day, from daily min/max temperature dicts.

    Only days present in both inputs are used; the returned dict maps each
    day to the running total of calc_heat_sum up to and including that day.
    """
    running_total = 0
    cumulative = {}
    shared_days = set(maxTemps) & set(minTemps)
    for day in sorted(shared_days):
        running_total += calc_heat_sum(minTemps[day], maxTemps[day])
        cumulative[day] = running_total
    return cumulative
def compute_heatsum_per_week(heatsum_day, day=5):
    """
    Weekly heat sums sampled on one representative weekday.

    day=5 selects Fridays (strftime %w: Sunday=0), the end of the weekly
    measuring interval for many DWD stations. Returns {(year, week): value}.
    """
    weekly = {}
    for date_str, value in heatsum_day.items():
        stamp = datetime.datetime.strptime(date_str, '%Y-%m-%d')
        year, week, weekday = (int(part) for part in stamp.strftime('%Y %W %w').split())
        if weekday == day:
            weekly[(year, week)] = value
    return weekly
def main(argv):
    """CLI entry point: read two DWD XML files, compute weekly midday VPD
    and heat sums, and write them to a tab-separated output file.

    NOTE(review): this module uses Python 2 syntax (print statements, map
    returning a list).
    """
    if len(argv) != 4:
        print 'Usage python %s <temperatures> <relHumidity> <start,end> <outfile>' % os.path.basename(sys.argv[0])
        print '<temperatures>, <relHumidity>: DWD XML files'
        print '<start,end>: start and end date in the format "YYYY-MM-DD,YYYY-MM-DD" (don\'t forget the "s)'
        print '<outfile>: specify a file for writing the output WARNING: file will be overwritten!'
        sys.exit(1)
    # manage parameters and read raw data
    fn_Temperatures, fn_RelHumidities = argv[0], argv[1]
    startd, endd = argv[2].split(',')
    fout = argv[3]
    rawTemperatures = read_dwd_climate_data(fn_Temperatures, start_date=startd, end_date=endd)
    rawRelHumidities = read_dwd_climate_data(fn_RelHumidities, start_date=startd, end_date=endd)
    # convert %-values from DWD to fractional values
    for k in rawRelHumidities:
        rawRelHumidities[k] = map(lambda x: x/100.0, rawRelHumidities[k])
    # group temperatures per day in order to facilitating the computation of daily min/max
    groupedTemperatures = {}
    for k in rawTemperatures:
        groupedTemperatures[k[0]] = groupedTemperatures.get(k[0], []) + rawTemperatures[k]
    # compute daily min/max temperatures
    maxTemperatures, minTemperatures = {}, {}
    for k in sorted(groupedTemperatures):
        maxTemperatures[k], minTemperatures[k] = max(groupedTemperatures[k]), min(groupedTemperatures[k])
    # compute VPD and thermal time
    VPD = compute_weekly_midday_vpd(rawTemperatures, rawRelHumidities)
    HEATSUM = compute_heatsum_per_week(compute_heatsum_per_day(maxTemperatures, minTemperatures))
    # write heatsum/vpd values
    out = open(fout, 'wb')
    out.write('Week\tVPD_midday[kPa]\theatsum[Cd]\n')
    # NOTE(review): the header names VPD then heatsum, but the row below
    # writes HEATSUM[k] before VPD[k] -- the columns look swapped; confirm.
    for k in sorted(set(VPD).intersection(set(HEATSUM))):
        out.write('%i-%i\t%.3f\t%.3f\n' % (k[0], k[1], HEATSUM[k], VPD[k]))
    out.close()
    pass
if __name__ == '__main__':
    main(sys.argv[1:])
|
990,237 | 480850b4837fef829a89da5eb347b3e3625bd861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Adapted from:
# https://github.com/youtube/api-samples/blob/master/python/my_uploads.py
# https://github.com/youtube/api-samples/blob/master/python/upload_video.py
import httplib2
import os
import logging
from googleapiclient.http import MediaFileUpload
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from oauth2client.file import Storage
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
# RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
# httplib.IncompleteRead, httplib.ImproperConnectionState,
# httplib.CannotSendRequest, httplib.CannotSendHeader,
# httplib.ResponseNotReady, httplib.BadStatusLine)
# NOTE(review): '(X)' is NOT a one-element tuple -- this is just the single
# exception class. It still works in an 'except' clause, but add a trailing
# comma if the commented-out exceptions above are ever reinstated.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the {{ Google Cloud Console }} at
# {{ https://cloud.google.com/console }}.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = 'client_secret.json'
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
SCOPES = ['https://www.googleapis.com/auth/youtube.readonly',
'https://www.googleapis.com/auth/youtube.upload']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'
VALID_PRIVACY_STATUSES = ('public', 'private', 'unlisted')
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console
https://console.developers.google.com
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE))
# Authorize the request and store authorization credentials.
def yt_get_authenticated_service(args):
    """Run the OAuth2 flow (or reuse cached credentials) and return a YouTube API client."""
    flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=SCOPES, message=MISSING_CLIENT_SECRETS_MESSAGE)
    # Credentials are cached on disk so the interactive flow runs only once.
    storage = Storage("actioncam-upload-oauth2.json")
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = run_flow(flow, storage, args)
    return build(API_SERVICE_NAME, API_VERSION, credentials = credentials, cache_discovery=False)
def yt_get_my_uploads_list(youtube):
    """Return the playlist id of the authenticated user's uploads, or None."""
    # Retrieve the contentDetails part of the channel resource for the
    # authenticated user's channel.
    channels_response = youtube.channels().list(
        mine=True,
        part='contentDetails'
    ).execute()
    for channel in channels_response['items']:
        # From the API response, extract the playlist ID that identifies the list
        # of videos uploaded to the authenticated user's channel.
        # (Returns on the first channel in the response.)
        return channel['contentDetails']['relatedPlaylists']['uploads']
    return None
def yt_list_my_uploaded_videos(uploads_playlist_id, youtube):
    """Return the titles of every video in the given uploads playlist."""
    uploaded_videos = []
    # Retrieve the list of videos uploaded to the authenticated user's channel.
    playlistitems_list_request = youtube.playlistItems().list(
        playlistId=uploads_playlist_id,
        part='snippet'
    )
    logging.debug('Videos in list %s' % uploads_playlist_id)
    # Walk every result page; list_next() returns None after the last page.
    while playlistitems_list_request:
        playlistitems_list_response = playlistitems_list_request.execute()
        # Print information about each video.
        for playlist_item in playlistitems_list_response['items']:
            title = playlist_item['snippet']['title']
            video_id = playlist_item['snippet']['resourceId']['videoId']
            uploaded_videos.append(title)
            logging.debug("Title: '%s' (ID: %s)" % (title, video_id))
        playlistitems_list_request = youtube.playlistItems().list_next(playlistitems_list_request, playlistitems_list_response)
    logging.info("There are %d already uploaded videos." % len(uploaded_videos))
    return uploaded_videos
def yt_initialize_upload(file_to_upload, sequence_title, youtube, options):
    """Build the video metadata and start a resumable upload of *file_to_upload*.

    Title and description are the sequence title, optionally prefixed by
    options.title / options.description; tags come from the comma-separated
    options.keywords; category and privacy come from options as well.
    """
    tags = None
    if options.keywords:
        tags = options.keywords.split(',')
    body=dict(
        snippet=dict(
            title="%s %s" % (options.title, sequence_title) if options.title else sequence_title,
            description="%s %s" % (options.description, sequence_title) if options.description else sequence_title,
            tags=tags,
            categoryId=options.category
        ),
        status=dict(
            privacyStatus=options.privacyStatus
        )
    )
    # Call the API's videos.insert method to create and upload the video.
    insert_request = youtube.videos().insert(
        part=','.join(body.keys()),
        body=body,
        # The chunksize parameter specifies the size of each chunk of data, in
        # bytes, that will be uploaded at a time. Set a higher value for
        # reliable connections as fewer chunks lead to faster uploads. Set a lower
        # value for better recovery on less reliable connections.
        #
        # Setting 'chunksize' equal to -1 in the code below means that the entire
        # file will be uploaded in a single HTTP request. (If the upload fails,
        # it will still be retried where it left off.) This is usually a best
        # practice, but if you're using Python older than 2.6 or if you're
        # running on App Engine, you should set the chunksize to something like
        # 1024 * 1024 (1 megabyte).
        media_body=MediaFileUpload(file_to_upload, chunksize=-1, resumable=True)
    )
    yt_resumable_upload(insert_request)
# This method implements an exponential backoff strategy to resume a failed upload.
def yt_resumable_upload(request):
    """Drive a resumable upload to completion, retrying transient failures.

    Retries HttpError responses whose status is in RETRIABLE_STATUS_CODES and
    transport errors in RETRIABLE_EXCEPTIONS, sleeping a random interval with
    exponential backoff (up to 2**retry seconds) between attempts, at most
    MAX_RETRIES times. Exits the process on fatal errors.
    """
    # Bug fix: 'random' and 'time' are used below but were never imported at
    # module level; import them locally so the retry path cannot NameError.
    import random
    import time
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            logging.info('Uploading file...')
            status, response = request.next_chunk()
            if response is not None:
                if 'id' in response:
                    logging.info('Video id "%s" was successfully uploaded.' % response['id'])
                else:
                    exit('The upload failed with an unexpected response: %s' % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = 'A retriable HTTP error %d occurred:\n%s' % (e.resp.status, e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = 'A retriable error occurred: %s' % e
        if error is not None:
            logging.error(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit('No longer attempting to retry.')
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            logging.info('Sleeping %f seconds and then retrying...' % sleep_seconds)
            time.sleep(sleep_seconds)
            # Bug fix: reset the error so a subsequent successful chunk is
            # not counted as another failure.
            error = None
|
990,238 | 729e625d07bbabc59c6fbb00b3cf1c0acc0c390e | import nonebot
from nonebot import logger
from nalomu.commands import ImageCommand, method_command
def chunks(l, n):
    """Yield successive n-sized chunks (slices) of the sequence *l*."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
class UsageCommands(ImageCommand):
    """Bot command that renders a usage/help image for the loaded plugins."""
    @method_command('usage', aliases=['使用帮助', '帮助', '使用方法', '食用方法', '功能列表'])
    async def usage(self):
        # Only keep plugins that declare a display name.
        plugins = list(filter(lambda p: p.name, nonebot.get_loaded_plugins()))
        arg = self.stripped_msg.lower()
        # If the message names a plugin, render that plugin's detail view;
        # otherwise 'plugin' stays False and the overview is rendered.
        plugin = next((p for p in plugins if p.name == arg), False)
        logger.info(plugin)
        await self.send(self.render_image('usage', plugins=plugins, plugin=plugin))
|
990,239 | 96e348fb1c14a768752c1a669293c20efcae4ca8 | #encoding: utf-8
# desc: this file stores commonly used SQL statements, to keep them decoupled from the application code
990,240 | 68ca4670e5a320177c95fde37b249ed0f94adf0e | # empty blanks
# empty blanks
# Ordered placeholder tokens the player must fill in.
blanks = ["___1___", "___2___", "___3___", "___4___"]
# Game level
# Easy
easy_level = '''A ___1___ is created with the def keyword. You specify the inputs
a ___1___ takes by adding ___2___ separated by commas between the parentheses.
___1___s by default return ___3___ if you don't specify the value to return.
___2___ can be standard data types such as string, number, dictionary, tuple,
and ___4___ or can be more complicated such as objects and lambda functions.'''
# Medium
medium_level = ''' Python is one of the only ___1___ known in which there exist
an ___2___ clause for For loops. It's a special type of ___3___ that executes
only if the for loop exits naturally, without any ___4___ statements. The
___4___ statement stops the execution of the innermost loop and start executing
the next line of code which in this case is the ___2___ clause.'''
# Difficult
# NOTE(review): hard_level (and hard_level_answer below) duplicate the easy
# content verbatim -- presumably placeholder text that was never replaced.
hard_level = '''A ___1___ is created with the def keyword. You specify the inputs
a ___1___ takes by adding ___2___ separated by commas between the parentheses.
___1___s by default return ___3___ if you don't specify the value to return.
___2___ can be standard data types such as string, number, dictionary, tuple,
and ___4___ or can be more complicated such as objects and lambda functions.'''
# Game Answer
easy_level_answer = ["function", "parameters", "none", "list"]
medium_level_answer = ["languages", "else", "syntax", "break"]
hard_level_answer = ["function", "parameters", "none", "list"]
print "Welcome!"
def select_level():
    """
    Prompts the user to input the desired difficulty level
    :inputs: None
    :outputs: play_game() function call

    NOTE(review): the failure branch calls sys.exit(), but 'sys' is never
    imported anywhere in this file, so it would raise NameError. It also
    reads the module-level 'game_over', defined only after the function
    definitions (fine by call time).
    """
    while True:
        level = raw_input("Please select a game level: easy, medium, hard: ")
        count = 3
        level = level  # NOTE(review): self-assignment; has no effect.
        # Up to two more attempts while the input is not a valid level name.
        while (level not in ["easy", "medium", "hard"]) and count > 1:
            print("\n wrong or misspelled level, please try again!")
            trials = int(count) - 1
            print "You have " + str(trials) + " more trials remaining"
            level = raw_input("Select a game level: easy, medium, hard: ")
            count -= 1
        if (level not in ["easy", "medium", "hard"]) and count == 1:
            print ("\n wrong or misspelled level\n") + "You have " \
                + str(0) + " trials remaining\n" + game_over
            sys.exit()
        else:
            if level == "easy":
                return play_game(easy_level, easy_level_answer)
            elif level == "medium":
                return play_game(medium_level, medium_level_answer)
            else:
                return play_game(hard_level, hard_level_answer)
def validate_answer(user_answer, answer, game_answer_index, empty_blank):
    """
    Validate if user's answer matches the game's answer
    :inputs: user_answer : user's answer; answer: game answer;
             game_answer_index : game answer's index
    :output: user_answer (once it matches the expected answer)

    NOTE(review): exhausting all three attempts calls sys.exit(), but 'sys'
    is never imported in this file.
    """
    if user_answer == answer[game_answer_index]:
        return user_answer
    else:
        count = 3
        # Re-prompt while the answer is wrong and attempts remain.
        while user_answer != answer[game_answer_index] and count > 1:
            print "\nYour entered: " + user_answer + \
                "\nWrong or misspelled answer!"
            trials = int(count) - 1
            print "You have " + str(trials) + " more trials remaining"
            user_answer = raw_input("What goes in blank " + empty_blank + "?")
            count -= 1
        if user_answer != answer[game_answer_index] and count == 1:
            print "Your entered: " + user_answer + "\nWrong or misspelled"\
                " answer!\n" + "You have " + str(0) + \
                " trials remaining\n" + "You lost! \n" + game_over
            sys.exit()
        if user_answer == answer[game_answer_index]:
            return user_answer
def validate_blank(level, blanks):
    """Return the first placeholder from *blanks* that occurs in *level*.

    Returns None when the text contains no remaining blanks to fill.
    """
    return next((placeholder for placeholder in blanks if placeholder in level), None)
def play_game(level, answer):
    """
    prompt player to select a game difficulty level: easy, medium, hard
    when player guesses correctly, new prompt shows with correct answer
    in the previous blanks and a new prompt for the next blank.
    when player guesses incorrectly, they are prompted to try again

    NOTE(review): ends with sys.exit(), so no value is ever returned to the
    caller; 'sys' is never imported in this file.
    """
    if level == easy_level:
        print "\n You chose the EASY level game. \n\n" + level
    elif level == medium_level:
        print "\n You chose the MEDIUM level game. \n\n" + level
    elif level == hard_level:
        print "\n You chose the HARD level game. \n\n" + level
    blanks_index = 0
    game_answer_index = 0
    # One iteration per blank; validate_answer only returns once correct.
    while blanks_index < len(blanks):
        empty_blank = validate_blank(level, blanks)
        user_answer = raw_input("\nWhat goes in blank " + empty_blank + "?")
        valid_answer = validate_answer(
            user_answer, answer, game_answer_index, empty_blank)
        if valid_answer == answer[game_answer_index]:
            # print "\n Your answer is correct! \n"
            # Substitute the solved blank back into the displayed text.
            level = level.replace(empty_blank, valid_answer)
            print "\n Your answer is correct! \n\n" + level
            game_answer_index += 1
            blanks_index += 1
    print "\n Congratulations, You won the game!!! "
    sys.exit()
game_over = "Game Over!!"
# NOTE(review): select_level() already runs the game, and play_game() ends in
# sys.exit(), so the outer play_game(...) wrapper below never executes with a
# meaningful argument -- this was probably meant to be just select_level().
print play_game(select_level())
990,241 | 0dacc15fba989a567e992b065833569ac9ea5003 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import pymongo
db_name = 'nlp100_pizzaboi'
def bigram(sentence):
    """Return the list of character bigrams of *sentence* (Python 2: coerced to unicode)."""
    bigrams = []
    sentence_uni = unicode(sentence)
    for i in xrange(len(sentence_uni) - 1):
        # Each entry is a slice of two consecutive characters.
        bigrams.append(sentence_uni[i:i+2])
    return bigrams
def tweet2dict(t):
    """Turn one tab-separated tweet line into a {field_name: value} dict.

    The tweet body's character bigrams are appended as a 12th field; lines
    that do not yield exactly 12 fields return None (implicitly).
    """
    # NOTE(review): 'data' is likely a typo for 'date' (the index created in
    # __main__ uses 'date'); kept as-is to avoid changing stored documents.
    names = ('url','data','user','name','body','rt','rep','qt','rtf','repf','qtf','bigram')
    t = t.strip('\n').split('\t')
    t.append(bigram(t[4]))
    if len(t) == 12:
        # Bug fix: the original used dict(zip(t, names)), which made the
        # tweet VALUES the dictionary keys and the field names the values.
        D = dict(zip(names, t))
        return D
if __name__ == '__main__':
    # Load tab-separated tweets from stdin into MongoDB, then index the
    # commonly queried fields.
    # Bug fix: 'sys' was never imported at module level; import it before use.
    import sys

    conn = pymongo.Connection()
    db = conn[db_name]
    for t in sys.stdin:
        db.tweets.insert( tweet2dict(t) )
    # Bug fix: the original index-spec list was missing its closing bracket,
    # which made the whole module a SyntaxError.
    db.tweets.create_index([
        ('url', 1),
        ('date', 1),
        ('user', 1),
        ('rt', 1),
        ('rep', 1),
        ('qt', 1),
        ('bigram', 1)])
990,242 | 0db5903aee64b08c9d426a45cba1a2c0fee2a341 | input("It is working if it pauses for input!")
|
990,243 | d64c330e8c60076df836f517e84d32c530008cb5 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 24 11:32:44 2019
@author: LocalAdmin
DMM module for the Keithley 2110 5 1/2 Digit Multimeter
"""
# Standard libraries
import pyvisa as visa
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import pickle as pkl
import os
# Custom libraries
import tc332_module as tc
import sourcemeter_module as sm
import instrument_module as instr
import multimeter_module as dmm
sleepy_time = 0.001
def connect_dmm2100():
    """Sets up connection to the Keithly DMM 2100"""
    rm = visa.ResourceManager()
    # VISA resource string for this specific unit (serial 1416380).
    return rm.open_resource('USB0::0x05E6::0x2100::1416380::INSTR')
def connect_dmm2110():
    """Sets up connection to the Keithly DMM 2110, not the 2100"""
    rm = visa.ResourceManager()
    # VISA resource string for this specific unit (serial 8010814).
    return rm.open_resource('USB0::0x05E6::0x2110::8010814::INSTR')
# =============================================================================
# Setting functions
# =============================================================================
def set_meas_time_all(instrument, time_meas):
    """Set the measurement time for all measurements in seconds or number of PLCs
    (power line cycles: 200 ms for EU grid of 50 Hz)"""
    # Applies the same aperture to the current, voltage and resistance channels;
    # each setter verifies its own read-back and prints the outcome.
    set_meas_time_current(instrument, time_meas)
    set_meas_time_voltage(instrument, time_meas)
    set_meas_time_resistance(instrument, time_meas)
def set_meas_time_current(instrument, time_meas):
    """Program the current-measurement aperture (seconds) and verify it stuck."""
    instrument.write('SENSE:CURR:DC:APER %s' % time_meas)
    # Give the instrument a moment before reading the value back.
    time.sleep(sleepy_time)
    readback = get_meas_time_current(instrument)
    if readback == time_meas:
        print('Measurement time was set to %s s for current' % time_meas)
    else:
        print('Measurement time was NOT correctly set to %s s for current' % time_meas)
def set_meas_time_voltage(instrument, time_meas):
    """Program the voltage-measurement aperture (seconds) and verify it stuck."""
    instrument.write('SENSE:VOLT:DC:APER %s' % time_meas)
    # Give the instrument a moment before reading the value back.
    time.sleep(sleepy_time)
    readback = get_meas_time_voltage(instrument)
    if readback == time_meas:
        print('Measurement time was set to %s s for voltage' % time_meas)
    else:
        print('Measurement time was NOT correctly set to %s s for voltage' % time_meas)
def set_meas_time_resistance(instrument, time_meas):
    """Set the measurement time for resistance in seconds"""
    instrument.write('SENSE:RESISTANCE:APER %s' % time_meas)
    # Check if value was set
    time.sleep(sleepy_time)
    # BUG FIX: read back the RESISTANCE aperture; the original queried the
    # voltage aperture, so this check verified the wrong channel.
    time_actual = get_meas_time_resistance(instrument)
    if time_actual != time_meas:
        print('Measurement time was NOT correctly set to %s s for resistance' % time_meas)
    else:
        print('Measurement time was set to %s s for resistance' % time_meas)
# =============================================================================
# Query functions
# =============================================================================
def meas_voltage(instrument, v_range=10, resolution=0.003):
    """Trigger a DC voltage measurement on the DMM and return it as a float."""
    command = 'MEAS:VOLTage:DC? %s,%s' % (v_range, resolution)
    return float(instrument.query(command))
def meas_resistance(instrument):
    """Trigger a resistance measurement on the DMM and return it as a float."""
    reply = instrument.query('MEAS:RESistance?')
    return float(reply)
def meas_pressure(instrument):
    """Measure the DMM voltage and convert it into pressure (bars)."""
    voltage = meas_voltage(instrument)
    return volt_to_pressure(voltage)
def volt_to_pressure(volt):
    """Convert a measured voltage into pressure in bars (scale: value / 10)."""
    pressure = volt / 10
    return pressure
def get_meas_time_current(instrument):
    """Query the configured current-measurement aperture (seconds)."""
    reply = instrument.query('SENSE:CURR:DC:APER?')
    return float(reply)
def get_meas_time_voltage(instrument):
    """Query the configured voltage-measurement aperture (seconds)."""
    reply = instrument.query('SENSE:VOLT:DC:APER?')
    return float(reply)
def get_meas_time_resistance(instrument):
    """Query the configured resistance-measurement aperture (seconds)."""
    reply = instrument.query('SENSE:RESISTANCE:APER?')
    return float(reply)
|
990,244 | 9685ab273ff045d83cdcf18fc257aff4008f3419 | import pandas as pd
from matplotlib import pyplot as plt
# Plot median salary by age and shade where Python salaries beat / trail
# the all-developers line (fill_between demo).
data = pd.read_csv('matplotlib5.csv')
ages = data['Age']
dev_salaries = data['All_Devs']
py_salaries = data['Python']
# NOTE(review): js_salaries is loaded but never plotted below -- confirm
# whether it can be dropped.
js_salaries = data['JavaScript']
# creating two regular line graphs
plt.plot(ages, dev_salaries, color='#444444',
         linestyle='--', label='All Devs')
plt.plot(ages, py_salaries, label='Python')
# use .fill_between to fill between a line and another value.
# .fill_between(x values, y values, another set of y values) to fill in between the two y values.
# can use where = (condition) argument to specify how to fill. Ex. color = 'something', alpha = 'something', label = 'something'
# Lightly shade the region where Python salaries exceed the all-devs line...
plt.fill_between(ages, py_salaries, dev_salaries,
                 where= (py_salaries > dev_salaries), interpolate = True, alpha = 0.25, label = 'aboveavg')
# ...and shade in red where they fall below it.
plt.fill_between(ages, py_salaries, dev_salaries,
                 where= (py_salaries < dev_salaries), interpolate = True, color = 'red', alpha = 0.5, label = 'belowavg')
plt.legend()
plt.title('Median Salary (USD) by Age')
plt.xlabel('Ages')
plt.ylabel('Median Salary (USD)')
plt.tight_layout()
plt.show()
|
990,245 | 4ec01fc81ae581310f432db1f41e14c4922300ff | # read the three newest input files
# grab the uptime duration and compare it to the last known value stored in separate file
# if time increased, up
# else down
import sys
# argv[1]: file holding one 'uptime'-style line for a storage node;
# argv[2]: reference file with the previously seen raw uptime string.
infile = sys.argv[1]
reffile = sys.argv[2]
strg1Read = open(infile,"r")
strg1Ref = open(reffile,"r")
refTimeStr = ""
# Last non-empty line of the reference file = previously recorded uptime.
for line in strg1Ref:
    if line != "\n":
        refTimeStr = line
for line in strg1Read:
    if line != "\n":
        # Split off the "load average: a, b, c" tail; the head holds
        # "up <duration>, N users".
        data1=line.split("load average: ")
        #data=line.split(", ")
        data2=data1[0].split(", ")
        data3=data1[1].split(", ")
        group = "Storage"
        # Map the rotating file name (newestN) to a node name.
        # NOTE(review): assumes the input path has at least 7 '/'-separated
        # components -- confirm against the caller's directory layout.
        name = infile.split(".")[0].split("/")[6]
        if name == "newest1":
            name = "strg001"
        elif name == "newest2":
            name = "strg002"
        elif name == "newest3":
            name = "strg003"
        elif name == "newest4":
            name = "strg004"
        uptimeRaw = data2[0].strip()
        uptime = data2[0].split("up")[1].strip()
        if len(data2)==4: # this has been up more than one day
            uptime += " " + data2[1].strip()
            users = data2[2].split("user")[0].strip()
        else:
            users = data2[1].split("user")[0].strip()
        #users = data[1].split("user")[0].strip()
        #users = data2[2].split("user")[0].strip()
        #load = data[2].split(",")
        #load = data[3].split(",")
        #load01 = load[0].split(":")[1].strip()
        #load05 = load[1].strip()
        #load15 = load[2].strip()
        load01 = data3[0].strip()
        load05 = data3[1].strip()
        load15 = data3[2].strip()
        # strg1Ref.write(uptime + "\n")
        # Unchanged uptime since last check means the node may be down.
        if uptimeRaw == refTimeStr:
            status = "Unknown"
        else:
            status = "Up"
        # Rewrite the reference file with the current raw uptime string.
        strg1Ref.close()
        strg1Ref = open(reffile,"w")
        strg1Ref.write(uptimeRaw)
        print group + "#" + name + "#" + status + "#" + uptime + "#" + users + "#" + load01 + "#" + load05 + "#" + load15
strg1Read.close()
strg1Ref.close()
|
990,246 | ef5cd9bf188dc207c35f4cb9cefcca8f0353b150 | import os
from google.cloud import datastore
from google.cloud import storage
# os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'./etc/dbauthen.json'
# os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.join(
# os.path.dirname(os.path.realpath(__file__)), 'etc', 'dbauthen.json')
datastore_client = datastore.Client()
storage_client = storage.Client()
# Datastore kind/table name
kind = 'Prayer'
# storage bucket
bucket_name = "namo-chatbot"
bucket = storage_client.get_bucket(bucket_name)
def get_prayer_image_url(prayer_name):
    """Return the public URLs of the image(s) attached to a named prayer.

    Only the first matching 'Prayer' entity is consulted. Its
    ``image_object`` property may hold either a list of blob names or a
    single blob name; blobs missing from the bucket are skipped silently.
    Returns [] when no entity matches.
    """
    query = datastore_client.query(kind='Prayer')
    query.add_filter('name', '=', prayer_name)
    results = list(query.fetch())
    if len(results) == 0:
        return []
    image_urls = []
    if type(results[0]["image_object"]) == list:
        for image_object in results[0]["image_object"]:
            blob = bucket.get_blob("prayers/"+image_object)
            if blob is None:
                continue
            image_urls.append(blob.public_url)
    elif type(results[0]["image_object"]) == str:
        blob = bucket.get_blob("prayers/"+results[0]["image_object"])
        if blob is not None:
            image_urls.append(blob.public_url)
    return image_urls
def get_all_prayer():
    """Fetch every entity of kind 'Prayer' from Datastore as a list."""
    prayer_query = datastore_client.query(kind='Prayer')
    return list(prayer_query.fetch())
def get_prayers_by_tag(tag):
    """Fetch all 'Prayer' entities whose 'tags' property contains *tag*."""
    tag_query = datastore_client.query(kind='Prayer')
    tag_query.add_filter('tags', '=', tag)
    return list(tag_query.fetch())
def get_teaching_image_url(id):
    """Return the public URLs of the image(s) for the 'Teaching' entity *id*.

    ``image_object`` may hold either a list of blob names or a single blob
    name; blobs missing from the bucket are skipped silently. Returns []
    when the entity does not exist.
    """
    key = datastore_client.key('Teaching', id)
    result = datastore_client.get(key)
    # BUG FIX: the original printed result["image_object"] BEFORE this None
    # check (a leftover debug print), crashing when the entity is missing.
    if result is None:
        return []
    image_urls = []
    if isinstance(result["image_object"], list):
        for image_object in result["image_object"]:
            blob = bucket.get_blob("teachings/"+image_object)
            if blob is None:
                continue
            image_urls.append(blob.public_url)
    elif isinstance(result["image_object"], str):
        blob = bucket.get_blob("teachings/"+result["image_object"])
        if blob is not None:
            image_urls.append(blob.public_url)
    return image_urls
def get_all_teaching():
    """Fetch every entity of kind 'Teaching' from Datastore as a list."""
    teaching_query = datastore_client.query(kind='Teaching')
    return list(teaching_query.fetch())
def get_teaching_by_tag(tag):
    """Fetch all 'Teaching' entities whose 'tags' property contains *tag*."""
    tag_query = datastore_client.query(kind='Teaching')
    tag_query.add_filter('tags', '=', tag)
    return list(tag_query.fetch())
# print(get_prayers_by_tag("สุข"))
# print(get_prayer_image_url("ทดสอบ"))
# print(get_prayer_image_url("อิติปิโส"))
# print(get_all_prayer()[0]['name'])
# print(get_teaching_image_url(5643280054222848))
# print(get_all_teaching())
# print(get_teaching_image_url(get_all_teaching()[0].id))
# print(get_teaching_by_tag("ปัญญา"))
# print(get_teaching_by_tag("กรรม"))
|
990,247 | f4305adb064094df8849c6f08db0e3b9d19055d1 | # Released under the MIT License. See LICENSE for details.
#
# This file was automatically generated from "the_pad.ma"
# pylint: disable=all
points = {}
# noinspection PyDictCreation
boxes = {}
boxes['area_of_interest_bounds'] = (
(0.3544110667, 4.493562578, -2.518391331)
+ (0.0, 0.0, 0.0)
+ (16.64754831, 8.06138989, 18.5029888)
)
points['ffa_spawn1'] = (-3.812275836, 4.380655495, -8.962074979) + (
2.371946621,
1.0,
0.8737798622,
)
points['ffa_spawn2'] = (4.472503025, 4.406820459, -9.007239732) + (
2.708525168,
1.0,
0.8737798622,
)
points['ffa_spawn3'] = (6.972673935, 4.380775486, -7.424407061) + (
0.4850648533,
1.0,
1.597018665,
)
points['ffa_spawn4'] = (-6.36978974, 4.380775486, -7.424407061) + (
0.4850648533,
1.0,
1.597018665,
)
points['flag1'] = (-7.026110145, 4.308759233, -6.302807727)
points['flag2'] = (7.632557137, 4.366002373, -6.287969342)
points['flag_default'] = (0.4611826686, 4.382076338, 3.680881802)
boxes['map_bounds'] = (
(0.2608783669, 4.899663734, -3.543675157)
+ (0.0, 0.0, 0.0)
+ (29.23565494, 14.19991443, 29.92689344)
)
points['powerup_spawn1'] = (-4.166594349, 5.281834349, -6.427493781)
points['powerup_spawn2'] = (4.426873526, 5.342460464, -6.329745237)
points['powerup_spawn3'] = (-4.201686731, 5.123385835, 0.4400721376)
points['powerup_spawn4'] = (4.758924722, 5.123385835, 0.3494054559)
points['shadow_lower_bottom'] = (-0.2912522507, 2.020798381, 5.341226521)
points['shadow_lower_top'] = (-0.2912522507, 3.206066063, 5.341226521)
points['shadow_upper_bottom'] = (-0.2912522507, 6.062361813, 5.341226521)
points['shadow_upper_top'] = (-0.2912522507, 9.827201965, 5.341226521)
points['spawn1'] = (-3.902942148, 4.380655495, -8.962074979) + (
1.66339533,
1.0,
0.8737798622,
)
points['spawn2'] = (4.775040345, 4.406820459, -9.007239732) + (
1.66339533,
1.0,
0.8737798622,
)
points['tnt1'] = (0.4599593402, 4.044276501, -6.573537395)
|
990,248 | 3118c95b21d8ae6d6939ff37b3a4429c1303e02f | # -*- coding: utf-8 -*-
import scrapy
from scrapy import FormRequest,Request
from ..items import NewscrawlItem
from configparser import ConfigParser
import os
class avcjSpider(scrapy.Spider):
    """Spider for avcj.com news: logs in as a subscriber, then crawls the
    news listing pages and extracts one NewscrawlItem per article."""
    name = "avcj"
    allowed_domains = ["www.avcj.com"]
    start_urls = ['https://www.avcj.com/type/news']
    login_url = 'https://www.avcj.com/userlogin'
    def getContent(self, response):
        """Extract headline, date, URL and body text from an article page."""
        spiderItem = NewscrawlItem()
        spiderItem['site'] = "avcj : News"
        spiderItem['headlines'] = response.xpath('.//header[@class="article-header"]').xpath('.//h1[@class="article-title"]/text()').extract_first()
        spiderItem['dates'] = response.xpath('.//li[@class="author-dateline-time"]/time/text()').extract_first()
        spiderItem['links'] = response.request.url
        li = response.css('.article-page-body-content').xpath('.//p/text()').extract()
        spiderItem['content'] = ' '.join(li)
        return spiderItem
    def parse(self, response):
        """Crawl page 1 articles, then fan out to older listing pages.

        The number of extra pages comes from spiders.cfg next to this file.
        """
        work_dir = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(work_dir,'spiders.cfg')
        print(filepath)
        config_raw = ConfigParser()
        config_raw.read(filepath)
        pageNumberToScrapyForOldPost = int(config_raw.get('avcj', 'pageNumberToScrapy').strip())
        newsURLlist = response.xpath('.//div[@class="list-view"]/article/div[@class="image-text-group-b"]/h5/a/@href').extract()
        for url in newsURLlist:
            request = response.follow(url, callback = self.getContent)
            yield request
        otherPageURLTemplate = 'https://www.avcj.com/type/news/page/'
        # Pages 2 .. N+1 are handled by parseRecursion (no further fan-out).
        for pagenumber in range(2,pageNumberToScrapyForOldPost+2):
            otherPageURL = otherPageURLTemplate + str(pagenumber)
            request = response.follow(otherPageURL, callback = self.parseRecursion)
            yield request
    def parseRecursion(self, response):
        """Extract article links from one older listing page."""
        newsURLlist = response.xpath('.//div[@class="list-view"]/article/div[@class="image-text-group-b"]/h5/a/@href').extract()
        for url in newsURLlist:
            request = response.follow(url, callback = self.getContent)
            yield request
    def start_requests(self):
        # Crawl entry point: authenticate before touching start_urls.
        yield scrapy.Request(self.login_url,callback=self.login)
    def login(self,response):
        """POST subscriber credentials (read from spiders.cfg) via XHR."""
        work_dir = os.path.dirname(os.path.abspath(__file__))
        filepath = os.path.join(work_dir,'spiders.cfg')
        print(filepath)
        config_raw = ConfigParser()
        config_raw.read(filepath)
        username = config_raw.get('avcj', 'username').strip()
        password = config_raw.get('avcj', 'password').strip()
        unicornHeader = {
            'X-Requested-With': 'XMLHttpRequest',
            'Referer': 'https://www.avcj.com/userlogin',
        }
        formdata = {'redirect_url':'https://www.avcj.com/','subscriber_with_institution_loggedin_as_individual':'true','subscriber[email_id]':username,'subscriber[password]':password,'myTime':'Yes'}
        yield FormRequest(url = 'https://www.avcj.com/home/verify_subscription_login',headers = unicornHeader,formdata=formdata,callback=self.parse_login)
    def parse_login(self,response):
        # After login, fall back to Spider.start_requests to crawl start_urls.
        yield from super().start_requests()
|
990,249 | 030f7286275839982fae4f4e5a0f2bcba857046f |
def variable_valid(var):
    """Return True if *var* is a valid identifier-like name.

    Rules: non-empty, first character a letter (case-insensitive), and
    every character alphanumeric or an underscore.
    """
    # BUG FIX: guard the empty string -- the original crashed with
    # IndexError on var[0].
    if not var:
        return False
    var = var.lower()
    if not 'a' <= var[0] <= 'z':
        return False
    return all(c.isalnum() or c == '_' for c in var)
|
990,250 | e1cc5815d1a61d2b15869fc74e5220f3523e7f27 | # -*- coding:utf-8 -*-
# 创建时间:2019-03-25 11:47:32
# 创建人: Dekiven
import os
from TkToolsD.CommonWidgets import *
from TkToolsD.ConfigEditor import ConfigEditor
tk, ttk = getTk()
# help(tk.Event)
class ConfigEditWindow(tk.Toplevel) :
    '''Toplevel window showing one ConfigEditor tab per platform.

    Edits are written back into self.data and forwarded to an optional
    callback(name, conf) registered via setCallback().
    '''
    def __init__(self, *args, **dArgs) :
        tk.Toplevel.__init__(self, *args, **dArgs)
        notebook = ttk.Notebook(self)
        notebook.pack(expand=tk.YES, fill=tk.BOTH)
        notebook.bind('<<NotebookTabChanged>>', self.__onChangeTab)
        self.notebook = notebook
        self.frames = []
        self.data = {}
        self.platforms = []
        # Index of the currently selected notebook tab.
        self.curIdx = 0
        self.callback = None
        self.geometry('600x400')
    def setData(self, data, platforms) :
        # Build one editor tab per platform; missing configs default to {}.
        self.data = data
        for name in platforms :
            self.__addFrame(name, data.get(name) or {})
    def setCallback(self, callback) :
        # callback(name, conf) is invoked after every edit in any tab.
        self.callback = callback
    def __addFrame(self, name, conf) :
        notebook = self.notebook
        frame = ttk.LabelFrame(notebook, text=name, width=400)
        cmv = ConfigEditor(frame)
        cmv.pack(expand=tk.YES, fill=tk.BOTH)
        cmv.setSupportTypes(('int', 'float', 'bool', 'string', 'version'))
        cmv.setData(conf)
        # Closure captures *name* so each tab's edits land in its own slot.
        def __onChange(d) :
            self.data[name] = d
            if self.callback :
                self.callback(name, d)
        cmv.setCallback(__onChange)
        notebook.add(frame, text=name)
    def __onChangeTab(self, event):
        # Keep curIdx in sync with the tab the user selected.
        notebook = self.notebook
        self.curIdx = notebook.index('current')
def __main() :
    '''Manual smoke test: open the editor with nested sample config data.'''
    root = tk.Tk()
    c = ConfigEditWindow(root)
    c.setData({'a':{
        'test1':('string', 'test1str',),
        'testDic':('dict', {
            'a':('string', 'aStr'),
            'b':('string', 'bStr'),
            't':('dict', {
                'a':('string', 'aStr'),
                'b':('string', 'bStr'),
            }),
        }),
        'testList':('list', (('string', 'list1'), ('int', 1)))
    }}, ['a', 'b'])
    def p(n, d) :
        print(n, d)
    # Echo every change to stdout.
    c.setCallback(lambda n,d: p(n,d))
    centerToplevel(c)
    root.wait_window()
    root.mainloop()
if __name__ == '__main__' :
__main()
|
990,251 | 8a4971b00cdfe2932d801f18003463ff9c1423d0 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import time
# add ../../Sources to the PYTHONPATH
sys.path.append(os.path.join("..", "..", "Sources"))
from yocto_api import *
from yocto_power import *
#Idea: Make function to write to file and then have another fuction to
#draw out the graph as it is appended to the file by the sensor data.
def usage():
    """Print the accepted command-line forms and exit the script."""
    scriptname = os.path.basename(sys.argv[0])
    print("Usage:")
    print(scriptname + ' <serial_number>')
    print(scriptname + ' <logical_name>')
    print(scriptname + ' any ')
    # Exit with the default status (0) after showing help.
    sys.exit()
def die(msg):
    """Abort the script with *msg*, hinting that the USB cable may be loose."""
    full_msg = msg + ' (check USB cable)'
    sys.exit(full_msg)
# Pick the Yoctopuce power sensor named on the command line ('any' = first found).
errmsg = YRefParam()
if len(sys.argv) < 2:
    usage()
target = sys.argv[1]
# Setup the API to use local USB devices
if YAPI.RegisterHub("usb", errmsg) != YAPI.SUCCESS:
    sys.exit("init error" + errmsg.value)
if target == 'any':
    # retrieve any Power sensor
    sensor = YPower.FirstPower()
    if sensor is None:
        die('No module connected')
else:
    # Look up a specific sensor by serial number or logical name.
    sensor = YPower.FindPower(target + '.power')
    if not (sensor.isOnline()):
        die('device not connected')
# Sample the sensor once a second and append "counter,watts" rows to
# wattageData.txt until the device goes offline or the user hits Ctrl-C.
counter = 0
while sensor.isOnline():
    # Re-open per sample with a context manager so every reading is flushed
    # to disk and the handle is closed even if an exception interrupts us.
    with open('wattageData.txt', 'a+') as write_data:
        print("Power : " + "%2.1f" % sensor.get_currentValue() + "W (Ctrl-C to stop)")
        write_data.write("%d,%2.1f \n" % (counter, sensor.get_currentValue()))
    YAPI.Sleep(1000)
    counter += 1
YAPI.FreeAPI()
|
990,252 | c5245b1333f3e47ed5bb8a1169c18c49eea6dd2d | """
Given a list of non negative integers, arrange them such that they form the largest number.
For example:
Given [3, 30, 34, 5, 9], the largest formed number is 9534330.
Note: The result may be very large, so you need to return a string instead of an integer.
"""
class K:
    """Sort-key wrapper: x < y when the concatenation x+y precedes y+x."""
    def __init__(self, obj, *args):
        self.obj = obj

    def __lt__(self, other):
        ab = '%d%d' % (self.obj, other.obj)
        ba = '%d%d' % (other.obj, self.obj)
        return ab < ba
class Solution:
    # @param A : tuple of integers
    # @return a strings
    def largestNumber(self, A):
        """Return the largest number formed by concatenating the values in A.

        BUG FIX: the original concatenated A in its given order with no
        sorting, so e.g. (3, 30, 34, 5, 9) did not yield '9534330'.
        """
        from functools import cmp_to_key

        def cmp(a, b):
            # a goes first when a+b forms the larger concatenation.
            if a + b > b + a:
                return -1
            if a + b < b + a:
                return 1
            return 0

        digits = sorted((str(x) for x in A), key=cmp_to_key(cmp))
        # int() collapses leading zeros, so e.g. [0, 0] -> '0', not '00'.
        return str(int(''.join(digits)))
|
990,253 | ec36ae2d726c7bdbadad37551ee104c711871b7b | """
Application fixtures to easily create app instances
Examples:
$ python -m pytest
$ coverage run -m pytest
$ coverage report
$ coverage html
"""
import pytest
from starlette.testclient import TestClient
from app import ApplicationFactory
@pytest.fixture
def app():
    """Application instance created in debug mode for tests."""
    return ApplicationFactory('API', 'Test application').create(debug=True)
@pytest.fixture
def client(app):
    """Starlette test client wrapping the ``app`` fixture."""
    return TestClient(app)
|
990,254 | 4ebd08d3798a0caa82b1572787115276878d5185 | import pickle
import os
import pandas as pd
from morphoclass import MorphoToken
def quartilecounter(dataset):
    """Compute sentence-length statistics (length in 'word' tokens).

    *dataset* is an iterable of documents, each an iterable of sentences,
    each an iterable of tokens exposing .form and .category. Returns a
    dict (Russian keys kept for the existing report format) or None when
    the dataset contains no sentences.
    """
    segment = []
    for doc in dataset:
        for sent in doc:
            segment.append(len([token.form for token in sent if token.category == 'word']))
    segment.sort()
    # BUG FIX: made the empty case an explicit None instead of falling off
    # the end of the function inside an 'if segment:' block.
    if not segment:
        return None
    mean = round(sum(segment) / len(segment), 2)
    lower = len(segment) * 0.25
    upper = len(segment) * 0.75
    middle = len(segment) // 2
    # BUG FIX: for an even-sized sorted sample the median is the mean of
    # elements middle-1 and middle; the original averaged middle and
    # middle+1 and mishandled samples of size 2.
    if len(segment) % 2 == 0:
        median = (segment[middle - 1] + segment[middle]) / 2
    else:
        median = segment[middle]
    lowerint = int(lower)
    upperint = int(upper)
    # NOTE(review): this quartile interpolation scheme is nonstandard but is
    # kept as-is so previously produced numbers stay comparable.
    if lowerint != lower and lowerint != len(segment) - 1:
        lower_q = (segment[lowerint] + segment[lowerint + 1]) / 2
    else:
        lower_q = segment[lowerint]
    if upperint != upper and upperint != len(segment) - 1:
        upper_q = (segment[upperint] + segment[upperint + 1]) / 2
    else:
        upper_q = segment[upperint]
    quartile = upper_q - lower_q
    return {'Межквартильный размах': quartile, 'Среднее арифметическое': mean, 'Медиана': median,
            'Верхняя квартиль': segment[upperint], 'Нижняя квартиль': segment[lowerint]}
def main():
    """Compute stats for every pickled corpus under *p* and export to Excel."""
    p = '/home/al/PythonFiles/files/disser/readydata/morpho'
    files = os.listdir(p)
    results = {}
    for f in files:
        # Skip anything with a file extension and directories; only bare
        # data files are corpora.
        if os.path.splitext(f)[1] or os.path.isdir(f):
            continue
        fullp = os.path.join(p, f)
        # NOTE: pickle.load on local corpus files only; caps at 2000 docs
        # per corpus to keep runs comparable.
        data = pickle.load(open(fullp, 'rb'))[:2000]
        results[f] = quartilecounter(data)
    df = pd.DataFrame.from_dict(results, orient='index')
    df.to_excel('sentlengths.xlsx')
if __name__ == '__main__':
main()
|
990,255 | 7678c108c899d700c10b0fd74849685bae37f0b1 | import sys
from fenics import *
import mshr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.tri as tri
def solve0(variant):
    """Solve -div(grad(u)) + alpha*u = f on the unit disc for test *variant*.

    variant (str '1'-'3') selects a manufactured exact solution u_D with
    matching Neumann flux g and source f. Dirichlet data is imposed on the
    upper half of the boundary (x[1] >= 0), the Neumann condition on the
    rest. Prints the L2 and max-vertex error norms and saves PNG plots of
    u, u_D and |u - u_D|. Silently returns for any other variant.
    """
    cur_var = int(variant)
    alpha = 1
    # Each case: exact solution u_D, boundary flux g, and source term f.
    if(cur_var==1):
        u_D = Expression('3 + 2*x[0]*x[0] + 3*x[1]*x[1]',degree=2)
        g = Expression('(4*sqrt(x[0]*x[0]+x[1]*x[1])*pow(cos(atan2(x[1],x[0])),2)+6*sqrt(x[0]*x[0] + x[1]*x[1])*pow(sin(atan2(x[1],x[0])),2))',degree=2)
        f = Expression('-10 + alpha*(3+2*x[0]*x[0]+3*x[1]*x[1])',degree=2,alpha=alpha)
    elif(cur_var==2):
        u_D = Expression('(x[0]*x[0]+x[1]*x[1])*pow(cos(atan2(x[1],x[0])),2)',degree=2)
        g = Expression('2*sqrt(x[0]*x[0]+x[1]*x[1])*pow(cos(atan2(x[1],x[0])),2)',degree=2)
        f = Expression('-(4*pow(cos(atan2(x[1],x[0])),2)-2*cos(2*atan2(x[1],x[0]))) + alpha*(x[0]*x[0]+x[1]*x[1])*pow(cos(atan2(x[1],x[0])),2)',degree=2,alpha=alpha)
    elif(cur_var==3):
        u_D = Expression('x[0]*x[0]+x[0]*x[1]',degree=2)
        g = Expression('2*sqrt(x[0]*x[0]+x[1]*x[1])*pow(cos(atan2(x[1],x[0])),2)+sqrt(x[0]*x[0]+x[1]*x[1])*sin(2*atan2(x[1],x[0]))',degree=2)
        f = Expression('-2+alpha*(x[0]*x[0]+x[0]*x[1])',degree=2,alpha=alpha)
    else:
        return
    # Unit disc meshed with resolution 60; P1 Lagrange elements.
    domain = mshr.Circle(Point(0.,0.),1.0,60)
    mesh = mshr.generate_mesh(domain, 60)
    V = FunctionSpace(mesh,'P',1)
    # Dirichlet part of the boundary: the upper half-circle (x[1] >= 0).
    def boundary1(x,on_boundary):
        if on_boundary:
            if x[1]>=0:
                return True
            else:
                return False
        else:
            return False
    bc = DirichletBC(V,u_D,boundary1)
    u = TrialFunction(V)
    v = TestFunction(V)
    # Weak form: (grad u, grad v) + alpha*(u, v) = (f, v) + boundary flux term.
    a = dot(grad(u),grad(v))*dx + alpha*u*v*dx
    L = f*v*dx + g*v*ds
    u = Function(V)
    solve(a == L,u,bc)
    #errors
    error_L2 = errornorm(u_D, u, 'L2')
    vertex_values_u_D = u_D.compute_vertex_values(mesh)
    vertex_values_u = u.compute_vertex_values(mesh)
    error_C = np.max(np.abs(vertex_values_u - vertex_values_u_D))
    print('norm_L2 = ' + str(error_L2))
    print('error_C = ' + str(error_C))
    #graph 1: computed solution, colored per cell at midpoints
    n = mesh.num_vertices()
    d = mesh.geometry().dim()
    mesh_coordinates = mesh.coordinates().reshape((n, d))
    triangles = np.asarray([cell.entities(0) for cell in cells(mesh)])
    triangulation = tri.Triangulation(mesh_coordinates[:, 0], mesh_coordinates[:, 1],triangles)
    fig1 = plt.figure(1)
    zfaces = np.asarray([u(cell.midpoint()) for cell in cells(mesh)])
    plt.tripcolor(triangulation, facecolors=zfaces, edgecolors='k')
    plt.savefig(str('u'+variant+'.png'))
    #graph 2: exact solution on the same triangulation
    fig2 = plt.figure(2)
    zfaces2 = np.asarray([u_D(cell.midpoint()) for cell in cells(mesh)])
    plt.tripcolor(triangulation, facecolors=zfaces2, edgecolors='k')
    plt.savefig(str('u_D'+variant+'.png'))
    #difference |u - u_D| with a colorbar
    fig3 = plt.figure(3)
    zfaces3 = abs(zfaces-zfaces2)
    plt.tripcolor(triangulation, facecolors=zfaces3, edgecolors='k')
    plt.colorbar()
    plt.savefig(str('difference'+variant+'.png'))
if __name__ == '__main__':
solve0(sys.argv[1])
|
990,256 | da90085e2d934ecf704ea275e18be0c8713be308 | # -*- coding: utf-8 -*-
from flask import Flask, request, session, g, redirect, url_for, render_template
from contextlib import closing
import sqlite3
# configuration
DATABASE = '/tmp/flaskr.db'
SECRET_KEY = "excuses_key"
app = Flask(__name__)
app.config.from_object(__name__)
class Excuse(object):
    """One excuse row plus its vote counters.

    Database access goes through the per-request SQLite handle ``g.db``
    installed by before_request().
    """
    up_text = u"Норм"
    down_text = u"Не катит"

    def __init__(self, id, title, vote_up, vote_down):
        self.id = id
        self.title = title
        self.vote_up = vote_up
        self.vote_down = vote_down

    def do_vote_up(self):
        """Increment the upvote counter in memory and in the database."""
        self.vote_up += 1
        # SECURITY FIX: use a DB-API placeholder; the original interpolated
        # self.id with str.format(), an SQL-injection vector.
        g.db.execute("update excuses set upvotes=upvotes+1 where id = ?", (self.id,))
        g.db.commit()
        return self.vote_up

    def do_vote_down(self):
        """Increment the downvote counter in memory and in the database."""
        self.vote_down += 1
        g.db.execute("update excuses set downvotes=downvotes+1 where id = ?", (self.id,))
        g.db.commit()
        return self.vote_down

    def update(self):
        """Load a random excuse row from the database into this instance."""
        cur = g.db.execute('select id, title, upvotes, downvotes from excuses order by random() limit 1')
        self.id, self.title, self.vote_up, self.vote_down = cur.fetchone()
excuse = Excuse(None, None, None, None)
@app.route("/", methods=['POST', 'GET'])
def index():
    """Single page: apply a requested vote, otherwise show a fresh excuse."""
    if request.args.get("up_vote", ""):
        excuse.do_vote_up()
        return render_template("message.html", excuse=excuse)
    if request.args.get("down_vote", ""):
        excuse.do_vote_down()
        return render_template("message.html", excuse=excuse)
    # No vote parameter: load a new random excuse to display.
    excuse.update()
    return render_template("message.html", excuse=excuse)
def connect_db():
    """Open a new connection to the configured SQLite database file."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
def init_db():
    """(Re)create the schema by running schema.sql on a fresh connection."""
    with closing(connect_db()) as db:
        with app.open_resource('schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()
@app.before_request
def before_request():
    # One SQLite handle per request; closed again in teardown_request().
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database handle, if one was opened."""
    db = getattr(g, 'db', None)
    if db is not None:
        db.close()
if __name__ == '__main__':
app.run()
|
990,257 | 158d4bd0e35b3da867b68770914f2906e6418826 | import os
import sys
import boto3
import time
NAMESPACE = os.getenv('NAMESPACE', 'default')
class Metrics:
    """Accumulates CloudWatch statistic sets in memory; flush() ships them.

    Call init_metrics() once, then record_time()/record_scalar() as often
    as needed; each metric name keeps a running min/max/sum/count set.
    """

    def init_metrics(self, app_name, component=""):
        """Reset state; *app_name* becomes the CloudWatch namespace."""
        self.metrics = {}
        self.app_name = app_name
        self.dimensions = [{
            'Name': 'namespace',
            'Value': NAMESPACE
        }]
        if component:
            self.dimensions.append({
                'Name': 'component',
                'Value': component
            })

    def _record(self, name, value, unit):
        # Shared create-or-update logic for one statistic set (was duplicated
        # almost verbatim in record_time and record_scalar).
        if name not in self.metrics:
            self.metrics[name] = {
                'MetricName': name,
                'Dimensions': self.dimensions,
                'Timestamp': time.time(),
                'StatisticValues': {
                    "SampleCount": 0,
                    "Sum": 0,
                    "Minimum": value,
                    "Maximum": value
                },
                'Unit': unit,
                'StorageResolution': 60
            }
        stats = self.metrics[name]["StatisticValues"]
        stats["SampleCount"] += 1
        stats["Sum"] += value
        if value < stats["Minimum"]:
            stats["Minimum"] = value
        if value > stats["Maximum"]:
            stats["Maximum"] = value

    def record_time(self, name, start_time):
        """Record the elapsed time since *start_time* (seconds) in ms."""
        elapsed_ms = (time.time() - start_time) * 1000
        self._record(name, elapsed_ms, 'Milliseconds')

    def record_scalar(self, name, scalar=1):
        """Record one count sample (default 1) for metric *name*."""
        self._record(name, scalar, 'Count')

    def flush(self):
        """Push all buffered metrics to CloudWatch and clear the buffer."""
        if self.metrics:
            client = boto3.client('cloudwatch')
            client.put_metric_data(
                Namespace=self.app_name,
                # BUG FIX: boto3 needs a list; dict.values() is a view in
                # Python 3 and is rejected by parameter validation.
                MetricData=list(self.metrics.values())
            )
            self.metrics = {}
# http://stackoverflow.com/questions/12778158/why-do-imports-not-work-when-creating-custom-module-classes
_ref, sys.modules[__name__] = sys.modules[__name__], Metrics()
|
990,258 | 8be4516cfab835a6aff3b1c6faf132fcd7d5e557 | from boardgamegeek import BGGClient
from django import forms
from django import forms
class SearchForm(forms.Form):
    """Single-field form carrying the board-game search query string."""
    # Free-text query; the label is rendered next to the input.
    query = forms.CharField(label="query")
|
990,259 | 3d7c48e749de0b328af2d425aadf69f8a266081a | def content():
user_dict = {"Profile": [["Name", "#"], ["Date of Birth", "#"], ["Blood Group", "#"],
["Age", "#"], ["Update Details", "/update/"]],
"Home": ["Donate", "Your Contributions", "People"],
"Contact": ["sritejakittu777@gmail.com", "chinnicharan14@gmail.com"]}
return user_dict
def bloodgroups():
    """Return (value, label) choices for every supported blood group,
    prefixed with an empty placeholder option."""
    blood_group_list = ["A+", "A-", "B+", "B-", "AB+", "AB-", "A1+", "A1-", "A2+",
                        "A2-", "A1B+", "A1B-", "A2B+", "A2B-", "O+", "O-"]
    bg_list = [('', '---Select blood group---')]
    # Dropped the dead counter the original incremented on every pass.
    bg_list.extend((group, group) for group in blood_group_list)
    return bg_list
def short_bd_list():
    """The eight common ABO/Rh blood groups (short form)."""
    groups = ['A+', 'A-', 'B-', 'B+', 'AB+', 'AB-', 'O+', 'O-']
    return groups
def get_states_list():
    """Indian states and union territories, in the app's historical order."""
    return [
        'Assam', 'Andhra Pradesh', 'Odisha', 'Punjab', 'Delhi', 'Gujarat',
        'Karnataka', 'Haryana', 'Rajasthan', 'Himachal Pradesh', 'Uttarakhand',
        'Jharkhand', 'Chhattisgarh', 'Kerala', 'Tamil Nadu', 'Madhya Pradesh',
        'West Bengal', 'Bihar', 'Maharashtra', 'Uttar Pradesh', 'Chandigarh',
        'Telangana', 'Jammu and Kashmir', 'Tripura', 'Meghalaya', 'Goa',
        'Arunachal Pradesh', 'Manipur', 'Mizoram', 'Sikkim', 'Puducherry',
        'Nagaland', 'Andaman and Nicobar Islands', 'Dadra and Nagar Haveli',
        'Daman and Diu', 'Lakshadweep',
    ]
def state_list():
    """(value, label) choices for Indian states, alphabetical, with a
    leading placeholder prompting the user to choose."""
    names = [
        'Andaman and Nicobar Islands', 'Andhra Pradesh', 'Arunachal Pradesh',
        'Assam', 'Bihar', 'Chandigarh', 'Chhattisgarh',
        'Dadra and Nagar Haveli', 'Daman and Diu', 'Delhi', 'Goa', 'Gujarat',
        'Haryana', 'Himachal Pradesh', 'Jammu and Kashmir', 'Jharkhand',
        'Karnataka', 'Kerala', 'Lakshadweep', 'Madhya Pradesh', 'Maharashtra',
        'Manipur', 'Meghalaya', 'Mizoram', 'Nagaland', 'Odisha', 'Puducherry',
        'Punjab', 'Rajasthan', 'Sikkim', 'Tamil Nadu', 'Telangana', 'Tripura',
        'Uttar Pradesh', 'Uttarakhand', 'West Bengal',
    ]
    choices = [('', 'Please select your current resident state')]
    choices.extend((state, state) for state in names)
    return choices
def gender_list():
    """(value, label) gender choices, including an opt-out entry."""
    options = [('Male', 'Male'), ('Female', 'Female'), ('Other', 'Other'),
               ('Not known', 'I am not interested to reveal my gender')]
    return [('', '---Please Select Gender---')] + options
def cities_list():
cities_dict = {
'Andaman and Nicobar Islands': ['Port Blair'],
'Andhra Pradesh': ['Visakhapatnam', 'Vijayawada', 'Guntur', 'Nellore', 'Kurnool', 'Rajahmundry', 'Kakinada',
'Tirupati', 'Anantapur', 'Kadapa', 'Vizianagaram', 'Eluru', 'Ongole', 'Nandyal',
'Machilipatnam', 'Adoni', 'Tenali', 'Chittoor', 'Hindupur', 'Proddatur', 'Bhimavaram',
'Madanapalle', 'Guntakal', 'Dharmavaram', 'Gudivada', 'Srikakulam', 'Narasaraopet',
'Rajampet', 'Tadpatri', 'Tadepalligudem', 'Chilakaluripet', 'Yemmiganur', 'Kadiri',
'Chirala', 'Anakapalle', 'Kavali', 'Palacole', 'Sullurpeta', 'Tanuku', 'Rayachoti',
'Srikalahasti', 'Bapatla', 'Naidupet', 'Nagari', 'Gudur', 'Vinukonda', 'Narasapuram',
'Nuzvid', 'Markapur', 'Ponnur', 'Kandukur', 'Bobbili', 'Rayadurg', 'Samalkot', 'Jaggaiahpet',
'Tuni', 'Amalapuram', 'Bheemunipatnam', 'Venkatagiri', 'Sattenapalle', 'Pithapuram',
'Palasa Kasibugga', 'Parvathipuram', 'Macherla', 'Gooty', 'Salur', 'Mandapeta',
'Jammalamadugu', 'Peddapuram', 'Punganur', 'Nidadavole', 'Repalle', 'Ramachandrapuram',
'Kovvur', 'Tiruvuru', 'Uravakonda', 'Narsipatnam', 'Yerraguntla', 'Pedana', 'Puttur',
'Renigunta', 'Rajam', 'Srisailam Project (Right Flank Colony) Township'],
'Arunachal Pradesh': ['Naharlagun', 'Pasighat'],
'Assam': ['Guwahati', 'Silchar', 'Dibrugarh', 'Nagaon', 'Tinsukia', 'Jorhat', 'Bongaigaon City', 'Dhubri',
'Diphu', 'North Lakhimpur', 'Tezpur', 'Karimganj', 'Sibsagar', 'Goalpara', 'Barpeta', 'Lanka',
'Lumding', 'Mankachar', 'Nalbari', 'Rangia', 'Margherita', 'Mangaldoi', 'Silapathar', 'Mariani',
'Marigaon'],
'Bihar': ['Patna', 'Gaya', 'Bhagalpur', 'Muzaffarpur', 'Darbhanga', 'Arrah', 'Begusarai', 'Chhapra', 'Katihar',
'Munger', 'Purnia', 'Saharsa', 'Sasaram', 'Hajipur', 'Dehri-on-Sone', 'Bettiah', 'Motihari', 'Bagaha',
'Siwan', 'Kishanganj', 'Jamalpur', 'Buxar', 'Jehanabad', 'Aurangabad', 'Lakhisarai', 'Nawada',
'Jamui', 'Sitamarhi', 'Araria', 'Gopalganj', 'Madhubani', 'Masaurhi', 'Samastipur', 'Mokameh',
'Supaul', 'Dumraon', 'Arwal', 'Forbesganj', 'BhabUrban Agglomeration', 'Narkatiaganj', 'Naugachhia',
'Madhepura', 'Sheikhpura', 'Sultanganj', 'Raxaul Bazar', 'Ramnagar', 'Mahnar Bazar', 'Warisaliganj',
'Revelganj', 'Rajgir', 'Sonepur', 'Sherghati', 'Sugauli', 'Makhdumpur', 'Maner', 'Rosera', 'Nokha',
'Piro', 'Rafiganj', 'Marhaura', 'Mirganj', 'Lalganj', 'Murliganj', 'Motipur', 'Manihari', 'Sheohar',
'Maharajganj', 'Silao', 'Barh', 'Asarganj'],
'Chandigarh': ['Chandigarh'],
'Chhattisgarh': ['Raipur', 'Bhilai Nagar', 'Korba', 'Bilaspur', 'Durg', 'Rajnandgaon', 'Jagdalpur', 'Raigarh',
'Ambikapur', 'Mahasamund', 'Dhamtari', 'Chirmiri', 'Bhatapara', 'Dalli-Rajhara',
'Naila Janjgir', 'Tilda Newra', 'Mungeli', 'Manendragarh', 'Sakti'],
'Dadra and Nagar Haveli': ['Silvassa'],
'Daman and Diu': ['Daman', 'Diu'],
'Delhi': ['Delhi', 'New Delhi'],
'Goa': ['Marmagao', 'Panaji', 'Margao', 'Mapusa'],
'Gujarat': ['Ahmedabad', 'Surat', 'Vadodara', 'Rajkot', 'Bhavnagar', 'Jamnagar', 'Nadiad', 'Porbandar', 'Anand',
'Morvi', 'Mahesana', 'Bharuch', 'Vapi', 'Navsari', 'Veraval', 'Bhuj', 'Godhra', 'Palanpur',
'Valsad', 'Patan', 'Deesa', 'Amreli', 'Anjar', 'Dhoraji', 'Khambhat', 'Mahuva', 'Keshod', 'Wadhwan',
'Ankleshwar', 'Savarkundla', 'Kadi', 'Visnagar', 'Upleta', 'Una', 'Sidhpur', 'Unjha', 'Mangrol',
'Viramgam', 'Modasa', 'Palitana', 'Petlad', 'Kapadvanj', 'Sihor', 'Wankaner', 'Limbdi', 'Mandvi',
'Thangadh', 'Vyara', 'Padra', 'Lunawada', 'Rajpipla', 'Vapi', 'Umreth', 'Sanand', 'Rajula',
'Radhanpur', 'Mahemdabad', 'Ranavav', 'Tharad', 'Mansa', 'Umbergaon', 'Talaja', 'Vadnagar',
'Manavadar', 'Salaya', 'Vijapur', 'Pardi', 'Rapar', 'Songadh', 'Lathi', 'Adalaj', 'Chhapra'],
'Haryana': ['Faridabad', 'Gurgaon', 'Hisar', 'Rohtak', 'Panipat', 'Karnal', 'Sonipat', 'Yamunanagar',
'Panchkula', 'Bhiwani', 'Bahadurgarh', 'Jind', 'Sirsa', 'Thanesar', 'Kaithal', 'Palwal', 'Rewari',
'Hansi', 'Narnaul', 'Fatehabad', 'Gohana', 'Tohana', 'Narwana', 'Mandi Dabwali', 'Charkhi Dadri',
'Shahbad', 'Pehowa', 'Samalkha', 'Pinjore', 'Ladwa', 'Sohna', 'Safidon', 'Taraori', 'Mahendragarh',
'Ratia', 'Rania', 'Sarsod'],
'Himachal Pradesh': ['Shimla', 'Mandi', 'Solan', 'Nahan', 'Sundarnagar', 'Palampur'],
'Jammu and Kashmir': ['Srinagar', 'Jammu', 'Baramula', 'Anantnag', 'Sopore', 'KathUrban Agglomeration',
'Rajauri', 'Punch', 'Udhampur'],
'Jharkhand': ['Dhanbad', 'Ranchi', 'Jamshedpur', 'Bokaro Steel City', 'Deoghar', 'Phusro', 'Adityapur',
'Hazaribag', 'Giridih', 'Ramgarh', 'Jhumri Tilaiya', 'Saunda', 'Sahibganj',
'Medininagar (Daltonganj)', 'Chaibasa', 'Chatra', 'Gumia', 'Dumka', 'Madhupur', 'Chirkunda',
'Pakaur', 'Simdega', 'Musabani', 'Mihijam', 'Patratu', 'Lohardaga', 'Tenu dam-cum-Kathhara'],
'Karnataka': ['Bengaluru', 'Hubli-Dharwad', 'Belagavi', 'Mangaluru', 'Davanagere', 'Ballari', 'Tumkur',
'Shivamogga', 'Raayachuru', 'Robertson Pet', 'Kolar', 'Mandya', 'Udupi', 'Chikkamagaluru',
'Karwar', 'Ranebennuru', 'Ranibennur', 'Ramanagaram', 'Gokak', 'Yadgir', 'Rabkavi Banhatti',
'Shahabad', 'Sirsi', 'Sindhnur', 'Tiptur', 'Arsikere', 'Nanjangud', 'Sagara', 'Sira', 'Puttur',
'Athni', 'Mulbagal', 'Surapura', 'Siruguppa', 'Mudhol', 'Sidlaghatta', 'Shahpur',
'Saundatti-Yellamma', 'Wadi', 'Manvi', 'Nelamangala', 'Lakshmeshwar', 'Ramdurg', 'Nargund',
'Tarikere', 'Malavalli', 'Savanur', 'Lingsugur', 'Vijayapura', 'Sankeshwara', 'Madikeri',
'Talikota', 'Sedam', 'Shikaripur', 'Mahalingapura', 'Mudalagi', 'Muddebihal', 'Pavagada', 'Malur',
'Sindhagi', 'Sanduru', 'Afzalpur', 'Maddur', 'Madhugiri', 'Tekkalakote', 'Terdal', 'Mudabidri',
'Magadi', 'Navalgund', 'Shiggaon', 'Shrirangapattana', 'Sindagi', 'Sakaleshapura', 'Srinivaspur',
'Ron', 'Mundargi', 'Sadalagi', 'Piriyapatna', 'Adyar', 'Mysore'],
'Kerala': ['Thiruvananthapuram', 'Kochi', 'Kozhikode', 'Kollam', 'Thrissur', 'Palakkad', 'Alappuzha',
'Malappuram', 'Ponnani', 'Vatakara', 'Kanhangad', 'Taliparamba', 'Koyilandy', 'Neyyattinkara',
'Kayamkulam', 'Nedumangad', 'Kannur', 'Tirur', 'Kottayam', 'Kasaragod', 'Kunnamkulam', 'Ottappalam',
'Thiruvalla', 'Thodupuzha', 'Chalakudy', 'Changanassery', 'Punalur', 'Nilambur', 'Cherthala',
'Perinthalmanna', 'Mattannur', 'Shoranur', 'Varkala', 'Paravoor', 'Pathanamthitta', 'Peringathur',
'Attingal', 'Kodungallur', 'Pappinisseri', 'Chittur-Thathamangalam', 'Muvattupuzha', 'Adoor',
'Mavelikkara', 'Mavoor', 'Perumbavoor', 'Vaikom', 'Palai', 'Panniyannur', 'Guruvayoor',
'Puthuppally', 'Panamattom'],
'Lakshadweep': ['Kavaratti', 'Minicoy'],
'Madhya Pradesh': ['Indore', 'Bhopal', 'Jabalpur', 'Gwalior', 'Ujjain', 'Sagar', 'Ratlam', 'Satna',
'Murwara (Katni)', 'Morena', 'Singrauli', 'Rewa', 'Vidisha', 'Ganjbasoda', 'Shivpuri',
'Mandsaur', 'Neemuch', 'Nagda', 'Itarsi', 'Sarni', 'Sehore', 'Mhow Cantonment', 'Seoni',
'Balaghat', 'Ashok Nagar', 'Tikamgarh', 'Shahdol', 'Pithampur', 'Alirajpur', 'Mandla',
'Sheopur', 'Shajapur', 'Panna', 'Raghogarh-Vijaypur', 'Sendhwa', 'Sidhi', 'Pipariya',
'Shujalpur', 'Sironj', 'Pandhurna', 'Nowgong', 'Mandideep', 'Sihora', 'Raisen', 'Lahar',
'Maihar', 'Sanawad', 'Sabalgarh', 'Umaria', 'Porsa', 'Narsinghgarh', 'Malaj Khand',
'Sarangpur', 'Mundi', 'Nepanagar', 'Pasan', 'Mahidpur', 'Seoni-Malwa', 'Rehli', 'Manawar',
'Rahatgarh', 'Panagar', 'Wara Seoni', 'Tarana', 'Sausar', 'Rajgarh', 'Niwari', 'Mauganj',
'Manasa', 'Nainpur', 'Prithvipur', 'Sohagpur', 'Nowrozabad (Khodargama)', 'Shamgarh',
'Maharajpur', 'Multai', 'Pali', 'Pachore', 'Rau', 'Mhowgaon', 'Vijaypur', 'Narsinghgarh'],
'Maharashtra': ['Mumbai', 'Pune', 'Nagpur', 'Thane', 'Nashik', 'Kalyan-Dombivali', 'Vasai-Virar', 'Solapur',
'Mira-Bhayandar', 'Bhiwandi', 'Amravati', 'Nanded-Waghala', 'Sangli', 'Malegaon', 'Akola',
'Latur', 'Dhule', 'Ahmednagar', 'Ichalkaranji', 'Parbhani', 'Panvel', 'Yavatmal', 'Achalpur',
'Osmanabad', 'Nandurbar', 'Satara', 'Wardha', 'Udgir', 'Aurangabad', 'Amalner', 'Akot',
'Pandharpur', 'Shrirampur', 'Parli', 'Washim', 'Ambejogai', 'Manmad', 'Ratnagiri',
'Uran Islampur', 'Pusad', 'Sangamner', 'Shirpur-Warwade', 'Malkapur', 'Wani', 'Lonavla',
'Talegaon Dabhade', 'Anjangaon', 'Umred', 'Palghar', 'Shegaon', 'Ozar', 'Phaltan', 'Yevla',
'Shahade', 'Vita', 'Umarkhed', 'Warora', 'Pachora', 'Tumsar', 'Manjlegaon', 'Sillod', 'Arvi',
'Nandura', 'Vaijapur', 'Wadgaon Road', 'Sailu', 'Murtijapur', 'Tasgaon', 'Mehkar', 'Yawal',
'Pulgaon', 'Nilanga', 'Wai', 'Umarga', 'Paithan', 'Rahuri', 'Nawapur', 'Tuljapur', 'Morshi',
'Purna', 'Satana', 'Pathri', 'Sinnar', 'Uchgaon', 'Uran', 'Pen', 'Karjat', 'Manwath', 'Partur',
'Sangole', 'Mangrulpir', 'Risod', 'Shirur', 'Savner', 'Sasvad', 'Pandharkaoda', 'Talode',
'Shrigonda', 'Shirdi', 'Raver', 'Mukhed', 'Rajura', 'Vadgaon Kasba', 'Tirora', 'Mahad', 'Lonar',
'Sawantwadi', 'Pathardi', 'Pauni', 'Ramtek', 'Mul', 'Soyagaon', 'Mangalvedhe', 'Narkhed',
'Shendurjana', 'Patur', 'Mhaswad', 'Loha', 'Nandgaon', 'Warud'],
'Manipur': ['Imphal', 'Thoubal', 'Lilong', 'Mayang Imphal'],
'Meghalaya': ['Shillong', 'Tura', 'Nongstoin'],
'Mizoram': ['Aizawl', 'Lunglei', 'Saiha'],
'Nagaland': ['Dimapur', 'Kohima', 'Zunheboto', 'Tuensang', 'Wokha', 'Mokokchung'],
'Odisha': ['Bhubaneswar', 'Cuttack', 'Raurkela', 'Brahmapur', 'Sambalpur', 'Puri', 'Baleshwar Town',
'Baripada Town', 'Bhadrak', 'Balangir', 'Jharsuguda', 'Bargarh', 'Paradip', 'Bhawanipatna',
'Dhenkanal', 'Barbil', 'Kendujhar', 'Sunabeda', 'Rayagada', 'Jatani', 'Byasanagar', 'Kendrapara',
'Rajagangapur', 'Parlakhemundi', 'Talcher', 'Sundargarh', 'Phulabani', 'Pattamundai', 'Titlagarh',
'Nabarangapur', 'Soro', 'Malkangiri', 'Rairangpur', 'Tarbha'],
'Puducherry': ['Pondicherry', 'Karaikal', 'Yanam', 'Mahe'],
'Punjab': ['Ludhiana', 'Patiala', 'Amritsar', 'Jalandhar', 'Bathinda', 'Pathankot', 'Hoshiarpur', 'Batala',
'Moga', 'Malerkotla', 'Khanna', 'Mohali', 'Barnala', 'Firozpur', 'Phagwara', 'Kapurthala',
'Zirakpur', 'Kot Kapura', 'Faridkot', 'Muktsar', 'Rajpura', 'Sangrur', 'Fazilka', 'Gurdaspur',
'Kharar', 'Gobindgarh', 'Mansa', 'Malout', 'Nabha', 'Tarn Taran', 'Jagraon', 'Sunam', 'Dhuri',
'Firozpur Cantt.', 'Sirhind Fatehgarh Sahib', 'Rupnagar', 'Jalandhar Cantt.', 'Samana', 'Nawanshahr',
'Rampura Phul', 'Nangal', 'Nakodar', 'Zira', 'Patti', 'Raikot', 'Longowal', 'Urmar Tanda',
'Morinda, India', 'Phillaur', 'Pattran', 'Qadian', 'Sujanpur', 'Mukerian', 'Talwara'],
'Rajasthan': ['Jaipur', 'Jodhpur', 'Bikaner', 'Udaipur', 'Ajmer', 'Bhilwara', 'Alwar', 'Bharatpur', 'Pali',
'Barmer', 'Sikar', 'Tonk', 'Sadulpur', 'Sawai Madhopur', 'Nagaur', 'Makrana', 'Sujangarh',
'Sardarshahar', 'Ladnu', 'Ratangarh', 'Nokha', 'Nimbahera', 'Suratgarh', 'Rajsamand',
'Lachhmangarh', 'Rajgarh (Churu)', 'Nasirabad', 'Nohar', 'Phalodi', 'Nathdwara', 'Pilani',
'Merta City', 'Sojat', 'Neem-Ka-Thana', 'Sirohi', 'Pratapgarh', 'Rawatbhata', 'Sangaria',
'Lalsot', 'Pilibanga', 'Pipar City', 'Taranagar', 'Vijainagar, Ajmer', 'Sumerpur', 'Sagwara',
'Ramganj Mandi', 'Lakheri', 'Udaipurwati', 'Losal', 'Sri Madhopur', 'Ramngarh', 'Rawatsar',
'Rajakhera', 'Shahpura', 'Shahpura', 'Raisinghnagar', 'Malpura', 'Nadbai', 'Sanchore', 'Nagar',
'Rajgarh (Alwar)', 'Sheoganj', 'Sadri', 'Todaraisingh', 'Todabhim', 'Reengus', 'Rajaldesar',
'Sadulshahar', 'Sambhar', 'Prantij', 'Mount Abu', 'Mangrol', 'Phulera', 'Mandawa', 'Pindwara',
'Mandalgarh', 'Takhatgarh'],
'Sikkim': ['Gangtok', 'Pelling', 'Lachung', 'Lachen', 'Namchi', 'Ravangla'],
'Tamil Nadu': ['Chennai', 'Coimbatore', 'Madurai', 'Tiruchirappalli', 'Salem', 'Tirunelveli', 'Tiruppur',
'Ranipet', 'Nagercoil', 'Thanjavur', 'Vellore', 'Kancheepuram', 'Erode', 'Tiruvannamalai',
'Pollachi', 'Rajapalayam', 'Sivakasi', 'Pudukkottai', 'Neyveli (TS)', 'Nagapattinam',
'Viluppuram', 'Tiruchengode', 'Vaniyambadi', 'Theni Allinagaram', 'Udhagamandalam',
'Aruppukkottai', 'Paramakudi', 'Arakkonam', 'Virudhachalam', 'Srivilliputhur', 'Tindivanam',
'Virudhunagar', 'Karur', 'Valparai', 'Sankarankovil', 'Tenkasi', 'Palani', 'Pattukkottai',
'Tirupathur', 'Ramanathapuram', 'Udumalaipettai', 'Gobichettipalayam', 'Thiruvarur',
'Thiruvallur', 'Panruti', 'Namakkal', 'Thirumangalam', 'Vikramasingapuram', 'Nellikuppam',
'Rasipuram', 'Tiruttani', 'Nandivaram-Guduvancheri', 'Periyakulam', 'Pernampattu', 'Vellakoil',
'Sivaganga', 'Vadalur', 'Rameshwaram', 'Tiruvethipuram', 'Perambalur', 'Usilampatti',
'Vedaranyam', 'Sathyamangalam', 'Puliyankudi', 'Nanjikottai', 'Thuraiyur', 'Sirkali',
'Tiruchendur', 'Periyasemur', 'Sattur', 'Vandavasi', 'Tharamangalam', 'Tirukkoyilur',
'Oddanchatram', 'Palladam', 'Vadakkuvalliyur', 'Tirukalukundram', 'Uthamapalayam', 'Surandai',
'Sankari', 'Shenkottai', 'Vadipatti', 'Sholingur', 'Tirupathur', 'Manachanallur', 'Viswanatham',
'Polur', 'Panagudi', 'Uthiramerur', 'Thiruthuraipoondi', 'Pallapatti', 'Ponneri', 'Lalgudi',
'Natham', 'Unnamalaikadai', 'P.N.Patti', 'Tharangambadi', 'Tittakudi', 'Pacode', "O' Valley",
'Suriyampalayam', 'Sholavandan', 'Thammampatti', 'Namagiripettai', 'Peravurani', 'Parangipettai',
'Pudupattinam', 'Pallikonda', 'Sivagiri', 'Punjaipugalur', 'Padmanabhapuram', 'Thirupuvanam'],
'Telangana': ['Hyderabad', 'Warangal', 'Nizamabad', 'Karimnagar', 'Ramagundam', 'Khammam', 'Mahbubnagar',
'Mancherial', 'Adilabad', 'Suryapet', 'Jagtial', 'Miryalaguda', 'Nirmal', 'Kamareddy',
'Kothagudem', 'Bodhan', 'Palwancha', 'Mandamarri', 'Koratla', 'Sircilla', 'Tandur', 'Siddipet',
'Wanaparthy', 'Kagaznagar', 'Gadwal', 'Sangareddy', 'Bellampalle', 'Bhongir', 'Vikarabad',
'Jangaon', 'Bhadrachalam', 'Bhainsa', 'Farooqnagar', 'Medak', 'Narayanpet', 'Sadasivpet',
'Yellandu', 'Manuguru', 'Kyathampalle', 'Nagarkurnool'],
'Tripura': ['Agartala', 'Udaipur', 'Dharmanagar', 'Pratapgarh', 'Kailasahar', 'Belonia', 'Khowai'],
'Uttar Pradesh': ['Lucknow', 'Kanpur', 'Firozabad', 'Agra', 'Meerut', 'Varanasi', 'Allahabad', 'Amroha',
'Moradabad', 'Aligarh', 'Saharanpur', 'Noida', 'Loni', 'Jhansi', 'Shahjahanpur', 'Rampur',
'Modinagar', 'Hapur', 'Etawah', 'Sambhal', 'Orai', 'Bahraich', 'Unnao', 'Rae Bareli',
'Lakhimpur', 'Sitapur', 'Lalitpur', 'Pilibhit', 'Chandausi', 'Hardoi ', 'Azamgarh', 'Khair',
'Sultanpur', 'Tanda', 'Nagina', 'Shamli', 'Najibabad', 'Shikohabad', 'Sikandrabad',
'Shahabad, Hardoi', 'Pilkhuwa', 'Renukoot', 'Vrindavan', 'Ujhani', 'Laharpur', 'Tilhar',
'Sahaswan', 'Rath', 'Sherkot', 'Kalpi', 'Tundla', 'Sandila', 'Nanpara', 'Sardhana', 'Nehtaur',
'Seohara', 'Padrauna', 'Mathura', 'Thakurdwara', 'Nawabganj', 'Siana', 'Noorpur',
'Sikandra Rao', 'Puranpur', 'Rudauli', 'Thana Bhawan', 'Palia Kalan', 'Zaidpur', 'Nautanwa',
'Zamania', 'Shikarpur, Bulandshahr', 'Naugawan Sadat', 'Fatehpur Sikri', 'Shahabad, Rampur',
'Robertsganj', 'Utraula', 'Sadabad', 'Rasra', 'Lar', 'Lal Gopalganj Nindaura', 'Sirsaganj',
'Pihani', 'Shamsabad, Agra', 'Rudrapur', 'Soron', 'SUrban Agglomerationr', 'Samdhan',
'Sahjanwa', 'Rampur Maniharan', 'Sumerpur', 'Shahganj', 'Tulsipur', 'Tirwaganj',
'PurqUrban Agglomerationzi', 'Shamsabad, Farrukhabad', 'Warhapur', 'Powayan', 'Sandi',
'Achhnera', 'Naraura', 'Nakur', 'Sahaspur', 'Safipur', 'Reoti', 'Sikanderpur', 'Saidpur',
'Sirsi', 'Purwa', 'Parasi', 'Lalganj', 'Phulpur', 'Shishgarh', 'Sahawar', 'Samthar',
'Pukhrayan', 'Obra', 'Niwai'],
'Uttarakhand': ['Dehradun', 'Hardwar', 'Haldwani-cum-Kathgodam', 'Srinagar', 'Kashipur', 'Roorkee', 'Rudrapur',
'Rishikesh', 'Ramnagar', 'Pithoragarh', 'Manglaur', 'Nainital', 'Mussoorie', 'Tehri', 'Pauri',
'Nagla', 'Sitarganj', 'Bageshwar'],
'West Bengal': ['Kolkata', 'Siliguri', 'Asansol', 'Raghunathganj', 'Kharagpur', 'Naihati', 'English Bazar',
'Baharampur', 'Hugli-Chinsurah', 'Raiganj', 'Jalpaiguri', 'Santipur', 'Balurghat', 'Medinipur',
'Habra', 'Ranaghat', 'Bankura', 'Nabadwip', 'Darjiling', 'Purulia', 'Arambagh', 'Tamluk',
'AlipurdUrban Agglomerationr', 'Suri', 'Jhargram', 'Gangarampur', 'Rampurhat', 'Kalimpong',
'Sainthia', 'Taki', 'Murshidabad', 'Memari', 'Paschim Punropara', 'Tarakeswar', 'Sonamukhi',
'PandUrban Agglomeration', 'Mainaguri', 'Malda', 'Panchla', 'Raghunathpur', 'Mathabhanga',
'Monoharpur', 'Srirampore', 'Adra']
}
return cities_dict
def who_can_donate():
    """Map a recipient blood group to the list of donor groups it may accept.

    NOTE(review): the subtype groups (A1/A2/A1B/A2B) only accept an exact
    match here, and there is no 'A1B-' key at all even though bloodgroups()
    lists it — confirm this table against medical guidance.
    """
    receive_dict = {
        'O+': ['O+', 'O-'],
        'A+': ['A+', 'A-', 'O+','O-'],
        'B+': ['B+', 'B-', 'O+', 'O-'],
        'AB+': ["A+", "A-", "B+", "B-", "AB+", "AB-", "A1+", "A1-", "A2+",
                "A2-", "A1B+", "A1B-", "A2B+", "A2B-", "O+", "O-"],
        'O-': ['O-'],
        'A-': ['A-', 'O-'],
        'B-': ['B-', 'O-'],
        'AB-': ['AB-', 'A-', 'B-', 'O-'],
        'A1+': ['A1+'],
        'A1-': ['A1-'],
        'A2+': ['A2+'],
        'A2-': ['A2-'],
        'A1B+': ['A1B+'],
        'A2B+': ['A2B+'],
        'A2B-': ['A2B-']
    }
    return receive_dict |
990,260 | ba85f3bb4b1b80f445ad3d936bc65c2e7a746710 | import numpy as np
from math import comb # Technically cheating by using a non standard library but I am NOT going to code in an nCr function.
# --- Part 1: count 1-jolt and 3-jolt jumps in the adapter chain ---
# NOTE(review): `file` shadows a builtin name and the handle is never closed.
file = open('day_10_data.txt')
data = np.empty((0,0), dtype = int)
for line in file:
    data = np.append(data, int(line))
# The device's built-in adapter is always 3 jolts above the highest adapter.
data = np.append(data, max(data)+3)
data = np.sort(data)
missing_numbers = np.setdiff1d(np.linspace(1, max(data), max(data), dtype = int), data) # All numbers missing in the sequence between the maximum and 1
# Presumably each 3-jolt gap leaves exactly two missing integers, hence /2.
# NOTE(review): on Python 3 this is float division, so the product below is a float.
threes = len(missing_numbers)/2
ones = len(data)-threes
print(ones*threes)
## Assignment 2
#please clap I spend way too much time on this
#
#GAME PLAN:
# The only places where we can remove numbers are runs of at least 3 consecutive numbers.
# The number of states a run of consecutive numbers can be in depends on how many numbers you can maximally remove while still keeping a working series. This is 1 number for length 3, 2 for length 4 and 2 for length 5. Higher is not required and I can't be arsed to calculate it as a function of n.
# Then using the max amount of possible numbers removed ( I shall call it N) you can calculate the amount of states it can be in by NCk where k is every integer in [0, N].
# Amount of total states the big data series can be in is calculated by multiplying all the different states its subsets can be in.
# ???
# profit
# Prepend a sentinel so the first stretch of present numbers is measured too.
missing_numbers = np.insert(missing_numbers, 0, -1)
jumps = np.empty((0,0), dtype = int)
# Distance between consecutive missing numbers; a large distance marks a run
# of consecutive adapters.  NOTE(review): range stops at len-2, so the gap
# after the final missing number is never measured — confirm intentional.
for k in range(0, len(missing_numbers)-2):
    jumps = np.append(jumps, missing_numbers[k+1]-missing_numbers[k])
def states(n):
    """Count the arrangements of a run of n consecutive numbers.

    Sums C(n-2, k) for k = 0..min(2, n-2): endpoints are fixed and at most
    two interior numbers may be removed.  As the original comment warned,
    the cap of 2 is only valid for runs of up to 5 numbers.
    """
    removable = min(2, n - 2)
    return sum(comb(n - 2, k) for k in range(removable + 1))
# Part 2: total arrangements = product of the per-run possibilities.
n = 1
# A distance of >= 4 between missing numbers brackets a run of >= 3 present
# numbers; that run has (distance - 1) members.
for element in jumps[jumps>=4]:
    n *= states(element-1)
print(n) |
990,261 | 11a0f683deb5835df2f4d3795b3fdc4b4f824848 | import telebot
# SECURITY: hard-coded bot API token committed to source control — move it to
# an environment variable/config file and revoke this token.
bot = telebot.TeleBot( "917002472:AAGiOuSM_t0NzDgd3VQYmZecfI7TjYRZiZk" )
class Some_Info():
    # Mutable holder for the last value parsed out of a user message.
    var_01 = None
gollum = Some_Info()
# Debug output: prints None at startup.
print(gollum.var_01)
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """Reply to the /start and /help commands."""
    bot.reply_to( message, "Зря ты сюда зашел" )
@bot.message_handler( content_types=['text'] )
def send_message_1(message):
    """Handle plain-text messages.

    "Код ..." messages store the code to Start.txt, "+..." messages store the
    phone payload to Phone.txt; anything else gets a fallback reply.  The
    parsed value is also kept on the shared `gollum` holder.
    """
    User_text = str(message.text)
    if "Код" in User_text:
        gollum.var_01 = str(User_text[4:])
        answer = "Код " + gollum.var_01
        # FIX: the file handle was opened and never closed; use a context
        # manager so the data is flushed and the handle released.
        with open("Start.txt", "w+") as f:
            f.write(gollum.var_01)
    elif "+" in User_text:
        gollum.var_01 = str(User_text[2:])
        answer = "Операция началась " + gollum.var_01
        with open("Phone.txt", "w+") as f:
            f.write(gollum.var_01)
    else:
        answer = "Я такого еще незнаю"
    bot.send_message( message.chat.id, answer)
bot.polling( none_stop=True )
|
990,262 | 133640b0ce38a1f81679f8b3f808aef8be42c5b9 | from core.exceptions import BaseNotExistsException
class OrderDoesNotExists(BaseNotExistsException):
    """Signals that a requested order does not exist (see BaseNotExistsException)."""
    pass
class OrderObjectiveNotExists(BaseNotExistsException):
    """Signals that a requested order objective does not exist."""
    pass
|
990,263 | 4871bfb5356f88b5833c9e42ce9f73f5f9e76916 | import PyPDF2
import re
import sys
import os
def main(args):
    """Search every PDF in `slides_location` for the phrase given in `args`.

    Pages matching the phrase (case-insensitive regex search) are collected
    into a single output PDF named after the phrase.
    """
    # BUG FIX: was ''.join(args), which fused the words together ("foo bar"
    # became "foobar") while the status messages below use ' '.join(args).
    phrase = ' '.join(args)
    output = PyPDF2.PdfFileWriter()
    pages = 0
    # change these to whatever folders you want
    slides_location = "put_pdfs_here"
    output_location = "search_results"
    for file_name in os.listdir(slides_location):
        # Only process real .pdf files; the old `"pdf" in file_name` test also
        # matched names like "notes_pdf.txt".
        if not file_name.lower().endswith(".pdf"):
            continue
        # Renamed from `object`, which shadowed the builtin.
        reader = PyPDF2.PdfFileReader(os.path.join(slides_location, file_name))
        for i in range(reader.getNumPages()):
            page_obj = reader.getPage(i)
            text = page_obj.extractText()
            if re.search(phrase, text, re.IGNORECASE):
                output.addPage(page_obj)
                pages += 1
    if pages > 0:
        output_filename = os.path.join(output_location, '_'.join(args) + ".pdf")
        with open(output_filename, "wb") as outputStream:
            output.write(outputStream)
        print("'" + ' '.join(args) + "'" + " found on " + str(pages) + " pages")
        print("pdf stored in " + output_location + " as " + "'" + '_'.join(args) + ".pdf'")
    else:
        print("'" + ' '.join(args) + "' not found in files")
# Everything after the script name is treated as the search phrase.
if __name__ == '__main__':
    main(sys.argv[1:])
|
990,264 | 5c408ef6fe61c75f68253af195386b3d051a0132 | """
Contains functions for creating a list of values that can be used to feed a generator.
See doc/11-expander for details.
"""
import pytweening
from melodomatic import consts
from melodomatic.util import *
# The master dictionary of known expander functions.
EXPANDERS = {}
# The same keys as in EXPANDERS, but sorted in order of registration.
EXPANDERS_ORDERED = []
def register_expander(name, maker):
    """Register an expander function under its canonical (upper-case) name."""
    key = name.strip().upper()
    EXPANDERS[key] = maker
    EXPANDERS_ORDERED.append(key)
def autocomplete_expander_name(n):
    """Resolve a (possibly partial) expander name to its canonical key.

    The first registered name that starts with the given prefix wins;
    anything unrecognized falls back to 'LIST'.
    """
    prefix = n.strip().upper()
    match = next((k for k in EXPANDERS_ORDERED if k.startswith(prefix)), None)
    if match is not None:
        return match
    if consts.VERBOSE:
        print('ERROR: Bad expander name %s'%prefix)
    return 'LIST'
def expand_list(a):
    """Perform the expansion on a tokenized list of values (names, parens, etc.).

    Returns a tuple of strings produced by the (possibly nested) expanders.
    """
    rv, _ = _expand_sublist(a, 0)
    return rv
def _expand_sublist(a, i):
    """Expand the tokens of `a` starting at index `i` until ')' or end of input.

    The leading token may be a '%NAME' expander directive; otherwise the
    default LIST expander is used.  Returns (tuple_of_strings, next_index).
    """
    cmd = 'LIST'
    if a[i][0] == '%':
        cmd = autocomplete_expander_name(a[i][1:])
        i += 1
    buf = []
    while i < len(a):
        # close an open (we hope!) sublist and pop up
        if a[i] == ')':
            i += 1
            break
        # open a new sublist by recursing down
        if a[i] == '(':
            b, i = _expand_sublist(a, i+1)
            buf.extend(b)
        # a token of form x*y gets expanded as if it was (%xerox x y)
        elif '*' in a[i]:
            sa = a[i].split('*')
            if not sa[0]: # skip this if there's no left hand side
                i += 1
                continue
            right = 1
            try:
                right = int(sa[1])
            except ValueError:
                pass
            b = [sa[0],] * right
            buf.extend(b)
            i += 1
        else:
            buf.append(a[i])
            i += 1
    # Run the collected tokens through the chosen expander and stringify.
    # (The generator's `i` does not leak in Python 3; the returned `i` is
    # still the token index.)
    rv = tuple(str(i) for i in EXPANDERS[cmd](buf))
    return rv, i
# ######################################################## #
def ex_list(data):
    """Identity expander: return the input values unchanged, as a tuple."""
    return tuple(data)
register_expander('LIST', ex_list)
def _cleanse_range_args(data):
a = b = 0
step = 1
try:
a = int(data[0])
b = int(data[1])
if len(data) > 2:
step = int(data[2])
except ValueError:
pass
if step == 0:
step = 1
if step < 0:
if a < b:
a,b = b,a
elif a > b:
a,b = b,a
return a, b, step
def ex_range(data):
    """Create a linear series of values, inclusive of the stop endpoint."""
    start, stop, step = _cleanse_range_args(data)
    # sign(step) pushes the stop bound one step past `stop` so it is included.
    return list(range(start, stop + sign(step), step))
register_expander('RANGE', ex_range)
def ex_crange(data):
    """ Creates a linear series of values going out from a center value.

    data = [center, spread[, step]].  Values are emitted symmetrically around
    center while they stay inside [center - spread/2, center + spread/2].
    """
    center = minv = maxv = spread = 0
    step = 1
    try:
        center = int(data[0])
        spread = int(data[1])
        if len(data) > 2:
            step = int(data[2])
        # NOTE: true division — the bounds can be half-integers on Python 3.
        minv = center - spread/2
        maxv = center + spread/2
    except ValueError:
        # Parse failure leaves the bounds at 0, so only [center] is returned.
        pass
    if step == 0:
        step = 1
    if minv > maxv:
        minv, maxv = maxv, minv
    rv = [center]
    # Walk downward from the center, prepending in-bounds values...
    v = center - step
    while minv <= v <= maxv:
        rv.insert(0, v)
        v -= step
    # ...then upward, appending.
    v = center + step
    while minv <= v <= maxv:
        rv.append(v)
        v += step
    return rv
register_expander('CRANGE', ex_crange)
def ex_pingpong(data):
    """ Works like `%RANGE`, but adds on a back half that walks back down the range. """
    a, b, step = _cleanse_range_args(data)
    rv = list(range(a, b+sign(step), step))
    if rv:
        # Mirror the ramp, excluding both endpoints so they are not doubled.
        rv += list(range(rv[-1]-step, a, -step))
    return rv
register_expander('PINGPONG', ex_pingpong)
register_expander('PP', ex_pingpong)
def ex_xerox(data):
    """Repeat the values after the leading count token, `count` times.

    data[0] is the repeat count (defaults to 1 when it is not an int);
    the remaining values are concatenated that many times.
    """
    try:
        count = int(data[0])
    except ValueError:
        count = 1
    return data[1:] * count
register_expander('XEROX', ex_xerox)
# ---------------------------
# Easing functions take values in range [0.0-1.0] and return values in the same range.
# Each entry maps a curve name to its [ease-in, ease-out, ease-in-out] variants.
CURVE_FUNCTIONS = {
    'LINEAR'     : [ pytweening.linear          , pytweening.linear           , pytweening.linear             ],
    'SINE'       : [ pytweening.easeInSine      , pytweening.easeOutSine      , pytweening.easeInOutSine      ],
    'QUADRATIC'  : [ pytweening.easeInQuad      , pytweening.easeOutQuad      , pytweening.easeInOutQuad      ],
    'CUBIC'      : [ pytweening.easeInCubic     , pytweening.easeOutCubic     , pytweening.easeInOutCubic     ],
    'QUARTIC'    : [ pytweening.easeInQuart     , pytweening.easeOutQuart     , pytweening.easeInOutQuart     ],
    'QUINTIC'    : [ pytweening.easeInQuint     , pytweening.easeOutQuint     , pytweening.easeInOutQuint     ],
    'EXPONENTIAL': [ pytweening.easeInExpo      , pytweening.easeOutExpo      , pytweening.easeInOutExpo      ],
    'CIRCULAR'   : [ pytweening.easeInCirc      , pytweening.easeOutCirc      , pytweening.easeInOutCirc      ],
    'BOUNCE'     : [ pytweening.easeInBounce    , pytweening.easeOutBounce    , pytweening.easeInOutBounce    ],
    # These try to stretch out of their bounds, so they don't work too well.
    #'ELASTIC'   : [ pytweening.easeInElastic   , pytweening.easeOutElastic   , pytweening.easeInOutElastic   ],
    #'BACK'      : [ pytweening.easeInBack      , pytweening.easeOutBack      , pytweening.easeInOutBack      ],
}
# Must stay in sync with the keys of CURVE_FUNCTIONS.  FIX: 'BACK' was listed
# here although its entry above is commented out, so autocompleting e.g. 'BA'
# resolved to 'BACK' and then raised KeyError inside ex_curve.
CURVE_FUNCTIONS_ORDERED = [ 'LINEAR', 'SINE', 'QUADRATIC', 'CUBIC', 'QUARTIC', 'QUINTIC', 'EXPONENTIAL', 'CIRCULAR', 'BOUNCE' ]
def autocomplete_curve_function(s):
    """Resolve a partial curve name; unknown/empty input falls back to LINEAR."""
    token = s.strip().upper()
    if token:
        for name in CURVE_FUNCTIONS_ORDERED:
            if name.startswith(token):
                return name
        if consts.VERBOSE:
            print('ERROR: Bad curve function %s'%token)
    return CURVE_FUNCTIONS_ORDERED[0]
def autocomplete_curve_direction(s):
    """
    Turn a string into an enumerated direction code.
    IN = 0
    OUT = 1
    INOUT or IO = 2

    Unrecognized (including empty) input reports an error and returns 0.
    """
    s = s.strip().upper()
    if s in ('IO', 'INOUT'):
        return 2
    # FIX: guard the empty string — s[0] used to raise IndexError here.
    if s and s[0] == 'I':
        return 0
    if s and s[0] == 'O':
        return 1
    if consts.VERBOSE:
        print('ERROR: Bad curve direction %s'%s)
    return 0
def ex_curve(data):
    """ Redistributes the values in a list on a curve.

    data = [curve-name, direction, period, values...]; emits `period` samples
    taken from `values` at positions chosen by the easing function.
    """
    rv = []
    try:
        ef = autocomplete_curve_function(data[0])
        ed = autocomplete_curve_direction(data[1])
        period = 2
        try:
            period = max(int(data[2]), 2)
        except ValueError:
            pass
        data = data[3:]
        if not data:
            if consts.VERBOSE:
                print('ERROR: No data for curve')
            return []
        f = CURVE_FUNCTIONS[ef][ed]
        maxi = len(data)-1
        for i in range(period):
            # Map sample position [0,1] through the easing curve, then round
            # to the nearest source index.
            v = f(float(i) / float(period-1))
            di = int(round(v*float(maxi)))
            rv.append(data[di])
    # NOTE(review): deliberately broad — any failure (bad index, missing
    # curve) is reported and yields whatever was built so far.
    except Exception as e:
        if consts.VERBOSE:
            print('ERROR: Curve failed [%s]'%e)
    return rv
register_expander('CURVE', ex_curve)
|
990,265 | ec7fbe5525783794c3b82c036193441e17cbe5c6 | senha = 2002
# Keep prompting until the hard-coded password (senha, defined above) matches.
x = int(input())
while x != senha:
    print('Senha Invalida')
    x = int(input())
print('Acesso Permitido') |
990,266 | 855df6f8188e44cf891d36218c1ce96b128b7ac6 | import datetime
from flask import url_for
from app import db
class Users(db.Model):
    """A registered user account.

    The is_* / get_id methods match the Flask-Login user interface.
    """
    __tablename__ = 'users'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(120), unique=True)
    # NOTE(review): the password is stored exactly as given — hash it
    # (e.g. werkzeug.security) before persisting.
    password = db.Column(db.String(120))
    projects = db.relationship('Project', backref='users',
                               lazy='joined')
    def __init__(self, email, password):
        self.email = email
        self.password = password
    def __repr__(self):
        return '<Email: {0}>'.format(self.email)
    def is_authenticated(self):
        """Every stored user counts as authenticated."""
        return True
    def is_active(self):
        """Accounts are never deactivated."""
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        # NOTE(review): `unicode` exists only on Python 2; use str() on Python 3.
        return unicode(self.id)
class Project(db.Model):
    """A user-owned project grouping API credentials and saved queries."""
    __tablename__ = 'project'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120))
    api_type = db.Column(db.String(120))
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    # FK to the owning user's id.
    created_by = db.Column(db.Integer, db.ForeignKey('users.id'))
    authinfo = db.relationship('AuthInfo', backref='project', lazy='dynamic')
    twitter_timeline_query = db.relationship('TwitterUserTimelineQuery', backref='project', lazy='dynamic')
    twitter_mentions_query = db.relationship('TwitterMentionsTimelineQuery', backref='project', lazy='dynamic')
    def __init__(self, name, api_type, created_by):
        self.name = name
        self.api_type = api_type
        self.created_by = created_by
    def __repr__(self):
        return '<Project Name: {0}>'.format(self.name)
class AuthInfo(db.Model):
    """OAuth credentials for an external API, attached to a project."""
    __tablename__ = 'authinfo'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    api_name = db.Column(db.String())
    oauth_token = db.Column(db.String())
    oauth_token_secret = db.Column(db.String())
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    # NOTE(review): despite the name, this column holds the project's id.
    project_name = db.Column(db.Integer, db.ForeignKey('project.id'))
    def __init__(self, api_name, oauth_token, oauth_token_secret, project_name):
        self.api_name = api_name
        self.oauth_token = oauth_token
        self.oauth_token_secret = oauth_token_secret
        self.project_name = project_name
    def __repr__(self):
        return '<OAuth Token: {0}>'.format(self.oauth_token)
class TwitterUserTimelineQuery(db.Model):
    """Saved configuration for a Twitter user-timeline query."""
    __tablename__ = 'twitterusertimelinequery'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    auth_id = db.Column(db.Integer)
    name = db.Column(db.String())
    query_type = db.Column(db.String(), default='twitter user timeline')
    # NOTE(review): stored as a string, not a Boolean — confirm expected values.
    include_rts = db.Column(db.String())
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    created_by = db.Column(db.Integer)
    enabled = db.Column(db.Boolean, default=True)
    last_run = db.Column(db.DateTime, default=None)
    # NOTE(review): despite the name, this column holds the project's id.
    project_name = db.Column(db.Integer, db.ForeignKey('project.id'))
    def __init__(self, auth_id, name, include_rts, created_by, project_name):
        self.auth_id = auth_id
        self.name = name
        self.include_rts = include_rts
        self.created_by = created_by
        self.project_name = project_name
    def __repr__(self):
        return '<Query Name: {0}>'.format(self.name)
class TwitterUserInfoQuery(db.Model):
    """Saved configuration for a Twitter user-info query."""
    __tablename__ = 'twitteruserinfoquery'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    auth_id = db.Column(db.Integer)
    name = db.Column(db.String())
    query_type = db.Column(db.String(), default='twitter user info')
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    created_by = db.Column(db.Integer)
    enabled = db.Column(db.Boolean, default=True)
    last_run = db.Column(db.DateTime, default=None)
    project_name = db.Column(db.Integer, db.ForeignKey('project.id'))
    def __init__(self, auth_id, name, created_by, project_name):
        self.auth_id = auth_id
        self.name = name
        self.created_by = created_by
        self.project_name = project_name
    def __repr__(self):
        return '<Query Name {0}>'.format(self.name)
class TwitterMentionsTimelineQuery(db.Model):
    """Saved configuration for a Twitter mentions-timeline query."""
    __tablename__ = 'twittermentionstimelinequery'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    auth_id = db.Column(db.Integer)
    name = db.Column(db.String())
    query_type = db.Column(db.String(), default='twitter mentions timeline')
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    created_by = db.Column(db.Integer)
    enabled = db.Column(db.Boolean, default=True)
    last_run = db.Column(db.DateTime, default=None)
    project_name = db.Column(db.Integer, db.ForeignKey('project.id'))
    def __init__(self, auth_id, name, created_by, project_name):
        self.auth_id = auth_id
        self.name = name
        self.created_by = created_by
        self.project_name = project_name
    def __repr__(self):
        return '<Query Name {0}>'.format(self.name)
class InstagramUserFeedQuery(db.Model):
    """Saved configuration for an Instagram user-feed query."""
    __tablename__ = 'instagramuserfeedquery'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    auth_id = db.Column(db.Integer)
    name = db.Column(db.String())
    query_type = db.Column(db.String(), default='instagram user feed')
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    created_by = db.Column(db.Integer)
    enabled = db.Column(db.Boolean, default=True)
    last_run = db.Column(db.DateTime, default=None)
    project_name = db.Column(db.Integer, db.ForeignKey('project.id'))
    def __init__(self, auth_id, name, created_by, project_name):
        self.auth_id = auth_id
        self.name = name
        self.created_by = created_by
        self.project_name = project_name
    def __repr__(self):
        return '<Query Name {0}>'.format(self.name)
class InstagramUserInfoQuery(db.Model):
    """Saved query definition of type 'instagram user info'."""
    __tablename__ = 'instagramuserinfoquery'
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True)
    auth_id = db.Column(db.Integer)
    name = db.Column(db.String())
    query_type = db.Column(db.String(), default='instagram user info')
    created_on = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    created_by = db.Column(db.Integer)
    enabled = db.Column(db.Boolean, default=True)
    last_run = db.Column(db.DateTime, default=None)
    # NOTE(review): stores the project's integer id (FK to project.id),
    # not a name — same naming quirk as the sibling query models.
    project_name = db.Column(db.Integer, db.ForeignKey('project.id'))

    def __init__(self, auth_id, name, created_by, project_name):
        self.auth_id = auth_id
        self.name = name
        self.created_by = created_by
        self.project_name = project_name

    def __repr__(self):
        return '<Query Name {0}>'.format(self.name)
|
990,267 | 9567e6e1ec3c623c6c5af8fbf33f130159e7df74 | #-*- coding:utf-8 -*-
'''
author:niuniuniuniuniu
github:https://github.com/niuniuniuniuniu
'''
import requests
import re
import sys
import time
import json
from bs4 import BeautifulSoup as bsp
def get_token(urllogin,header):
try:res=requests.get(urllogin,header)
except:
print u"获取token失败"
sys.exit()
result=res.text
key=re.compile(r'name\=\"authenticity_token\"\s*value=\"\S*')
match=key.search(result)
if match:
authenticity_token=match.group().strip('name="authenticity_token" value=" ')+'=='
cookie=res.cookies.get('_gh_sess')
return authenticity_token,cookie
def github_login(urlsession,header,authenticity_token,user,passwd,gh_sess):
data={"commit":"Sign in",
"authenticity_token":authenticity_token,
"login":user,
"password":passwd}
cookie={"_gh_sess":gh_sess}
try:res=requests.post(urlsession,data,headers=header,cookies=cookie)
except:
print u"登录失败"
sys.exit()
return res.cookies.get('user_session')
print res.status_code
print res.history
def github_search(url,session,header,keyword,urllist):
for key in keyword:
key=key.replace(' ','+')
url=url+key+"&type=Code"
cookie={'user_session':session}
try:res=requests.get(url,headers=header,cookies=cookie)
except:
print u'搜索关键字失败'
sys.exit()
soup=bsp(res.text,"html.parser")
a=soup.find_all('div',class_="d-inline-block col-10")
for i in a:
c=i.find_all('a')
urllist.append(json.loads(c[1]['data-hydro-click'])['payload']['result']['url'])
return urllist
if __name__=='__main__':
    # Search phrases; the *** placeholders (and the credentials below) must
    # be replaced with real values before running.
    keyword=['Created by *** on','Created by *** on','*** database','*** mysql']
    urllist=[]
    user="***@qq.com"
    passwd="****"
    # Browser-like request headers.
    header={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0",
            "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Accept-Language":"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3",
            "Accept-Encoding": "gzip, deflate, br",
            "Content-Type": "application/x-www-form-urlencoded",
            "Referer":"https://github.com",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests":"1"}
    urllogin="https://github.com/login"
    urlsession="https://github.com/session"
    url="https://github.com/search?q="
    # Log in: fetch CSRF token + session cookie, then POST the sign-in form.
    authenticity_token,gh_sess=get_token(urllogin,header)
    gh_sess=gh_sess.replace('%3D','=')
    session=github_login(urlsession,header,authenticity_token,user,passwd,gh_sess)
    urllist=github_search(url,session,header,keyword,urllist)
    # Print de-duplicated result URLs.
    if len(urllist)>0:
        ids=list(set(urllist))
        for i in ids:
            print i
|
def two_sum(lis):
    """Return the product of two distinct entries of lis summing to 2020, or -1.

    One-pass set of previously seen values guarantees the two entries are
    different elements (the original built a lookup of ALL values and could
    pair a single 1010 with itself).
    """
    seen = set()
    for x in lis:
        if 2020 - x in seen:
            return (2020 - x) * x
        seen.add(x)
    return -1
def three_sum(lis):
    """Return the product of three distinct entries summing to 2020, or None.

    For each index pair (i, j) the third value t = 2020 - m - n is accepted
    only if it occurs at some OTHER index (the original's `seen` set included
    m and n themselves, allowing an element to be reused).
    """
    from collections import Counter
    counts = Counter(lis)
    for i, m in enumerate(lis):
        for j in range(i + 1, len(lis)):
            n = lis[j]
            t = 2020 - m - n
            # Subtract occurrences consumed by m and n themselves.
            if counts.get(t, 0) - (t == m) - (t == n) > 0:
                return m * n * t
    return None
def get_input():
    """Read whitespace-trimmed integers, one per line, from input.txt.

    Blank lines are skipped. The context manager closes the file handle,
    which the original leaked.
    """
    with open('input.txt') as f:
        lines = f.read().split('\n')
    return [int(line.strip()) for line in lines if line != '']
# Driver: read the puzzle input once and print both answers.
inp = get_input()
print(two_sum(inp))
print(three_sum(inp))
|
# Advent of Code 2020 day 1 part 2: brute-force O(n^3) scan for three
# entries summing to 2020; prints their product and stops at the first hit.
# NOTE(review): i, j, k iterate independently, so the same entry may be
# used more than once — acceptable for typical inputs with one answer.
with open("input_1.txt") as f:
    a = [int(l) for l in f]
for i in a:
    for j in a:
        for k in a:
            if i + j + k == 2020:
                print(i * j * k)
                exit()
class Solution:
    """
    @param n: An integer
    @param m: An integer
    @param i: A bit position
    @param j: A bit position
    @return: An integer
    """
    def updateBits(self, n, m, i, j):
        """Overwrite bits i..j of 32-bit n with m; result is a signed 32-bit int."""
        word = 0xFFFFFFFF  # 32-bit all-ones mask
        # Ones spanning positions i..j, built via masked (logical) shifts,
        # mirroring Java's `>>>` on a 32-bit word.
        span = (((-1) << (31 - j)) & word) >> (31 - j + i)
        keep = (~(span << i)) & word  # zero out the i..j window of n
        merged = (keep & n) | ((m << i) & word)
        # Reinterpret the unsigned 32-bit result as two's-complement signed.
        if merged & (1 << 31):
            return merged - (1 << 32)
        return merged
|
990,271 | 65cb377d3f488033ec00ef46880029642d917e8b | import socket
import time
delay = 3  # seconds to pause between test cases


def fail(msg):
    """Report a failed check and abort the whole test run."""
    print("TEST FAILED: ", msg)
    exit()


# Device under test (DE1) address; we listen for its replies on UDP 8082.
de1_address = "192.168.1.123"
de1_port = 41234
receiver = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
receiver.bind(("0.0.0.0",8082));
sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

####################################################################
# TEST 1, sending and receing a good single domain
####################################################################
print("TEST 1, sending and receing a good single domain")
sender.sendto(b"facebook.com\n",(de1_address,de1_port))
data, addr = receiver.recvfrom(1024) # buffer size is 1024 bytes
response = data.decode()
print("received message: %s" % response)
# Reply format: the echoed domain plus a verdict digit, '0' = allowed.
if(response == "facebook.com0"):
    print("passed")
else:
    fail(response)
time.sleep(delay)

####################################################################
# TEST 2, sending and receing a bad single domain
####################################################################
print("TEST 2, sending and receing a bad single domain")
sender.sendto(b"badnet.hack\n",(de1_address,de1_port))
data, addr = receiver.recvfrom(1024) # buffer size is 1024 bytes
response = data.decode()
print("received message: %s" % response)
# Verdict digit '1' = blocked.
if(response == "badnet.hack1"):
    print("passed")
else:
    fail(response)
time.sleep(delay)

####################################################################
# TEST 3, sending 2 good domains at once
####################################################################
print("TEST 3, sending 2 good domains at once")
sender.sendto(b"face.com\n",(de1_address,de1_port))
sender.sendto(b"book.com\n",(de1_address,de1_port))
# Replies may arrive in either order; wait until both are seen.
d1 = True
d2 = True
while d1 or d2:
    data, addr = receiver.recvfrom(1024) # buffer size is 1024 bytes
    response = data.decode()
    print("received message: %s" % response)
    if d1 and response == "face.com0":
        d1 = False
    if d2 and response == "book.com0":
        d2 = False
    if (response not in ["book.com0","face.com0"]):
        fail("Unexpected response: " + response)
time.sleep(delay)

####################################################################
# TEST 4, sending good domain and bad domain at once
####################################################################
print("TEST 4, sending good domain and bad domain at once")
sender.sendto(b"face.com\n",(de1_address,de1_port))
sender.sendto(b"book.hack\n",(de1_address,de1_port))
d1 = True
d2 = True
while d1 or d2:
    data, addr = receiver.recvfrom(1024) # buffer size is 1024 bytes
    response = data.decode()
    print("received message: %s" % response)
    if d1 and response == "face.com0":
        d1 = False
    if d2 and response == "book.hack1":
        d2 = False
    if (response not in ["book.hack1","face.com0"]):
        fail("Unexpected response: " + response)
time.sleep(delay)

####################################################################
# TEST 5, sending 10 domains at once
####################################################################
print("TEST 5, sending 10 domains at once")
domains = ["1.com","2.hack","3.com","4.hack","5.com","6.hack","7.com","8.hack","9.com","10.hack"]
sender.sendto(b"1.com\n",(de1_address,de1_port))
sender.sendto(b"2.hack\n",(de1_address,de1_port))
sender.sendto(b"3.com\n",(de1_address,de1_port))
sender.sendto(b"4.hack\n",(de1_address,de1_port))
sender.sendto(b"5.com\n",(de1_address,de1_port))
sender.sendto(b"6.hack\n",(de1_address,de1_port))
sender.sendto(b"7.com\n",(de1_address,de1_port))
sender.sendto(b"8.hack\n",(de1_address,de1_port))
sender.sendto(b"9.com\n",(de1_address,de1_port))
sender.sendto(b"10.hack\n",(de1_address,de1_port))
# Count replies whose domain part (verdict digit stripped) is expected.
count = 0
while count < 10:
    data, addr = receiver.recvfrom(1024) # buffer size is 1024 bytes
    response = data.decode()
    print("received message: %s" % response)
    if (response[:-1] in domains):
        count += 1
    else:
        fail("Unexpected: " + response)
time.sleep(delay)

print("ALL TESTS PASSED")
990,272 | 763ec5a1457236616bc23c620f5884bef397159a | from django.db import models
from Core.models import Location
class Equipment(models.Model):
    """Counts of specialised equipment available at a venue (0 = none)."""
    tableTennisTables = models.IntegerField(default=0)
    poolHoist = models.IntegerField(default=0)
    bowlingMachine = models.IntegerField(default=0)
    trampolines = models.IntegerField(default=0)
    parallelBars = models.IntegerField(default=0)
    highBars = models.IntegerField(default=0)
    stillRings = models.IntegerField(default=0)
    unevenBars = models.IntegerField(default=0)
    balanceBeam = models.IntegerField(default=0)
    vault = models.IntegerField(default=0)
    pommelHorse = models.IntegerField(default=0)
class Activity(models.Model):
    """A named activity that venues can offer (linked M2M from ActivePlace)."""
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name
class Contacts(models.Model):
    """Contact details for a venue; any individual channel may be absent."""
    contactType = models.CharField(max_length=30)
    email = models.EmailField(null=True)
    telephone = models.CharField(max_length=60, null=True)
    website = models.URLField(null=True, max_length=400)
class Disability(models.Model):
    """Accessibility flags for the different areas of a venue."""
    # null=True: overall accessibility may be unknown rather than yes/no.
    access = models.BooleanField(null=True)
    notes = models.CharField(max_length=1000, null=True)
    parking = models.BooleanField()
    findingReachingEntrance = models.BooleanField()
    receptionArea = models.BooleanField()
    doorways = models.BooleanField()
    changingFacilities = models.BooleanField()
    activityAreas = models.BooleanField()
    toilets = models.BooleanField()
    socialAreas = models.BooleanField()
    spectatorAreas = models.BooleanField()
    emergencyExits = models.BooleanField()
class ActivePlace(Location):
    """A venue record extending Core.Location with geography, amenities and
    one-to-one detail records (equipment, disability access, contacts)."""
    active_place_id = models.IntegerField(primary_key=True, db_index=True) # AKA id
    state = models.CharField(max_length=30)
    kind = models.CharField(max_length=30)
    # Statistical / administrative geography identifiers
    # (output area, LSOA/MSOA, constituency, ward, local authority).
    outputAreaCode = models.CharField(max_length=30)
    lowerSuperOutputArea = models.CharField(max_length=30)
    middleSuperOutputArea = models.CharField(max_length=30)
    parliamentaryConstituencyCode = models.CharField(max_length=30)
    parliamentaryConstituencyName = models.CharField(max_length=60)
    wardCode = models.CharField(max_length=30)
    wardName = models.CharField(max_length=60)
    localAuthorityCode = models.CharField(max_length=30)
    localAuthorityName = models.CharField(max_length=60)
    buildingName = models.CharField(max_length=100, null=True)
    buildingNumber = models.CharField(max_length=20)
    # Amenity flags.
    hasCarPark = models.BooleanField(default=False)
    carParkCapacity = models.IntegerField()
    dedicatedFootballFacility = models.BooleanField(default=False)
    cyclePark = models.BooleanField(default=False)
    cycleHire = models.BooleanField(default=False)
    cycleRepairWorkshop = models.BooleanField(default=False)
    nursery = models.BooleanField(default=False)
    ownerType = models.CharField(max_length=60)
    # Related detail records.
    equipment = models.OneToOneField(Equipment, on_delete=models.CASCADE)
    disability = models.OneToOneField(Disability, on_delete=models.CASCADE)
    contact = models.OneToOneField(Contacts, on_delete=models.CASCADE, null=True)
    activities = models.ManyToManyField(Activity)

    def __str__(self):
        # NOTE(review): `name` is not declared here, so it presumably comes
        # from the Location base class — confirm.
        return self.name
class Facility(models.Model):
    """A facility record attached to an ActivePlace (many per venue)."""
    active_place = models.ForeignKey(ActivePlace, on_delete=models.CASCADE)
    facilityType = models.CharField(max_length=30)
    yearBuilt = models.IntegerField(null=True)
    yearBuiltEstimated = models.BooleanField(default=False)
    isRefurbished = models.BooleanField(default=False)
    yearRefurbished = models.IntegerField(null=True)
    # null=True on these booleans: "unknown" is a valid state.
    hasChangingRooms = models.BooleanField(null=True)
    areChangingRoomsRefurbished = models.BooleanField(null=True)
    yearChangingRoomsRefurbished = models.IntegerField(null=True)
    # Opening times - implemented (see OpeningTimes below)
    # facilitySpecifics - disability -- not added
    seasonalityType = models.CharField(max_length=30)
    seasonalityStart = models.CharField(max_length=30)
    seasonalityEnd = models.CharField(max_length=30)
class OpeningTimes(models.Model):
    """One opening-hours entry for a Facility (times kept as free-form text)."""
    facility = models.ForeignKey(Facility, on_delete=models.CASCADE)
    accessDescription = models.CharField(max_length=100)
    openingTime = models.CharField(max_length=20)
    closingTime = models.CharField(max_length=20)
    periodOpenFor = models.CharField(max_length=50)

    def __str__(self):
        return "%s - (%s - %s)" % (self.periodOpenFor, self.openingTime, self.closingTime)
|
990,273 | bfe331e593ad87c0946c532cc747a4c9bcc228e7 | from django.contrib import admin
from models import Page, Revision
# Expose wiki pages and their revision history in the Django admin.
admin.site.register(Page)
admin.site.register(Revision)
|
990,274 | e3033f31d54461020e1e466487f46d08b7cc1231 | from django.urls import path
from . import views
# Namespace for reversing these routes as 'telegram_bot:<name>'.
app_name = 'telegram_bot'

urlpatterns = [
    # Webhook endpoint; the bot token embedded in the path is how Telegram
    # callbacks are matched to a bot.
    path('callback/<str:bottoken>/', views.callback, name='callback'),
    path('cities/NL/', views.cities, name='citiesNL'),
]
990,275 | 33acd8a6bf521e0bbce64007c8787c7ed782beda | import sys
from datetime import datetime
log_class = None
class Log:
    """Timestamped logger that mirrors every message to a file and stdout."""

    def __init__(self, file_name):
        # The handle is opened in 'w' (truncating) mode and stays open for
        # the lifetime of the logger.
        self.log_file = open(file_name, 'w')

    def log(self, message):
        """Write `message` prefixed with the current time to both sinks."""
        line = str(datetime.now().time()) + ": " + str(message) + '\n'
        # Flush each sink immediately so output survives a crash.
        for sink in (self.log_file, sys.stdout):
            sink.write(line)
            sink.flush()
def init_log(file_name):
    """Create the module-wide Log instance (call once before log())."""
    global log_class
    log_class = Log(file_name)


def log(message):
    """Write `message` through the module-wide logger set up by init_log()."""
    log_class.log(message)
|
990,276 | 77003941d261023e208d3f96e4da2f7eb7500931 | import webbrowser
import socket
import random
import requests
import sys
from server import Server
import os
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 4440 # Port to listen on (non-privileged ports are > 1023)
# Backend ports; all backends are assumed to listen on this host.
SERVERS_PORTS = [4441,4442,4443,4444,4445,4446,4447]
SERVERS = []

# class loadBalancer():
#     def __init__(self):
#         pass

def run():
    """Accept client connections forever and forward each request to the
    backend with the fewest active connections (least-connections policy).

    Never returns; blocks in accept()/recv().
    """
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind((HOST, PORT))
    listen_socket.listen(10)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    #for this simulation I'll assume that the hostname will be localhost for all server
    #different configurations can be easly implemented if needed that can allow different hostnames
    #per server
    #for robin round algo
    #SERVERS = [8867,8868, 8869]
    #for least_number_of_connections we need a server class with .number_of_connection attribute
    #then:
    for port in SERVERS_PORTS:
        SERVERS.append(Server(port))
    #n = -1
    while True:
        client_connection, client_address = listen_socket.accept()
        request = client_connection.recv(1024)
        print(request)
        #for lnc algo we need to have in the reques an identifier of the
        #robin round algo
        #n += 1
        #server = SERVERS[n % len(SERVERS_PORTS)]
        #least number of connection algorithm:
        # Refresh each backend's live connection count, then pick the minimum.
        for server in SERVERS:
            print('server port', server.port)
            server.update_number_of_connections()
            print('number of connections', server.number_of_connections)
        server = min(SERVERS, key=(lambda x: x.number_of_connections))
        # once we have the server:
        print("Sending connection to: " + str(server.port))
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = ('0.0.0.0', server.port)
        print(server.port, server.number_of_connections)
        sock.connect(server_address)
        #tm = sock.recv(1024)
        # print(tm)
        sock.sendall("request from balancer".encode())
        sock.close()
        client_connection.sendall('response from balancer'.encode())
        client_connection.close()

run()
# if __name__ == "__main__":
#     loadBalancer = loadBalancer()
#     loadBalancer.run()
990,277 | e2402c758ca3d2c9b85a8509e30275483143844f | #!/usr/bin/env python
# coding=utf-8
# Date: 2018-07-18
"""
https://leetcode.com/problems/prime-number-of-set-bits-in-binary-representation/description/
762. Prime Number of Set Bits in Binary Representation
Given two integers L and R, find the count of numbers in the range [L, R] (inclusive) having a prime number of set bits in their binary representation.
(Recall that the number of set bits an integer has is the number of 1s present when written in binary. For example, 21 written in binary is 10101 which has 3 set bits. Also, 1 is not a prime.)
Example 1:
Input: L = 6, R = 10
Output: 4
Explanation:
6 -> 110 (2 set bits, 2 is prime)
7 -> 111 (3 set bits, 3 is prime)
9 -> 1001 (2 set bits , 2 is prime)
10->1010 (2 set bits , 2 is prime)
Example 2:
Input: L = 10, R = 15
Output: 5
Explanation:
10 -> 1010 (2 set bits, 2 is prime)
11 -> 1011 (3 set bits, 3 is prime)
12 -> 1100 (2 set bits, 2 is prime)
13 -> 1101 (3 set bits, 3 is prime)
14 -> 1110 (3 set bits, 3 is prime)
15 -> 1111 (4 set bits, 4 is not prime)
Note:
L, R will be integers L <= R in the range [1, 10^6].
R - L will be at most 10000.
"""
class Solution(object):
    def countPrimeSetBits(self, L, R):
        """Count integers in [L, R] whose binary popcount is prime.

        :type L: int
        :type R: int
        :rtype: int
        """
        # R <= 10**6 < 2**20, so popcounts never exceed 20 and this fixed
        # prime set is exhaustive.
        primes = {2, 3, 5, 7, 11, 13, 17, 19}
        # `range` instead of `xrange`: identical here and works on both
        # Python 2 and 3 (the original xrange is a NameError under Py3).
        return sum(bin(x).count("1") in primes for x in range(L, R + 1))
# Python 2 driver: exercise both examples from the problem statement
# (expected output: 4 then 5).
solution = Solution()
print solution.countPrimeSetBits(6, 10)
print solution.countPrimeSetBits(10, 15)
"""
https://leetcode.com/problems/prime-number-of-set-bits-in-binary-representation/solution/
Intuition and Approach
For each number from L to R, let's find out how many set bits it has. If that number is 2, 3, 5, 7, 11, 13, 17, or 19, then we add one to our count. We only need primes up to 19 because R ≤ 10^6 < 2^20.
class Solution(object):
def countPrimeSetBits(self, L, R):
primes = {2, 3, 5, 7, 11, 13, 17, 19}
return sum(bin(x).count('1') in primes
for x in xrange(L, R+1))
"""
"""
http://bookshadow.com/weblog/2018/01/14/leetcode-prime-number-of-set-bits-in-binary-representation/
题目大意:
求范围[L, R]的整数中,二进制表示中1的个数为素数的整数个数
解题思路:
埃拉托斯特尼筛法
类似题目:http://bookshadow.com/weblog/2015/04/27/leetcode-count-primes/
class Solution(object):
def __init__(self):
MAXN = 100
self.prime = [1] * (MAXN + 1)
self.prime[0] = self.prime[1] = 0
for x in range(2, MAXN + 1):
if self.prime[x]:
y = x ** 2
while y <= MAXN:
self.prime[y] = 0
y += x
def countPrimeSetBits(self, L, R):
ans = 0
for x in range(L, R + 1):
ans += self.prime[bin(x).count('1')]
return ans
"""
|
990,278 | bb736f62be745d283d35e0a57a51fe263ab2016d | #!/usr/bin/evn python
# coding=utf-8
import unittest
from common import db_helper
from config import db_config
class DbHelperTest(unittest.TestCase):
    """Tests for the database helper package."""

    def setUp(self):
        """Initialize the test environment."""
        print('------ini------')

    def tearDown(self):
        """Clean up the test environment."""
        print('------clear------')

    def test(self):
        """Smoke test: render SQL, then execute select/insert and commit."""
        # Open the database connection via the `with` context manager.
        with db_helper.PgHelper(db_config.DB, db_config.IS_OUTPUT_SQL) as db:
            # Statement with bind placeholders.
            sql = """insert into product (name, code) values (%s, %s) returning id"""
            # Parameters to bind.
            vars = ('张三', '201807251234568')
            # Render the final SQL and print it to the console.
            print(db.get_sql(sql, vars))
            db.execute('select * from product where id=1000')
            db.execute('insert into product (name, code) values (%s, %s) returning id', ('张三', '201807251234568'))
            db.commit()


if __name__ == '__main__':
    unittest.main()
|
# Scratch file used to practice git branching and merge conflicts;
# the statements below have no functional purpose.
a=1
1123
#i went back to change1
#i was changed in dev branch
#edit in master and dev
990,280 | a79ace427a105cb6ee07f3c2a923f35ef5c6bac5 | #! /usr/bin/python
from matplotlib import use
use('agg')
import pymultinest
from sbms.engine import engine
from sbms.marginals import marginals
from sbms.io_sbms import read_ini
import json
import optparse
import numpy as np
from jinja2 import Environment, PackageLoader
"""
This is an example script that runs the analysis 3 times:
We inject a broken power law each time and we recover it
using a broken power law recovery, a power law recovery,
and a noise model recovery.
This script is meant to be run on the Caltech computing cluster, where
the example files live in a home directory.
THIS SCRIPT IS A WORKING EXAMPLE.
"""
def parse_command_line():
    """Parse command-line options.

    Returns the optparse options object. The only option is
    --output-directory/-o (dest 'output_dir', default './'); the path must
    already exist.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        "--output-directory", "-o",
        help="output prefix. path needs to exist right now.",
        default='./',
        dest="output_dir", type=str)
    opts, _ = parser.parse_args()
    return opts
if __name__=="__main__":
    params = parse_command_line()
    # Output prefixes for the two competing recoveries.
    output_pref_comb = params.output_dir + 'comb_'
    output_pref_noise = params.output_dir + 'noise_'
    injection_file = 'examples/use_real_data.ini'
    #example files directory
    # edit this to fit your own needs
    exdir = '/home/meyers/git_repos/sgwb_model_selection'
    # Same noise seed for both runs so they see identical realizations.
    seed = 12345
    # get evidences
    # run things
    print 'Running comb recovery'
    print 'Results here: %s' % output_pref_comb
    engine(injection_file,'examples/recovery_comb.ini',output_pref_comb, noise_seed=seed)
    print 'Running noise recovery'
    print 'Results here: %s' % output_pref_noise
    engine(injection_file,'noise',output_pref_noise, noise_seed=seed)
    print 'Done running Multinest'
    print 'Collecting output'
    marginals(output_pref_comb, read_ini('examples/recovery_comb.ini'))
    # Global log-evidences used for the Bayes-factor comparison.
    comb_evidence = pymultinest.Analyzer(n_params = 3, outputfiles_basename = output_pref_comb).get_stats()['global evidence']
    noise_evidence = json.load(open(output_pref_noise + 'stats.json'))['global evidence']
    print 'Comb evidence'
    print comb_evidence
    print 'Noise law evidence'
    print noise_evidence
    print 'log Bayes Factor of comb to noise: '
    print (comb_evidence - noise_evidence)
    print 'DONE!'
    # Build the HTML summary page from the jinja2 template.
    results = {}
    results['comb'] = {'evidence' : comb_evidence, 'marg' : output_pref_comb + 'marg.png', 'name':'Comb Law'}
    results['noise'] = {'evidence' : noise_evidence, 'marg' : output_pref_noise + 'marg.png', 'name':'Noise'}
    # NOTE(review): 'Injection' reuses the noise entries verbatim — looks
    # like a placeholder; confirm the intended contents.
    results['Injection'] = {'evidence' : noise_evidence, 'marg' : output_pref_noise + 'marg.png', 'name':'Noise'}
    env = Environment(loader=PackageLoader('sbms','templates'))
    template = env.get_template('summary.html')
    f1 = open(params.output_dir + '/summary.html', 'w')
    print >>f1, template.render(results=results)
    f1.close()
990,281 | bbbffc06e5e3ab2b43dd4bb8db39060069ce1cbd | #WAP to accept 2 strings from user and check if 2nd string is right rotation of 1st eg: 1st: manager 2: germana
# Check whether the 2nd string is a right rotation of the 1st,
# e.g. "manager" / "germana" -> True.
str1 = input("Enter a 1st string: ")
str2 = input("Enter a 2nd string: ")

# A string t is a rotation of s iff the lengths match and t occurs in s+s.
# The length check is the fix: without it any substring of s (e.g. "man"
# vs "manager") was wrongly accepted. Also switched the Python 2 `print`
# statement to a call, consistent with the Python 3 input() usage above,
# and fixed the "sting" typo in the prompt.
print(len(str1) == len(str2) and str2 in (str1 + str1))
# Print each name on its own line; a loop replaces the original's four
# hand-written indexed print calls (same output, no off-by-one risk).
names = ['dejan', 'marko', 'cika', 'ivce']
for name in names:
    print(name)
990,283 | 92421343b461f8b3dee61e2e8397fcee86cee911 | from typing import List, Optional
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.tokenizers.spacy_tokenizer import SpacyTokenizer
from allennlp.models import Model
from allennlp.predictors.predictor import Predictor
from overrides import overrides
@Predictor.register("anomaly_detector")
class AnomarlyDetector(Predictor):
    """Predictor that augments model outputs with an anomaly flag.

    When a threshold is configured, each output dict gains a boolean
    "anomaly" key derived from its "anomaly_scores" value.

    NOTE(review): the class name is misspelled ("Anomarly"); kept as-is
    because renaming would break imports. The registry key is correct.
    """

    def __init__(
        self,
        model: Model,
        dataset_reader: DatasetReader,
        frozen: bool = True,
        threshold: Optional[float] = None,
    ) -> None:
        """`threshold`: scores strictly above it are flagged; None disables
        flagging and leaves only the raw scores in the output."""
        super().__init__(model, dataset_reader, frozen)
        self._threshold = threshold

    def detect_anomaly(self, json_dict: JsonDict) -> JsonDict:
        """Add "anomaly" to json_dict in place (if a threshold is set)."""
        if self._threshold is not None:
            # Assumes json_dict["anomaly_scores"] supports `>` comparison
            # (scalar or array-like) — TODO confirm against the model output.
            json_dict["anomaly"] = json_dict["anomaly_scores"] > self._threshold
        return json_dict

    def predict_instance(self, instance: Instance) -> JsonDict:
        outputs = super().predict_instance(instance)
        outputs = self.detect_anomaly(outputs)
        return outputs

    def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
        outputs = super().predict_batch_instance(instances)
        outputs = [self.detect_anomaly(x) for x in outputs]
        return outputs

    @overrides
    def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        text = json_dict["text"]
        # Tokenize here only when the reader has no tokenizer of its own
        # (checked under both common attribute names).
        reader_has_tokenizer = (
            getattr(self._dataset_reader, "tokenizer", None) is not None
            or getattr(self._dataset_reader, "_tokenizer", None) is not None
        )
        if not reader_has_tokenizer:
            tokenizer = SpacyTokenizer()
            text = tokenizer.tokenize(text)
        return self._dataset_reader.text_to_instance(text)
|
990,284 | cd6124d1eab77349e7fa10ca4ee9b16855ed79cb | import random
from tkinter import *
from tkinter import filedialog as fd
from tkinter import messagebox as mb
from DrawScheme import *
use_els = ["resister", "switch", "lamp", "motor", "button", "bell", "condensator"]
def make_field(sch):
    """Render a sparse scheme dict into a rectangular grid of cell strings.

    `sch` maps (x, y) -> (element_id, up, right, down, left). Element id -1
    is a wire ('-'), -2 a junction ('+'); any other id is looked up in the
    module-level `els` list. Each grid cell is "<sym> w w w w" with the four
    wire flags rendered as 0/1; unused cells are "- 0 0 0 0".
    """
    min_x = min_y = float("inf")
    max_x = max_y = -float("inf")
    for (cx, cy) in sch:
        min_x = min(min_x, cx)
        max_x = max(max_x, cx)
        min_y = min(min_y, cy)
        max_y = max(max_y, cy)
    grid = [["- 0 0 0 0"] * (max_y - min_y + 1) for _ in range(max_x - min_x + 1)]
    for (cx, cy), info in sch.items():
        code = info[0]
        if code == -1:
            sym = '-'
        elif code == -2:
            sym = '+'
        else:
            sym = els[code][0]
        flags = ' '.join('1' if flag else '0' for flag in info[1:5])
        grid[cx - min_x][cy - min_y] = sym + ' ' + flags
    return grid
def merge_schemes(sch1, sch2):
    """Render both schemes and place them side by side with a blank column.

    Rows beyond the shorter scheme are kept: surplus left rows are emitted
    as-is, surplus right rows are left-padded with empty cells.
    """
    left = make_field(sch1)
    right = make_field(sch2)
    gap = ["- 0 0 0 0"]
    merged = []
    for row in range(max(len(left), len(right))):
        if row < len(left) and row < len(right):
            merged.append(left[row] + gap + right[row])
        elif row < len(left):
            merged.append(left[row])
        else:
            merged.append(gap * (len(left[0]) + 1) + right[row])
    return merged
def conns(x1, y1, x2, y2):
    """Return (up, right, down, left) flags pointing from (x1, y1) to (x2, y2).

    Same cell -> all False; non-adjacent cells -> implicit None. The x axis
    is checked before y, matching the original priority order.
    """
    dx, dy = x1 - x2, y1 - y2
    if dx == 0 and dy == 0:
        return False, False, False, False
    if dx == 1:
        return True, False, False, False
    if dx == -1:
        return False, False, True, False
    if dy == 1:
        return False, False, False, True
    if dy == -1:
        return False, True, False, False
def randomise(k_els):
    """Build a random circuit description in the module globals.

    Populates:
      els         -- list of (kind, group_a, group_b) tuples; the final two
                     entries are the "contact+" / "contact-" supply contacts.
      saved_conns -- list of sets; saved_conns[g] holds the indices of
                     elements attached to connection group g.
    """
    global els, saved_conns
    els = [random.choice(use_els) for _ in range(k_els)]
    saved_conns = [set()]
    for n in range(len(els) - 1):
        # Attach element n between the newest group and a random group;
        # if the random pick collides with the newest, open a fresh group.
        one = len(saved_conns) - 1
        two = random.randint(0, len(saved_conns) - 1)
        if two == len(saved_conns) - 1:
            two += 1
            saved_conns.append(set())
        els[n] = (els[n], one, two)
        saved_conns[one].add(n)
        saved_conns[two].add(n)
    # Close the circuit: the last real element links group 0 to the last group.
    els[-1] = (els[-1], 0, len(saved_conns) - 1)
    saved_conns[0].add(len(els) - 1)
    saved_conns[-1].add(len(els) - 1)
    # Supply contacts occupy pseudo element indices len(els) and len(els)+1.
    saved_conns[-1].add(len(els))
    saved_conns[-2].add(len(els) + 1)
    els += [("contact+", len(saved_conns) - 1, len(saved_conns) - 1),
            ("contact-", len(saved_conns) - 2, len(saved_conns) - 2)]
def change():
    """Randomly mutate the circuit; return whether it stayed the same task.

    With p≈0.5: swap two elements between connection groups 0 and 1
    (presumably an equivalence-preserving rewiring — confirm) and return
    True; otherwise replace one element's kind with a different random
    kind and return False. Supply contacts (the last two els entries) are
    never swapped.
    """
    if random.random() > 0.5:
        # Need at least one element exclusive to each of groups 0 and 1.
        if len(saved_conns[0] - saved_conns[1] - {len(els) - 1, len(els) - 2}) > 0\
                and len(saved_conns[1] - saved_conns[0] - {len(els) - 1, len(els) - 2}) > 0:
            one = random.choice(list(saved_conns[0] - saved_conns[1] - {len(els) - 1, len(els) - 2}))
            two = random.choice(list(saved_conns[1] - saved_conns[0] - {len(els) - 1, len(els) - 2}))
            # Flip each chosen element's membership 0 <-> 1 on whichever
            # terminal referenced the group.
            if els[one][1] == 0:
                els[one] = (els[one][0], 1, els[one][2])
            if els[one][2] == 0:
                els[one] = (els[one][0], els[one][1], 1)
            if els[two][1] == 1:
                els[two] = (els[two][0], 0, els[two][2])
            if els[two][2] == 1:
                els[two] = (els[two][0], els[two][1], 0)
            saved_conns[0].remove(one)
            saved_conns[0].add(two)
            saved_conns[1].remove(two)
            saved_conns[1].add(one)
    else:
        # Replace a random (non-contact) element's kind with a different one.
        new_el = random.choice(use_els)
        num_change = random.randint(0, len(els) - 3)
        while new_el == els[num_change][0]:
            new_el = random.choice(use_els)
        els[num_change] = (new_el, els[num_change][1], els[num_change][2])
        return False
    return True
def up(scheme, group, x, y, straight):
    """Try to extend the route into the cell above (x, y)."""
    if (x - 1, y) in used:
        return False
    return go(scheme, group, x - 1, y, x, y, straight + 1)


def down(scheme, group, x, y, straight):
    """Try to extend the route into the cell below (x, y)."""
    if (x + 1, y) in used:
        return False
    return go(scheme, group, x + 1, y, x, y, straight + 1)


def right(scheme, group, x, y, straight):
    """Try to extend the route into the cell right of (x, y)."""
    if (x, y + 1) in used:
        return False
    return go(scheme, group, x, y + 1, x, y, straight + 1)


def left(scheme, group, x, y, straight):
    """Try to extend the route into the cell left of (x, y)."""
    if (x, y - 1) in used:
        return False
    return go(scheme, group, x, y - 1, x, y, straight + 1)
def go(scheme, group, x, y, x_last, y_last, straight=0):
    """Recursive backtracking step: extend connection `group` into cell (x, y).

    (x_last, y_last) is the cell we arrived from; `straight` counts the
    recursion depth (capped at 7). Reads/mutates the module globals
    `connections`, `placed` and `used`. Returns True when the step (and any
    recursion it triggered) succeeded.
    """
    if straight > 7:
        return False
    if len(connections[group]) == 0:  # end of part of scheme
        return False
    used.add((x, y))
    # Wire flags from this cell back toward the previous cell.
    c = conns(x, y, x_last, y_last)
    if (x, y) not in scheme and (2 * x_last - x, 2 * y_last - y) not in scheme and (
            len(connections[group] - placed.keys()) > 0
    ) and not (
            x == x_last and y == y_last  # place element
    ):
        # Free cell and an unplaced element remains: drop the element here.
        scheme[(x, y)] = ((connections[group] - placed.keys()).pop(), c[0], c[1], c[2], c[3])
        connections[group].remove(scheme[(x, y)][0])
        placed[scheme[(x, y)][0]] = (x, y)
        used.remove((x, y))
        return True
    elif (x, y) not in scheme:  # place wire
        # Try all four directions in random order; keep the cell as a wire
        # if at least one continuation succeeded.
        now = [-1, c[0], c[1], c[2], c[3]]
        use_cell = False
        dirs = [up, right, down, left]
        num_dirs = {up: 1, right: 2, down: 3, left: 4}
        for _ in range(4):
            dir1 = random.choice(dirs)
            if dir1(scheme, group, x, y, straight):
                now[num_dirs[dir1]] = True
                use_cell = True
            dirs.remove(dir1)
        used.remove((x, y))
        if use_cell:
            scheme[(x, y)] = tuple(now)
            return True
        else:
            return False
    elif scheme[(x, y)][0] >= 0 and scheme[(x, y)][0] in connections[group] and (
            (scheme[(x, y)][1] or scheme[(x, y)][3]) == (c[0] or c[2])  # connect with placed earlier element
    ):
        # Cell holds an element of this group with a compatible orientation:
        # merge the new wire flags into it.
        now = scheme[(x, y)]
        connections[group].remove(now[0])
        scheme[(x, y)] = (now[0], now[1] or c[0], now[2] or c[1], now[3] or c[2], now[4] or c[3])
        used.remove((x, y))
        return True
    elif scheme[(x, y)][0] == -1:  # place + on wire
        # Crossing an existing perpendicular wire: only allowed if we can
        # continue straight through to the opposite cell, turning the wire
        # into a '+' junction.
        now = scheme[(x, y)]
        if ((c[0] or c[2]) and not (now[1] or now[3])) or ((c[1] or c[3]) and not (now[2] or now[4])):
            if (2 * x - x_last, 2 * y - y_last) not in used and \
                    go(scheme, group, 2 * x - x_last, 2 * y - y_last, x, y, straight + 1):
                scheme[(x, y)] = (-2, True, True, True, True)
                used.remove((x, y))
                return True
            else:
                used.remove((x, y))
                return False
        else:
            used.remove((x, y))
            return False
    else:  # if can't use this cell
        used.remove((x, y))
        return False
def generate(scheme):
    """Fill `scheme` (dict: cell -> tuple) with a random routed layout of `els`.

    Retries from scratch until a full routing succeeds. Rebinds the module
    globals placed/used/connections on every attempt; `saved_conns` is
    copied so retries start from the same connectivity.
    """
    global placed, used, connections
    created = False
    while not created:
        err = True
        # Reset all routing state for a fresh attempt.
        scheme.clear()
        for i in scheme:
            del scheme[i]
        used = set()
        connections = []
        for i in saved_conns:
            connections.append(i.copy())
        placed = dict()
        # Route a random starting group from the origin cell.
        START_C = random.randint(0, len(connections) - 1)
        go(scheme, START_C, 0, 0, 0, 0)
        placed_conns = {START_C}
        for i in range(len(connections)):
            # Find a placed element with a not-yet-routed terminal group.
            start_el = -1
            for el in placed:
                if els[el][1] not in placed_conns or els[el][2] not in placed_conns:
                    start_el = el
                    break
            if start_el == -1:
                # NOTE(review): err is still True here, so the attempt is
                # accepted below (`created = True`) — presumably "nothing
                # left to route" counts as success; confirm.
                created = False
                break
            # Route the pending group, starting from the neighbour cell on
            # the side where the element already has a wire flag.
            if els[start_el][1] not in placed_conns:
                if scheme[placed[start_el]][1]:  # if up
                    if not go(scheme, els[start_el][1], placed[start_el][0] + 1, placed[start_el][1],
                              placed[start_el][0] + 1, placed[start_el][1]):
                        err = False
                        continue
                    placed_conns.add(els[start_el][1])
                elif scheme[placed[start_el]][2]:  # if right
                    if not go(scheme, els[start_el][1], placed[start_el][0], placed[start_el][1] - 1,
                              placed[start_el][0], placed[start_el][1] - 1):
                        err = False
                        break
                    placed_conns.add(els[start_el][1])
                elif scheme[placed[start_el]][3]:  # if down
                    if not go(scheme, els[start_el][1], placed[start_el][0] + 1, placed[start_el][1],
                              placed[start_el][0] + 1, placed[start_el][1]):
                        err = False
                        break
                    placed_conns.add(els[start_el][1])
                elif scheme[placed[start_el]][4]:  # if left
                    if not go(scheme, els[start_el][1], placed[start_el][0], placed[start_el][1] + 1,
                              placed[start_el][0], placed[start_el][1] + 1):
                        err = False
                        break
                    placed_conns.add(els[start_el][1])
                # The group must be fully consumed, otherwise retry.
                if len(connections[els[start_el][1]]) > 0:
                    err = False
                    break
            elif els[start_el][2] not in placed_conns:
                if scheme[placed[start_el]][1]:  # if up
                    if not go(scheme, els[start_el][2], placed[start_el][0] + 1, placed[start_el][1],
                              placed[start_el][0] + 1, placed[start_el][1]):
                        err = False
                        break
                    placed_conns.add(els[start_el][2])
                elif scheme[placed[start_el]][2]:  # if right
                    if not go(scheme, els[start_el][2], placed[start_el][0], placed[start_el][1] - 1,
                              placed[start_el][0], placed[start_el][1] - 1):
                        err = False
                        break
                    placed_conns.add(els[start_el][2])
                elif scheme[placed[start_el]][3]:  # if down
                    if not go(scheme, els[start_el][2], placed[start_el][0] + 1, placed[start_el][1],
                              placed[start_el][0] + 1, placed[start_el][1]):
                        err = False
                        break
                    placed_conns.add(els[start_el][2])
                elif scheme[placed[start_el]][4]:  # if left
                    if not go(scheme, els[start_el][2], placed[start_el][0], placed[start_el][1] + 1,
                              placed[start_el][0], placed[start_el][1] + 1):
                        err = False
                        break
                    placed_conns.add(els[start_el][2])
                if len(connections[els[start_el][2]]) > 0:
                    err = False
                    break
        if err:
            created = True
def draw_pare(k_els):
    """Build one task pair: generate two schemes, record whether they are
    equivalent, and return them merged into a single drawable structure."""
    global scheme1, scheme2
    randomise(k_els)
    scheme1, scheme2 = {}, {}
    generate(scheme1)
    # change() decides whether the second scheme stays equivalent to the first.
    answers.append("ДА" if change() else "НЕТ")
    generate(scheme2)
    return merge_schemes(scheme1, scheme2)
def dir_name(path):
    """Return the text after the last '/' in `path` (the whole string when
    there is no slash, and '' when the path ends with a slash)."""
    return path.rsplit('/', 1)[-1]
def choose_directory():
    """Ask the user for an output folder and swap the Choose button for a label."""
    global directory, choosed, l_dir
    directory = fd.askdirectory()
    # Show only the final path component next to the form.
    l_dir = Label(r, text=dir_name(directory), font=("Comic Sans", 15), bg="#ffdddd", fg="#aa00ff")
    b_choose.destroy()
    l_dir.grid(row=0, column=1, sticky=W, pady=10, padx=5)
    choosed = True  # run() refuses to start until a folder has been chosen
    r.update()
def is_num(s):
    """True iff `s` is non-empty and consists only of the characters 0-9."""
    return bool(s) and all('0' <= ch <= '9' for ch in s)
def run():
    """Validate the form, replace it with a progress view, and generate the tasks."""
    if not choosed:
        mb.showerror("Ошибка", "Выберите папку")
        return
    if not (is_num(que.get()) and is_num(k_els.get())):
        mb.showerror("Ошибка", "Должно быть введено число")
        return
    q = int(que.get())  # number of task images to generate
    k = int(k_els.get())  # number of elements per scheme
    # Tear down the input form; only the progress label and bar remain.
    label1.destroy()
    label2.destroy()
    label3.destroy()
    l_dir.destroy()
    que.destroy()
    b_work.destroy()
    k_els.destroy()
    l_process = Label(r, text="Генерация: 0 из " + str(q), font=("Comic Sans", 15, "bold"), bg="#ffdddd", fg="purple")
    c = Canvas(r, width=1000, height=40, bg="#ddffdd")
    l_process.grid(row=0, column=0, padx=10, pady=5, sticky=W)
    c.grid(row=1, column=0, padx=10, pady=5)
    r.update()
    global answers
    answers = []  # draw_pare() appends "ДА"/"НЕТ" per generated pair
    for i in range(q):
        im = draw(draw_pare(k), i + 1)
        im.save(directory + '/' + str(i + 1) + ".png")
        l_process['text'] = "Генерация: " + str(i + 1) + " из " + str(q)
        # Progress bar: fill proportionally to the number of finished tasks.
        c.create_rectangle(0, 0, int(1000 / q * (i + 1)), 40, fill="#0000dd")
        r.update()
    # Write the answer key next to the generated images.
    with open(directory + '/answers.txt', 'w') as ans:
        print("Ответы к заданиям 'Электрические схемы'\n'ДА' - схемы эквивалентны,'НЕТ' - не эквивалентны", file=ans)
        for i, a in enumerate(answers):
            print(i + 1, a, file=ans)
    r.destroy()
load_base()
# Build the input form: output folder, task count, elements-per-scheme.
r = Tk()
r.title("Генератор заданий")
r['bg'] = "#ffdddd"
label1 = Label(r, text="Папка для сгенерированных заданий:", font=("Comic Sans", 15, "bold"), bg="#ffdddd", fg="#ff00aa")
b_choose = Button(r, text="Выбрать", font=("Comic Sans", 15, "bold"), bg="#ffccff", fg="blue",
                  activeforeground="blue", activebackground="#ffd8ff", command=choose_directory)
label2 = Label(r, text="Количество заданий:", font=("Comic Sans", 15, "bold"), bg="#ffdddd", fg="#ff00aa")
que = Spinbox(r, width=10, from_=1, to=1000, font=("Comic Sans", 15))
label3 = Label(r, text="Количество элементов в схемах:", font=("Comic Sans", 15, "bold"), bg="#ffdddd", fg="#ff00aa")
k_els = Spinbox(r, width=10, from_=1, to=10, font=("Comic Sans", 15))
b_work = Button(r, text="Сгенерировать", font=("Comic Sans", 20, "bold"), bg="#ffffcc", fg="orange",
                activeforeground="orange", activebackground="#ffffd8", command=run)
label1.grid(row=0, column=0, sticky=W, pady=10, padx=5)
b_choose.grid(row=0, column=1, sticky=W, pady=10, padx=5)
label2.grid(row=1, column=0, sticky=W, pady=10, padx=5)
que.grid(row=1, column=1, sticky=W, pady=10, padx=5)
label3.grid(row=2, column=0, sticky=W, pady=10, padx=5)
k_els.grid(row=2, column=1, sticky=W, pady=10, padx=5)
b_work.grid(row=3, column=0, columnspan=2, pady=10, padx=5)
choosed = False  # set by choose_directory(); checked by run()
r.mainloop()
|
990,285 | 9bf983d79432a80518d8bb6d2e9ab4d008a0280e | from django.conf.urls import patterns, url, include
from rest_framework.routers import SimpleRouter
from api import views as api_views
# One REST route collection per resource, all registered on a single router.
router = SimpleRouter(trailing_slash=False)
router.register('children', api_views.ChildViewSet)
router.register('parents', api_views.ParentViewSet)
router.register('sitters', api_views.SitterViewSet)
router.register('beacons', api_views.BeaconViewSet)
router.register('beacon-responses', api_views.SitterBeaconResponseViewSet)
urlpatterns = router.urls
# Session login/logout views for DRF's browsable API.
urlpatterns += [
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
990,286 | 7fbcf8d11b07bd73edb4a0715d4ef7a706a07149 | # MIT License Copyright (c) 2019-2023 rootVIII
from boto3.session import Session
class S3:
    """Base class that owns the boto3 Session shared by the client/resource mixins."""
    def __init__(self):
        self.session = Session()
class S3Client(S3):
    """Thin wrapper around the low-level boto3 S3 client."""
    def __init__(self):
        S3.__init__(self)
        self.session_client = self.session.client('s3')

    def upload_s3(self, file_path: str, bucket: str, bucket_path: str):
        """Upload the local file at `file_path` to s3://<bucket>/<bucket_path>."""
        with open(file_path, 'rb') as file_handle:
            self.session_client.upload_fileobj(file_handle, bucket, bucket_path)

    def download_s3(self, bucket: str, remote_file_path: str, local_file_name: str):
        """Download s3://<bucket>/<remote_file_path> to `local_file_name`."""
        self.session_client.download_file(bucket, remote_file_path, local_file_name)

    def list_avail_buckets(self) -> list:
        """Return the names of all buckets visible to these credentials."""
        return [bucket['Name'] for bucket in self.session_client.list_buckets()['Buckets']]

    def delete_obj(self, bucket, bucket_path: str):
        """Delete every object whose key starts with `bucket_path`.

        Bug fix: list_objects_v2 omits the 'Contents' key when the prefix
        matches nothing, so the original raised KeyError on an empty result
        (it also abused a list comprehension for its side effects).
        NOTE(review): list_objects_v2 returns at most 1000 keys per call;
        prefixes with more objects would need the paginator.
        """
        response = self.session_client.list_objects_v2(Bucket=bucket,
                                                       Prefix=bucket_path)
        for obj in response.get('Contents', []):
            self.session_client.delete_object(Bucket=bucket, Key=obj['Key'])
class S3Resource(S3):
    """Wrapper around the high-level boto3 S3 resource API."""
    def __init__(self):
        S3.__init__(self)
        self.resource_client = self.session.resource('s3')

    def list_bucket_contents(self, bucket: str) -> list:
        """Return the key of every object stored in `bucket`."""
        bucket_obj = self.resource_client.Bucket(bucket)
        return [obj.key for obj in bucket_obj.objects.filter()]
class S3Session(S3Client, S3Resource):
    """Facade exposing both the client-level and resource-level helpers."""
    def __init__(self):
        # Explicit chained init: S3.__init__ runs once per base, so the
        # Session is created twice — redundant but harmless.
        S3Client.__init__(self)
        S3Resource.__init__(self)
|
990,287 | ba61892d0a61ad2f8ae2488370c6222d9f5f92e4 | import random
import pyowm
from pyowm import OWM
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
def write_msg(user_id, message):
    """Send `message` to `user_id` via the community token.
    NOTE(review): random_id in 0..2048 is a small range — VK deduplicates
    messages with a repeated random_id, so collisions can drop messages.
    """
    vk.method('messages.send', {'user_id': user_id, 'message': message, 'random_id': random.randint(0, 2048)})
# API-keys
owm = pyowm.OWM('OWM Token...', language='ru')
token = "VK group Token..."
# Authorization as a community
vk = vk_api.VkApi(token=token)
# Messaging
longpoll = VkLongPoll(vk)
# Main cycle
def main():
    """Poll the group long-poll server and answer every message with the weather.

    NOTE(review): the __name__ guard is INSIDE the function, so main() is a
    silent no-op when this module is imported; the guard conventionally
    belongs at module level around the main() call.
    """
    if __name__ == '__main__':
        while True:
            for event in longpoll.listen():
                # If a new message arrives
                if event.type == VkEventType.MESSAGE_NEW:
                    # If it has a label for me (i.e. bot)
                    if event.to_me:
                        # Message from user
                        request = event.text
                        # Error exception
                        try:
                            # Weather request — pyowm 2.x API; presumably the
                            # message text is a city name (verify pyowm version).
                            observation = owm.weather_at_place(event.text)
                            w = observation.get_weather()
                            temp = w.get_temperature('celsius')["temp"]
                            answer = 'В городе ' + event.text + ' сейчас ' + w.get_detailed_status() + '.' + '\n'
                            answer += 'Температура в районе ' + str(temp) + '.' + '\n\n'
                            if temp < 10:
                                answer += 'Сейчас холодно! Ты что, хочешь маму расстроить?'
                            elif temp < 20:
                                answer += 'Погода более менее, можешь одевать кросы!'
                            else:
                                answer += 'Наслаждаемся летом!'
                            # Response logic
                            # NOTE(review): `request` was assigned from
                            # event.text above, so this condition is always True.
                            if request == event.text:
                                write_msg(event.user_id, answer)
                        except Exception:
                            write_msg(event.user_id, 'Введите корректное название города.')
main()
|
990,288 | d66524893673d706b889cc177a0d3fa38d084a17 | import asyncio
from pyppeteer import launch
async def main():
    """Open a browser tab and navigate to Yahoo! Japan."""
    # Launch a visible (non-headless) browser.
    browser = await launch(headless=False)
    # Open a new tab.
    page = await browser.newPage()
    # Bug fix: the original called asyncio.gather() without awaiting it, so
    # main() returned (and the event loop shut down) before navigation
    # started or finished.
    await asyncio.gather(
        page.goto('https://www.yahoo.co.jp/'),  # load the URL
        page.waitForNavigation()                # wait for the navigation to settle
    )
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
|
990,289 | 550ea2f76483cd4ec4f50b502ec76a4c2edb2264 | from invisibleroads.scripts import launch
CONFIGURATION_TEXT = '''\
[app:main]
use = egg:pyramid
data.folder = %(here)s/data
'''
class TestInitializePostsScript(object):
    """Checks for the `invisibleroads initialize` console script."""

    def test_run(self, mocker, tmp_path):
        """Running the script should create the configured data folder."""
        data_folder = tmp_path / 'data'
        assert not data_folder.exists()
        # Force both settings loaders to point data.folder at our tmp path.
        module_uri = 'invisibleroads_posts.routines.configuration'
        for patched_name in ('load_bootstrapped_settings',
                             'load_filled_settings'):
            mocker.patch(module_uri + '.' + patched_name, return_value={
                'data.folder': data_folder})
        launch([
            'invisibleroads',
            'initialize',
            'test.ini',
            '--restart',
        ])
        assert data_folder.exists()
|
990,290 | 59eaade0867ae71fc6e28e06044ad8f670d2da72 | import itertools
class Solution(object):
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence (n >= 1).

        :type n: int
        :rtype: str
        """
        term = "1"
        for _ in range(n - 1):
            # Describe each run of identical digits as "<count><digit>".
            parts = []
            for digit, run in itertools.groupby(term):
                parts.append(str(len(list(run))) + digit)
            term = ''.join(parts)
        return term
# Quick manual check of countAndSay.
foo =Solution()
n = 5
res = foo.countAndSay(n)
print(res)
# itertools.groupby demo: run lengths of consecutive identical characters.
for digit, group in itertools.groupby('aabbbbaccc'):
    print(digit, ':', len(list(group)))
|
990,291 | 67b57d73998c3d554dbc033fd3644dc8bcc91980 | print('Nilai kecocokan menu: ', valueMenu) |
990,292 | a21987b8efd0bf6a2fe96c5a28baa18e59d1c01f | import unittest
from unittest import mock
from pythonforandroid.build import run_pymodules_install
class TestBuildBasic(unittest.TestCase):
    """Tests for pythonforandroid.build.run_pymodules_install."""
    def test_run_pymodules_install_optional_project_dir(self):
        """
        Makes sure the `run_pymodules_install()` doesn't crash when the
        `project_dir` optional parameter is None, refs #1898
        """
        ctx = mock.Mock()
        modules = []
        project_dir = None
        with mock.patch('pythonforandroid.build.info') as m_info:
            assert run_pymodules_install(ctx, modules, project_dir) is None
            assert m_info.call_args_list[-1] == mock.call(
                'No Python modules and no setup.py to process, skipping')
    def test_strip_if_with_debug_symbols(self):
        """Object files are stripped only when debug symbols are disabled."""
        ctx = mock.Mock()
        ctx.python_recipe.major_minor_version_string = "python3.6"
        ctx.get_site_packages_dir.return_value = "test-doesntexist"
        ctx.build_dir = "nonexistant_directory"
        ctx.archs = ["arm64"]
        modules = ["mymodule"]
        project_dir = None
        # Patch out everything with side effects so only the strip decision runs.
        with mock.patch('pythonforandroid.build.info'), \
                mock.patch('sh.Command'),\
                mock.patch('pythonforandroid.build.open'),\
                mock.patch('pythonforandroid.build.shprint'),\
                mock.patch('pythonforandroid.build.current_directory'),\
                mock.patch('pythonforandroid.build.CythonRecipe') as m_CythonRecipe, \
                mock.patch('pythonforandroid.build.project_has_setup_py') as m_project_has_setup_py, \
                mock.patch('pythonforandroid.build.run_setuppy_install'):
            m_project_has_setup_py.return_value = False
            # Make sure it is NOT called when `with_debug_symbols` is true:
            ctx.with_debug_symbols = True
            assert run_pymodules_install(ctx, modules, project_dir) is None
            assert m_CythonRecipe().strip_object_files.called is False
            # Make sure strip object files IS called when
            # `with_debug_symbols` is false:
            ctx.with_debug_symbols = False
            assert run_pymodules_install(ctx, modules, project_dir) is None
            assert m_CythonRecipe().strip_object_files.called is True
|
990,293 | 8ec09e94feb40bb9cf051476a5b9704cd2f7cab3 | import unittest
from core.lexer import Position, Lexer, Token, TokenKind
from core.vartypes import VarTypes
class TestLexer(unittest.TestCase):
    """Unit tests for core.lexer.Lexer tokenisation."""
    maxDiff = None

    def _assert_toks(self, toks, kinds):
        """Assert that the list of toks has the given kinds."""
        self.assertEqual([t.kind.name for t in toks], kinds)

    def test_lexer_simple_tokens_and_values(self):
        """Kinds, values, inferred types and positions for 'a+1.'."""
        l = Lexer('a+1.')
        toks = list(l.tokens())
        pos = Position(l.buf, 1, 0)
        self.assertEqual(toks[0], Token(TokenKind.IDENTIFIER, 'a', None, pos))
        pos = Position(l.buf, 1, 1)
        self.assertEqual(toks[1], Token(TokenKind.OPERATOR, '+', None, pos))
        pos = Position(l.buf, 1, 2)
        self.assertEqual(toks[2],
                         Token(TokenKind.NUMBER, '1.', VarTypes.f64, pos))
        pos = Position(l.buf, 1, 3)
        self.assertEqual(toks[3], Token(TokenKind.EOF, '', None, pos))
        # Bug fix: the expected Position was previously built from the OLD
        # lexer's buffer before the new Lexer existed; build it afterwards.
        l = Lexer('0.1519')
        toks = list(l.tokens())
        pos = Position(l.buf, 1, 0)
        self.assertEqual(toks[0],
                         Token(TokenKind.NUMBER, '0.1519', VarTypes.f64,
                               pos))

    def test_token_kinds(self):
        """Keyword/identifier/number/punctuation classification."""
        l = Lexer('10.1 def der extern foo var (')
        self._assert_toks(
            list(l.tokens()), [
                'NUMBER', 'DEF', 'IDENTIFIER', 'EXTERN', 'IDENTIFIER', 'VAR',
                'PUNCTUATOR', 'EOF'
            ])
        l = Lexer('+- 1 2 22 22.4 a b2 C3d')
        self._assert_toks(
            list(l.tokens()), [
                'OPERATOR', 'OPERATOR', 'NUMBER', 'NUMBER', 'NUMBER', 'NUMBER',
                'IDENTIFIER', 'IDENTIFIER', 'IDENTIFIER', 'EOF'
            ])

    def test_var_assignments(self):
        """Numeric literal suffixes select the inferred VarType."""
        l = Lexer('10. 10 10.0 1b 10B')
        toks = list(l.tokens())
        pos = Position(l.buf, 1, 0)
        self.assertEqual(toks[0],
                         Token(TokenKind.NUMBER, '10.', VarTypes.f64, pos))
        pos = Position(l.buf, 1, 4)
        self.assertEqual(toks[1],
                         Token(TokenKind.NUMBER, '10', VarTypes.i32, pos))
        pos = Position(l.buf, 1, 7)
        self.assertEqual(toks[2],
                         Token(TokenKind.NUMBER, '10.0', VarTypes.f64, pos))
        pos = Position(l.buf, 1, 12)
        self.assertEqual(toks[3],
                         Token(TokenKind.NUMBER, '1', VarTypes.bool, pos))
        pos = Position(l.buf, 1, 15)
        self.assertEqual(toks[4],
                         Token(TokenKind.NUMBER, '10', VarTypes.i8, pos))

    def test_string_assignment(self):
        """A double-quoted literal lexes to a STRING token."""
        l = Lexer('"Hello world"')
        toks = list(l.tokens())
        # Bug fix: every other expectation passes (buf, line, col) to
        # Position; this one called Position(1, 1) without the buffer.
        # TODO(review): confirm the column (0 assumes the opening quote).
        pos = Position(l.buf, 1, 0)
        self.assertEqual(toks[0],
                         Token(TokenKind.STRING, 'Hello world', VarTypes.str,
                               pos))

    def test_skip_whitespace_comments(self):
        """Whitespace and '#' comments never produce tokens."""
        l = Lexer('''
            def foo # this is a comment
            # another comment
            \t\t\t10
            ''')
        self._assert_toks(
            list(l.tokens()), ['DEF', 'IDENTIFIER', 'NUMBER', 'EOF'])
def MTC(time):
    """Convert an "H:MM" clock string to minutes since midnight."""
    parts = time.split(":")
    return int(parts[0]) * 60 + int(parts[1])
def toMTC(time):
    """Convert minutes-since-midnight back to "H:MM" text (minutes zero-padded).

    Fixes: the original shadowed the builtin `list`; divmod replaces the
    manual subtract/divide/zero-pad dance.
    """
    hours, minutes = divmod(int(time), 60)
    return f"{hours}:{minutes:02d}"
def calendarAlgorithm(list1, list2, meetingLength):
    """Print time windows of at least `meetingLength` minutes free in BOTH calendars.

    list1/list2: lists of "H:MM-H:MM" strings where element 0 appears to be the
    person's overall working bounds and the rest are busy slots (assumed sorted)
    — TODO confirm against callers.
    NOTE(review): results are printed rather than returned, and the builtin
    `str` is shadowed repeatedly; kept as-is in this documentation pass.
    """
    list1Avaliable = []
    list2Avaliable = []
    totalAvaliable = []
    finalList = []
    result = []
    # Free gaps in calendar 1: before the first busy slot, between consecutive
    # busy slots, and after the last one (bounded by element 0's end time).
    for i in range(0, len(list1)):
        if i == len(list1)-1:
            timeSlotList = list1[i].split('-')
            boundEnd = list1[0].split('-')
            if (MTC(boundEnd[1])-MTC(timeSlotList[1])) >= meetingLength:
                avaliable = []
                avaliable.append(timeSlotList[1])
                avaliable.append(boundEnd[1])
                str='-'
                avaliable = str.join(avaliable)
                list1Avaliable.append(avaliable)
        if i == 0:
            timeSlotList = list1[0].split('-')
            timeSlotList2 = list1[1].split('-')
            if (MTC(timeSlotList2[0])-MTC(timeSlotList[0]) >= meetingLength):
                avaliable = []
                avaliable.append(timeSlotList[0])
                avaliable.append(timeSlotList2[0])
                str = '-'
                avaliable = str.join(avaliable)
                list1Avaliable.append(avaliable)
        elif i != 0 and i != len(list1)-1:
            timeSlotList = list1[i].split('-')
            timeSlotList2 = list1[i + 1].split('-')
            if (MTC(timeSlotList2[0]) - MTC(timeSlotList[1])) >= meetingLength:
                avaliable = []
                avaliable.append(timeSlotList[1])
                avaliable.append(timeSlotList2[0])
                str = '-'
                avaliable = str.join(avaliable)
                list1Avaliable.append(avaliable)
    # Same gap computation for calendar 2.
    for i in range(0, len(list2)):
        if i == len(list2)-1:
            timeSlotList = list2[i].split('-')
            boundEnd = list2[0].split('-')
            if (MTC(boundEnd[1])-MTC(timeSlotList[1])) >= meetingLength:
                avaliable = []
                avaliable.append(timeSlotList[1])
                avaliable.append(boundEnd[1])
                str='-'
                avaliable = str.join(avaliable)
                list2Avaliable.append(avaliable)
        if i == 0:
            timeSlotList = list2[0].split('-')
            timeSlotList2 = list2[1].split('-')
            if (MTC(timeSlotList2[0])-MTC(timeSlotList[0]) >= meetingLength):
                avaliable = []
                avaliable.append(timeSlotList[0])
                avaliable.append(timeSlotList2[0])
                str = '-'
                avaliable = str.join(avaliable)
                list2Avaliable.append(avaliable)
        elif i != 0 and i != len(list2)-1:
            timeSlotList = list2[i].split('-')
            timeSlotList2 = list2[i + 1].split('-')
            if (MTC(timeSlotList2[0]) - MTC(timeSlotList[1])) >= meetingLength:
                avaliable = []
                avaliable.append(timeSlotList[1])
                avaliable.append(timeSlotList2[0])
                str = '-'
                avaliable = str.join(avaliable)
                list2Avaliable.append(avaliable)
    # Convert every free gap to [start_minutes, end_minutes] and pool them.
    for timeSlot in list1Avaliable:
        timeSlot = timeSlot.split("-")
        startTime= MTC(timeSlot[0])
        endTime= MTC(timeSlot[1])
        timeSlot[0] = startTime
        timeSlot[1] = endTime
        totalAvaliable.append(timeSlot)
    for timeSlot in list2Avaliable:
        timeSlot = timeSlot.split("-")
        startTime = MTC(timeSlot[0])
        endTime = MTC(timeSlot[1])
        timeSlot[0] = startTime
        timeSlot[1] = endTime
        totalAvaliable.append(timeSlot)
    totalAvaliable = sorted(totalAvaliable)
    # Intersect each gap with its sorted neighbour; keep overlaps that are
    # long enough for the meeting.
    for i in range(0, len(totalAvaliable)):
        if i == len(totalAvaliable)-1:
            timeSlot = totalAvaliable[i]
            earlySlot = totalAvaliable[i-1]
            if (earlySlot[1]-timeSlot[0]) >= meetingLength:
                temp = [timeSlot[0], earlySlot[1]]
                finalList.append(temp)
        else:
            timeSlot = totalAvaliable[i]
            lateSlot = totalAvaliable[i+1]
            if (timeSlot[1]-lateSlot[0]) >= meetingLength:
                temp = [lateSlot[0], timeSlot[1]]
                finalList.append(temp)
    # Clamp every window to both people's end-of-day bounds (element 0).
    list1Entry = list1[0]
    list1Entry = list1Entry.split("-")
    list2Entry = list2[0]
    list2Entry = list2Entry.split("-")
    bound1 = MTC(list1Entry[1])
    bound2 = MTC(list2Entry[1])
    for timeSlot in finalList:
        if timeSlot[1] > bound1:
            timeSlot[1] = bound1
        if timeSlot[1] > bound2:
            timeSlot[1] = bound2
    # Convert back to "H:MM-H:MM" text.
    for timeSlot in finalList:
        startTime = toMTC(timeSlot[0])
        endTime = toMTC(timeSlot[1])
        temp = [startTime, endTime]
        str = "-"
        time = str.join(temp)
        result.append(time)
    # Drop adjacent duplicates.
    # NOTE(review): popping while indexing over a precomputed range can raise
    # IndexError once the list shrinks — verify with 3+ equal entries.
    for i in range(0, len(result)-1):
        if result[i] == result [i+1]:
            result.pop(i+1)
    if len(result) >= 1:
        print(result)
    else:
        print("There are no compatible times.")
calendarAlgorithm(["8:00-18:00", "9:00-10:00", "10:00-10:10", "11:00-13:00"], ["7:00-16:00", "9:00-10:00", "11:00-13:00", "14:00-14:30"], 30) |
990,295 | e58b4889e02a9e625176b3a55c49923b9e6bfb33 | puzzle = open('puzzle').read().splitlines()
puzzle = [i.split() for i in puzzle]
def RepresentsInt(s):
    """Return True when `s` parses as an integer, False otherwise."""
    try:
        int(s)
    except ValueError:
        return False
    return True
registers1 = {str(i[1]): 0 for i in puzzle if not RepresentsInt(i[1])}
registers1['p'] = 0
registers1['que'] = []
idx1 = 0
registers2 = {str(i[1]): 0 for i in puzzle if not RepresentsInt(i[1])}
registers2['p'] = 1
registers2['que'] = []
idx2 = 0
sent = 0
while True:
while idx1 < len(puzzle):
i = puzzle[idx1]
if i[0] == 'snd':
if RepresentsInt(i[1]):
registers2['que'].append(int(i[1]))
else:
registers2['que'].append(registers1[i[1]])
elif i[0] == 'set':
if RepresentsInt(i[2]):
registers1[i[1]] = int(i[2])
else:
registers1[i[1]] = registers1[i[2]]
elif i[0] == 'add':
if RepresentsInt(i[2]):
registers1[i[1]] += int(i[2])
else:
registers1[i[1]] += registers1[i[2]]
elif i[0] == 'mul':
if RepresentsInt(i[2]):
registers1[i[1]] *= int(i[2])
else:
registers1[i[1]] *= registers1[i[2]]
elif i[0] == 'mod':
if RepresentsInt(i[2]):
registers1[i[1]] %= int(i[2])
else:
registers1[i[1]] %= registers1[i[2]]
elif i[0] == 'rcv':
if len(registers1['que']) > 0:
registers1[i[1]] = registers1['que'].pop(0)
else:
break
idx1 -= 1
elif i[0] == 'jgz':
if RepresentsInt(i[1]):
if int(i[1]) > 0:
if RepresentsInt(i[2]):
idx1 += int(i[2]) - 1
else:
idx1 += registers1[i[2]] - 1
elif registers1[i[1]] > 0:
if RepresentsInt(i[2]):
idx1 += int(i[2]) - 1
else:
idx1 += registers1[i[2]] - 1
idx1 += 1
else:
break
if len(registers2['que']) == 0:
break
while idx2 < len(puzzle):
i = puzzle[idx2]
if i[0] == 'snd':
sent += 1
if RepresentsInt(i[1]):
registers1['que'].append(int(i[1]))
else:
registers1['que'].append(registers2[i[1]])
elif i[0] == 'set':
if RepresentsInt(i[2]):
registers2[i[1]] = int(i[2])
else:
registers2[i[1]] = registers2[i[2]]
elif i[0] == 'add':
if RepresentsInt(i[2]):
registers2[i[1]] += int(i[2])
else:
registers2[i[1]] += registers2[i[2]]
elif i[0] == 'mul':
if RepresentsInt(i[2]):
registers2[i[1]] *= int(i[2])
else:
registers2[i[1]] *= registers2[i[2]]
elif i[0] == 'mod':
if RepresentsInt(i[2]):
registers2[i[1]] %= int(i[2])
else:
registers2[i[1]] %= registers2[i[2]]
elif i[0] == 'rcv':
if len(registers2['que']) > 0:
registers2[i[1]] = registers2['que'].pop(0)
else:
break
idx2 -= 1
elif i[0] == 'jgz':
if RepresentsInt(i[1]):
if int(i[1]) > 0:
if RepresentsInt(i[2]):
idx2 += int(i[2]) - 1
else:
idx2 += registers2[i[2]] - 1
elif registers2[i[1]] > 0:
if RepresentsInt(i[2]):
idx2 += int(i[2]) - 1
else:
idx2 += registers2[i[2]] - 1
idx2 += 1
else:
break
if len(registers1['que']) == 0:
break
print sent |
990,296 | 111606ca41fa45c3d2a2db502056748b32da6bd4 | from app.crawlers.intelius.intelius_results import IntelliusResults
from app.web_automation.elements.div import Div
from app.web_automation.page_objects.page import Page, which_page_loads
class IntelliusSearching(Page):
    """
    Interface to the intelius.com page displayed while a search is in progress.
    """
    _url = "https://www.intelius.com/search/"
    _dont_know_response = Div(attribute="class", identifier="confirm-not-sure")
    _critical_elements = [_dont_know_response]

    def ignore_additional_prompts(self):
        """
        Click "I Don't Know" as many times as necessary to dismiss prompts
        for additional info, then report which page loaded.
        :return: the page class that ended up loading
        """
        self._dont_know_response.click()
        return which_page_loads(IntelliusSearching, IntelliusResults)
990,297 | bab83cbca52cf3e67841d1096d466896308007ee | #!../../software/pyhail.sh
import hail
from hail.representation import Interval
# Local Hail context; log and temp files live under the working directory.
hc = hail.HailContext(log = 'log/08_test_rs6857.log', tmp_dir = 'tmp/hail')
vds = hc.read('../MGRB.phase2.SNPtier12.match.vqsr.minrep.locusannot.WGStier12.unrelated.tgp.hrc.gnomad.dbsnp.clinvar.cato.eigen.vep.vds')
# Export variant annotations in a small window around rs6857 (19:45392250-45392260).
(vds
    .filter_intervals(Interval.parse('19:45392250-45392260'), keep=True)
    .export_variants('../MGRB.phase2.SNPtier12.match.vqsr.minrep.locusannot.WGStier12.unrelated.tgp.hrc.gnomad.dbsnp.clinvar.cato.eigen.vep.rs6857.tsv', 'variant=v, va.*')
)
# .export_genotypes('../MGRB.phase2.SNPtier12.match.vqsr.minrep.locusannot.WGStier12.unrelated.tgp.hrc.gnomad.dbsnp.clinvar.cato.eigen.vep.rs6857.tsv', 'sample=s, variant=v, gt=""+g.gtj()+"/"+g.gtk(), dp=g.dp, dpj=g.ad[g.gtj()], dpk=g.ad[g.gtk()], va.*')
|
990,298 | 2a35835f50dae2e71b97eb6604726cfdb1f78bff | # Generated by Django 3.2.7 on 2021-09-02 02:20
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Activity",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(default="", max_length=100)),
(
"description",
models.CharField(blank=True, default="", max_length=255, null=True),
),
("start", models.DateTimeField()),
("end", models.DateTimeField()),
(
"location",
models.CharField(
choices=[
("activity room", "activity room"),
("skilled nursing", "skilled nursing"),
("assisted living", "assisted living"),
("memory care", "memory care"),
("dining room", "dining room"),
("outing", "outing"),
],
default="dining room",
max_length=100,
),
),
(
"options",
models.CharField(
choices=[
("virtual", "virtual"),
("in person", "in person"),
("both", "both"),
],
default="both",
max_length=100,
),
),
],
),
migrations.CreateModel(
name="Resident",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"status",
models.CharField(
choices=[
("going", "going"),
("not going", "not going"),
("maybe", "maybe"),
],
default="not going",
max_length=100,
),
),
(
"activity",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="activity",
to="activities.activity",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AddField(
model_name="activity",
name="residents",
field=models.ManyToManyField(
blank=True,
null=True,
related_name="activities",
to="activities.Resident",
),
),
]
|
990,299 | 0064cb720568c05c4daeeee804da74aaa4a761b2 | from account.models import User
from knox.models import AuthToken
from rest_framework import generics, permissions
from rest_framework.response import Response
from .serializers import SponsorSerializer, RegisterSponsorSerializer, LoginSponsorSerializer
class RegisterSponsor(generics.GenericAPIView):
    """POST: create a sponsor account and return it with a fresh auth token."""
    serializer_class = RegisterSponsorSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        sponsor = serializer.save()
        payload = {
            "user": SponsorSerializer(sponsor, context=self.get_serializer_context()).data,
            "token": AuthToken.objects.create(sponsor)[1],
        }
        return Response(payload)
class LoginSponsor(generics.GenericAPIView):
    """POST: authenticate a sponsor and return the profile plus a fresh token."""
    serializer_class = LoginSponsorSerializer

    def post(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        sponsor = serializer.validated_data
        payload = {
            "user": SponsorSerializer(sponsor, context=self.get_serializer_context()).data,
            "token": AuthToken.objects.create(sponsor)[1],
        }
        return Response(payload)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.