index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,900 | 5f5f46757c83a3e6fdc1ea6f3333707fe0d1fee5 | import sys
import getcred as gc
def createSheet(spreadsheet_id, sheetName):
    """Add a new worksheet titled *sheetName* to the given spreadsheet.

    Issues an ``addSheet`` batchUpdate request through the shared Sheets
    service object exposed by the ``getcred`` module (``gc.service``).

    Args:
        spreadsheet_id: ID of the target Google spreadsheet.
        sheetName: Title for the new worksheet.

    Returns:
        The batchUpdate API response dict (previously the response was
        computed but silently discarded; returning it lets callers read
        the new sheet's properties, e.g. its sheetId).
    """
    batch_update_spreadsheet_request_body = {
        'requests': [
            {
                'addSheet': {
                    'properties': {
                        'title': sheetName
                    }
                }
            }
        ],
    }
    request = gc.service.spreadsheets().batchUpdate(
        spreadsheetId=spreadsheet_id,
        body=batch_update_spreadsheet_request_body)
    return request.execute()
|
998,901 | 8538ae51085c6debdd1fc582ccb4d57679670b23 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
import os
import wget
# Instagram hashtag image crawler: log in, search one hashtag, scroll the
# result grid to trigger lazy loading, then download every visible post
# thumbnail into a folder named after the hashtag.

# FIX: raw string — a plain '...' Windows path is fragile because '\'
# starts escape sequences (e.g. '\n', '\t') for other drive letters.
PATH = r'D:\Code\python\crawler\chromedriver.exe'
driver = webdriver.Chrome(PATH)
driver.get('https://www.instagram.com/')

username = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.NAME, "username"))
)
password = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.NAME, "password"))
)
# FIX: find_element(By.XPATH, ...) replaces the removed-in-Selenium-4
# find_element_by_xpath helper; same locator string.
login = driver.find_element(By.XPATH, '//*[@id="loginForm"]/div/div[3]/button/div')
username.clear()
password.clear()
username.send_keys('')  # account name
password.send_keys('')  # password
login.click()

# Dismiss the post-login dialog (button position located by XPath).
wait = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, '/html/body/div[5]/div/div/div/div[3]/button[2]'))
)
wait.click()

search = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, '//*[@id="react-root"]/section/nav/div[2]/div/div/div[2]/input'))
)
keyword = "#leggings"
search.send_keys(keyword)
time.sleep(1)
search.send_keys(Keys.RETURN)
time.sleep(1)
search.send_keys(Keys.RETURN)  # second RETURN confirms the suggestion
time.sleep(1)

# Wait until the hashtag page header is present.
WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.XPATH, '//*[@id="react-root"]/section/main/header/div[2]/div/div[1]/h1'))
)

# Scroll to the bottom a few times so more posts are lazily loaded.
for _ in range(5):
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
    time.sleep(5)

imgs = driver.find_elements(By.CLASS_NAME, "FFVAD")
path = os.path.join(keyword)  # download folder named after the hashtag
# FIX: makedirs(..., exist_ok=True) — os.mkdir raised FileExistsError on
# a re-run with the same keyword.
os.makedirs(path, exist_ok=True)
for count, img in enumerate(imgs):
    save_as = os.path.join(path, keyword + str(count) + '.jpg')
    wget.download(img.get_attribute("src"), save_as)
|
998,902 | c5c3b0c1709108ab55da4e6ec65b15ee80e17909 | from django.conf import settings
# Baseline TinyMCE editor configuration. Every key can be replaced
# wholesale by defining TINYMCE_DEFAULT_CONFIG in the Django settings;
# the literal below is only the fallback.
DEFAULT_CONFIG = getattr(settings, 'TINYMCE_DEFAULT_CONFIG', {
    'convert_urls': False,
    'height': '350',
    'theme': 'advanced',
    'plugins': 'advimage,advlink,fullscreen,media,safari,table,paste',
    'theme_advanced_toolbar_location': 'top',
    # Toolbar rows: adjacent string literals are concatenated by Python.
    'theme_advanced_buttons1': 'fullscreen,|,bold,italic,underline,'
                               'strikethrough,|,justifyleft,justifycenter,'
                               'justifyright,justifyfull,|,'
                               'forecolor,backcolor,|,'
                               'formatselect,fontselect,fontsizeselect',
    'theme_advanced_buttons2': 'cut,copy,paste,pastetext,pasteword,|,'
                               'bullist,numlist,|,outdent,indent,'
                               'blockquote,|,sub,sup,|,undo,redo,|,'
                               'link,unlink,anchor,image,cleanup,help,code',
    'theme_advanced_buttons3': 'tablecontrols,|,hr,removeformat,visualaid,'
                               '|,charmap,media',
    'theme_advanced_toolbar_align': 'center',
    'theme_advanced_statusbar_location': 'bottom',
    'theme_advanced_resizing': 'true',
})
# Per-project overrides: TINYMCE_CONFIG entries win over the defaults.
DEFAULT_CONFIG.update(getattr(settings, 'TINYMCE_CONFIG', {}))
# Location of the TinyMCE bootstrap script, served from STATIC_URL
# unless TINYMCE_JS_URL overrides it.
JS_URL = getattr(settings, 'TINYMCE_JS_URL',
                 '%stinymce/js/tiny_mce.js' % settings.STATIC_URL)
|
998,903 | 1e36a9b1e6fb05d7661631a6ca3a4262beef2706 | # coding=utf-8
COUNT=0
def perm(n,begin,end):
global COUNT
if begin>=end:
for i in n:
print(i,end=" ")
print()
COUNT +=1
else:
i=begin
for num in range(begin,end):
n[num],n[i]=n[i],n[num]
perm(n,begin+1,end)
n[num],n[i]=n[i],n[num]
# Read sizes from stdin until 0, EOF, or non-numeric input; for each size
# m print all permutations of 1..m.
while True:
    try:
        # FIX: int(input()) instead of eval(input()) — never evaluate raw
        # user input as code.
        m = int(input())
    except (ValueError, EOFError):
        # FIX: narrow except — the original bare `except:` also swallowed
        # any genuine error raised inside perm().
        break
    if m == 0:
        break
    # FIX: range(1, m + 1) directly; the original built range(1, m) and
    # then appended m, which is the same list written the long way.
    perm(list(range(1, m + 1)), 0, m)
998,904 | 2a9f578670f812bff5fd552995e85a22dd3e06cb | from operator import attrgetter
class Users:
    """A user record pairing a display name with a numeric user id."""

    def __init__(self, x, y):
        # Positional parameter names kept for caller compatibility; the
        # attributes carry the descriptive names used by attrgetter sorts.
        self.name = x
        self.user_id = y

    def __repr__(self):
        return f"{self.name} : {self.user_id}"
# Demo: print the users unsorted, then sorted by name, then by id.
users = [
    Users('Chris', 67),
    Users('Ankita', 34),
    Users('Phil', 5),
    Users('Christina', 90),
    Users('Mo', 56),
]

for entry in users:
    print(entry)
print("-------")
for entry in sorted(users, key=attrgetter('name')):
    print(entry)
print("-------")
for entry in sorted(users, key=attrgetter('user_id')):
    print(entry)
998,905 | 69f4b7cd8d888d0f5bfc8817c4f7905c36a88b2d | ###import statements
import nltk
from collections import defaultdict
import operator
from collections import OrderedDict
from operator import itemgetter
from nltk.stem.lancaster import LancasterStemmer
from collections import Counter
import pandas as pd
###function_words:
##input: string with all words
##output: list with dictionary of all function words and their respective number of occurences, total number of function words and percentual relation of function words to the total number of words
def function_words(reader):
    """Percentage of function words in a POS-tagged text.

    Args:
        reader: iterable of sentences, each an iterable of 'word/TAG'
            strings; only the word part (before the first '/') is used.

    Reads the comma-separated function-word inventory from
    'functionwords.txt' in the current working directory.

    Returns:
        float percentage of tokens that are function words, or int 0 for
        an empty text (matches the original return types).
    """
    words = []
    for sent in reader:
        for token in sent:
            words.append(token.split('/')[0])
    numwords = len(words)

    # FIX: with-statement guarantees the file is closed; a set lookup
    # replaces the original O(words x inventory) nested scan.
    # NOTE(review): assumes inventory entries are unique — duplicates in
    # functionwords.txt were double-counted by the old nested loop.
    with open('functionwords.txt', 'r', encoding='utf-8') as fwords:
        fword_set = set(fwords.read().split(','))

    numfwords = sum(1 for w in words if w in fword_set)
    return (numfwords * 100) / numwords if numwords > 0 else 0
###adjectives
###input: pos_tagged_list
##output: list with dictionary of all function words and their respective number of occurences, total number of function words and percentual relation of function words to the total number of words
def adjectives(reader):
    """Adjective/adverb usage percentages for a POS-tagged text.

    Args:
        reader: iterable of sentences, each an iterable of 'word/TAG'
            strings (tags look like Penn Treebank — TODO confirm).
            Tokens that do not split into exactly two parts on '/' are
            ignored, mirroring the other extractors in this module.

    Returns:
        A list of four numbers:
        [percent positive (JJ/RB), percent comparative (JJR/RBR),
         percent superlative (JJS/RBS), percent of all three combined],
        each relative to the count of well-formed tokens. A percentage is
        int 0 when its class is absent or the text is empty (preserving
        the original int-vs-float behavior).
    """
    tagged = []
    for sent in reader:
        for token in sent:
            parts = tuple(token.split('/'))
            if len(parts) == 2:
                tagged.append(parts)
    numwords = len(tagged)

    # Tally each degree of comparison (adjective and adverb tags share a
    # bucket, as in the original JJ/RB, JJR/RBR, JJS/RBS pairs).
    groups = (('JJ', 'RB'), ('JJR', 'RBR'), ('JJS', 'RBS'))
    counts = [0, 0, 0]
    for _, tag in tagged:
        for idx, tags in enumerate(groups):
            if tag in tags:
                counts[idx] += 1

    alladjs = []
    for c in counts:
        alladjs.append((c * 100) / numwords if c and numwords else 0)
    # Combined share of all three classes; 0 when none were found (which
    # also avoids dividing by zero on empty input).
    total = sum(counts)
    alladjs.append((total * 100) / numwords if total else 0)
    return alladjs
###det_n:
##input: pos-tagged and tokenized list of input texts
##output: result list with dictionary {Det N bigram:number of occurences}, number of Det N bigrams in general and percentual relation of number of Det N bigrams to number of words in total
def det_n(reader):
    """Percentage of determiner-noun bigrams relative to total tokens.

    Args:
        reader: iterable of sentences of 'word/TAG' strings; malformed
            tokens (not exactly one '/') are skipped.

    A bigram counts when a token tagged exactly 'DT' is immediately
    followed by a token whose tag contains 'NN' (NN, NNS, NNP, ...).

    Returns:
        One-element list [percent]; int 0 when the text is empty or has
        no such bigram (matching the original return types).
    """
    tagged = []
    for sent in reader:
        for token in sent:
            parts = tuple(token.split('/'))
            if len(parts) == 2:
                tagged.append(parts)
    numwords = len(tagged)

    numdetn = 0
    prev_was_dt = False
    for _, tag in tagged:
        if prev_was_dt and 'NN' in tag:
            numdetn += 1
        # A determiner primes the check of the *next* token; any other
        # tag resets the state.
        prev_was_dt = (tag == 'DT')

    percent_det_n = (numdetn * 100) / numwords if numdetn and numwords else 0
    return [percent_det_n]
###chunk
##input: POS-tagged list
##output: number of named entities (geographical positions, persons, locations, organizations)
def chunk_named_entities(reader):
    """Per-word rates of named entities found by nltk.ne_chunk.

    Args:
        reader: iterable of sentences of 'word/TAG' strings. Tokens whose
            word part is a single character are dropped (original filter).

    Returns:
        [persons, organizations, locations, GPEs, total] — each entry is
        the entity count divided by the total word count, or int 0 when
        that count is zero (preserving the original int-vs-float mix).
    """
    tagged_sents = []
    for sent in reader:
        sentence = []
        for word in sent:
            word_pos = tuple(word.split('/'))
            if len(word_pos[0]) > 1:
                sentence.append(word_pos)
        tagged_sents.append(sentence)
    num_words = sum(len(sent) for sent in tagged_sents)

    # One pass per sentence, tallying all labels of interest together
    # instead of the original four duplicated subtree loops.
    labels = ('PERSON', 'ORGANIZATION', 'LOCATION', 'GPE')
    counts = dict.fromkeys(labels, 0)
    num_total = 0
    for sent in tagged_sents:
        ne_sent = nltk.ne_chunk(sent, binary=False)
        for subtree in ne_sent.subtrees(filter=lambda t: t.label() in labels):
            counts[subtree.label()] += 1
            num_total += 1

    # Output order matters to callers: persons, orgs, locations, GPEs,
    # total. A non-zero count implies num_words > 0, so the division is
    # safe whenever it is taken.
    ne_dist = []
    for count in (counts['PERSON'], counts['ORGANIZATION'],
                  counts['LOCATION'], counts['GPE'], num_total):
        ne_dist.append(count / num_words if count else 0)
    return ne_dist
# ---------------------------------------------------------------------------
# Driver: parse 'data_pre_processed.txt' (tab-separated: label, then
# sentences serialized as "'word/TAG', 'word/TAG', ..." list reprs),
# vectorize every entry and write the feature matrix to CSV.
# FIX: file handle is now closed via `with` (it was left open before).
with open("data_pre_processed.txt", 'r', encoding='utf-8') as reader:
    raw_lines = reader.readlines()

labels = list()
clean_data = list()
for line in raw_lines:
    line_split = line.split("\t")
    labels.append(line_split[0])
    sentence = list()
    for sent in line_split[1:]:
        temp = list()
        for tagged_word in sent.split(", "):
            # Strip the list/quote residue left by dumping a Python list
            # repr to text: leading "['", trailing "']", or plain quotes.
            if tagged_word.startswith("['"):
                temp.append(tagged_word[2:-1])
            elif tagged_word.endswith("']"):
                temp.append(tagged_word[1:-2])
            else:
                temp.append(tagged_word[1:-1])
        if len(temp) > 1:
            sentence.append(temp)
    clean_data.append(sentence)

# Column order of the output matrix. FIX: the original header listed the
# NE columns as geo_pos, persons, locations, organizations although the
# data columns below are appended as persons, organizations, locations,
# geo_pos — the CSV column names were silently misaligned.
header = ["funct_words", "det_n", "norms", "comps", "supers", "all_adj",
          "ne_persons", "ne_organizations", "ne_locations", "ne_geo_pos",
          "ne_total"]

func_words = list()
det_noun = list()
norm = list()
comp = list()
superl = list()
alladj = list()
ne_pers = list()
ne_org = list()
ne_loc = list()
ne_geo_pos = list()
ne_total = list()
for entry in clean_data:
    func_words.append(function_words(entry))
    det_noun.append(det_n(entry))
    adj = adjectives(entry)
    norm.append(adj[0])
    comp.append(adj[1])
    superl.append(adj[2])
    alladj.append(adj[3])
    ne_chunks = chunk_named_entities(entry)
    ne_pers.append(ne_chunks[0])
    ne_org.append(ne_chunks[1])
    ne_loc.append(ne_chunks[2])
    ne_geo_pos.append(ne_chunks[3])
    ne_total.append(ne_chunks[4])

# Store vectorized data in the CSV file (labels column intentionally
# omitted, as in the original active code path).
columns = (func_words, det_noun, norm, comp, superl, alladj,
           ne_pers, ne_org, ne_loc, ne_geo_pos, ne_total)
data_matrix = pd.concat([pd.DataFrame(col) for col in columns], axis=1)
# FIX: DataFrame.to_csv takes `sep`, not `delimiter` (passing `delimiter`
# raises TypeError in pandas).
data_matrix.to_csv('_data_vectors/sent02_vector.csv', index=False, sep=',', header=header)
|
998,906 | 85aec9fd783f7f62c5fcf820c984e9cac3f691af | '''
Quick Note:
This search is by no means perfect and thorough. I have applied
4 cv strategies and theoretically they should be having their
own optimal parameters. However, that process is way too slow.
Here is a quick computation. For this problem we need ~4 minutes
to grow a tree. For the following code, a search with n_iter= 50
will cost me roughly 4 hours. On the other hand, to find the best
parameters using cross validation, each iteration takes 5*5 minutes,
and I need to repeat 4 times, which results in 4*20 = 80+ hours. A
huge commitment with imaginable minimal gain.
'''
import gc
import sys
import numpy as np
import pandas as pd
from datetime import datetime
from sklearn.model_selection import train_test_split
sys.path.append("../")
from general.preprocess import data_preparation
from general.ClfWrappers import LgbWrapper
from features.f0 import features_set_f0
from bayes_opt import BayesianOptimization
def lgb_evaluate(
        min_child_sample, num_leaves, max_bin,
        min_child_weight, subsample, subsample_freq,
        colsample_bytree, reg_alpha, reg_lambda,
        feature_fraction, bagging_freq
):
    """Objective for BayesianOptimization: train LightGBM on a holdout.

    All arguments are continuous values proposed by the optimizer;
    parameters LightGBM expects as integers are rounded before use.

    FIX: the last parameter was named `bagging_fraction`, but the search
    space in __main__ supplies `bagging_freq`, so every optimizer call
    raised TypeError (unexpected keyword argument). The value was already
    being used as `lgb_params['bagging_freq']`, so the parameter is now
    named accordingly.

    Returns:
        The wrapped model's validation score (the quantity maximized).
    """
    target = 'visitors'
    features = features_set_f0()
    split = 0.33
    seed = 177

    full_data, ntrain, ntest = data_preparation()
    trn = full_data[:ntrain]
    x_train, x_valid, y_train, y_valid = train_test_split(
        trn[features].values, trn[target].values,
        test_size=split, random_state=seed
    )
    # Free the full dataset before training to reduce peak memory.
    del full_data, trn
    gc.collect()

    lgb_params = dict()
    lgb_params['objective'] = 'regression_l2'
    lgb_params['metric'] = 'l2_root'
    lgb_params['learning_rate'] = 0.1
    lgb_params['random_state'] = seed
    lgb_params['silent'] = True  # does help
    lgb_params['verbose_eval'] = False
    lgb_params['n_estimators'] = 500
    # Integer-valued hyperparameters are rounded from the continuous
    # proposals of the optimizer.
    lgb_params['min_child_samples'] = int(np.round(min_child_sample))
    lgb_params['num_leaves'] = int(np.round(num_leaves))
    lgb_params['max_bin'] = int(np.round(max_bin))
    lgb_params['subsample_freq'] = int(np.round(subsample_freq))
    lgb_params['colsample_bytree'] = colsample_bytree
    lgb_params['reg_alpha'] = reg_alpha
    lgb_params['reg_lambda'] = reg_lambda
    lgb_params['min_child_weight'] = min_child_weight
    lgb_params['subsample'] = subsample
    lgb_params['feature_fraction'] = feature_fraction
    lgb_params['bagging_freq'] = int(np.round(bagging_freq))

    lgb_clf = LgbWrapper(params=lgb_params)
    lgb_clf.train(x_train, y_train, x_valid, y_valid)
    return lgb_clf.best_score
if __name__ == "__main__":
    print("Tuning process initiating...")
    # GP kernel noise parameter. NOTE(review): currently unused — the
    # maximize() call below does not forward it (see commented **gp_params).
    gp_params = {"alpha": 1e-5}
    str_time = datetime.now().replace(microsecond=0)
    # Search bounds for each hyperparameter explored by the optimizer;
    # keys must match lgb_evaluate's parameter names.
    search_space = {
        'min_child_sample': (20, 200),
        'num_leaves': (16, 32),
        'max_bin': (100, 255),
        'min_child_weight': (1e-3, 5e-2),
        'subsample': (0.8, 1),
        'subsample_freq': (1, 5),
        'colsample_bytree': (0.7, 1),
        'reg_alpha': (0, 5),
        'reg_lambda': (0, 5),
        'feature_fraction': (0, 1),
        'bagging_freq': (0, 10)
    }
    lgb_BO = BayesianOptimization(
        lgb_evaluate,
        pbounds=search_space,
    )
    # 10 random warm-up evaluations, then 80 guided iterations.
    lgb_BO.maximize(init_points=10, n_iter=80, )  # **gp_params
    # NOTE(review): `.res['all']` is the pre-1.0 bayes_opt results API —
    # confirm the installed bayes_opt version before running.
    lgb_BO_scores = pd.DataFrame(lgb_BO.res['all']['params'])
    lgb_BO_scores['score'] = pd.DataFrame(lgb_BO.res['all']['values'])
    # Persist all tried parameter sets, best score first per sort order.
    lgb_BO_scores = lgb_BO_scores.sort_values(by='score', ascending=True)
    lgb_BO_scores.to_csv('01_lgb_tuning.csv')
    end_time = datetime.now().replace(microsecond=0)
    print("Time used", (end_time-str_time))
|
998,907 | 5ecf52b0abe86aa84c58c6c0d16f863de1c789ab | import keras
import os
# two classes
num_classes = 2
# One-hot encode the integer class labels:
# Y_train.shape = (num, 1) -> y_train.shape = (num, 2)
# NOTE(review): X_train/Y_train/X_test/Y_test are not defined anywhere in
# this file — presumably loaded by earlier code before this runs; confirm.
y_train = keras.utils.to_categorical(Y_train, num_classes)
y_test = keras.utils.to_categorical(Y_test, num_classes)
def myKerasModel():
    """Build, train, evaluate and save a small CNN binary classifier.

    Relies on module-level X_train, X_test, y_train, y_test and
    num_classes. NOTE(review): X_train/X_test are not defined in this
    file and must be loaded before calling — confirm with the caller.
    Saves the trained model to ./saved_models/myKerasModel.h5.
    """
    # input_shape is the per-example shape (no batch dimension):
    # e.g. X_train.shape = (10000, 64, 64, 3) -> input_shape = (64, 64, 3)
    # and Input(input_shape) would be (None, 64, 64, 3).
    input_shape = X_train.shape[1:]
    print("input_shape = " + str(input_shape))

    # FIX: the layer classes were referenced unqualified (Conv2D, Dense,
    # ...) without ever being imported, which raises NameError at call
    # time; access them through the already-imported keras package.
    layers = keras.layers
    model = keras.models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
    model.add(layers.Activation('relu'))
    model.add(layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(layers.Conv2D(64, (3, 3), padding='same'))
    model.add(layers.Activation('relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(32))
    model.add(layers.Activation('relu'))
    model.add(layers.Dense(num_classes))
    model.add(layers.Activation('softmax'))

    op = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
    model.compile(optimizer=op, loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    model.fit(x=X_train, y=y_train, batch_size=32, verbose=2, epochs=10)

    preds = model.evaluate(x=X_test, y=y_test)
    print()
    print(preds)

    # Persist the trained model under ./saved_models/.
    save_dir = os.path.join(os.getcwd(), 'saved_models')
    model_name = 'myKerasModel.h5'
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_path = os.path.join(save_dir, model_name)
    model.save(model_path)
    print('Saved trained model at %s ' % model_path)


myKerasModel()
|
WS_PORT = 8856  # Port http server will run on
# Base directory for the locally installed IPP connection-management
# schema files.
SCHEMA_LOCAL = "/usr/share/ipp-connectionmanagement/schemas/"
|
998,909 | 913c11de08434565872c655e143ffca63446d4b0 | import os
import numpy as np
import random
from shutil import copyfile, copytree
import sys
sys.path.insert(0, '/home/docker/2017-tfm-nuria-oyaga')
from Utils import utils
if __name__ == '__main__':
    # General parameters --------------------------------------------------
    # Mix three single-trajectory frame datasets (linear, parabolic,
    # sinusoidal) into one combined dataset, drawing roughly a third of
    # the samples from each source.
    base_path = "/home/docker/data_far_50/Frames_dataset"
    to_mix = ["linear_point_255_var_100000_80_120",
              "parabolic_point_255_var_1_100000_80_120",
              "sinusoidal_point_255_var_2_100000_80_120"]
    # Fraction of the mixed set taken from each source (sums to 1.0).
    samples_type_proportion = np.array([0.33, 0.33, 0.34])
    n_samples = 100000
    save_dir = os.path.join(base_path, "mix_lin_par_sin_255_var_100000_80_120")
    print("Creating dir")
    utils.check_dirs(save_dir, True)
    # Train parameters: first 80% of the sample index range.
    train_proportion = 0.8
    train_interval = [0, int(n_samples*train_proportion)]
    train_samples = samples_type_proportion * n_samples * train_proportion
    train_dir = os.path.join(save_dir, "mix_train")
    print("Creating train dir")
    utils.check_dirs(train_dir)
    utils.check_dirs(os.path.join(train_dir, "raw_samples"))
    utils.check_dirs(os.path.join(train_dir, "modeled_samples"))
    # Test parameters: next 10% of the index range.
    test_proportion = 0.1
    test_interval = [train_interval[1], int(train_interval[1] + n_samples * test_proportion)]
    test_samples = samples_type_proportion * n_samples * test_proportion
    test_dir = os.path.join(save_dir, "mix_test")
    print("Creating test dir")
    utils.check_dirs(test_dir)
    utils.check_dirs(os.path.join(test_dir, "raw_samples"))
    utils.check_dirs(os.path.join(test_dir, "modeled_samples"))
    # Val parameters: final 10% of the index range.
    val_proportion = 0.1
    val_interval = [test_interval[1], int(test_interval[1] + n_samples * val_proportion)]
    val_samples = samples_type_proportion * n_samples * val_proportion
    val_dir = os.path.join(save_dir, "mix_val")
    print("Creating val dir")
    utils.check_dirs(val_dir)
    utils.check_dirs(os.path.join(val_dir, "raw_samples"))
    utils.check_dirs(os.path.join(val_dir, "modeled_samples"))
    # Counters: overall copies and per-split destination numbering.
    num = 0
    num_train = 0
    num_test = 0
    num_val = 0
    for i, mix_elem in enumerate(to_mix):
        for action in ["train", "test", "val"]:
            # Locate the source sub-directory for this split by name
            # (first directory containing the split keyword).
            sub_elem = [k for k in os.listdir(os.path.join(base_path, mix_elem)) if action in k][0]
            if "train" in sub_elem:
                n_sub_samples = int(train_samples[i])
                interval = train_interval
                sub_save_dir = train_dir
            elif "test" in sub_elem:
                n_sub_samples = int(test_samples[i])
                interval = test_interval
                sub_save_dir = test_dir
            else:  # "val" in sub_elem:
                n_sub_samples = int(val_samples[i])
                interval = val_interval
                sub_save_dir = val_dir
            # Get random samples: unique indices within this split's range.
            samples = random.sample(range(interval[0], interval[1]), n_sub_samples)
            # Original samples paths
            original_modeled_dir = os.path.join(base_path, mix_elem, sub_elem, "modeled_samples")
            original_raw_dir = os.path.join(base_path, mix_elem, sub_elem, "raw_samples")
            # Mix samples paths
            save_modeled_dir = os.path.join(sub_save_dir, "modeled_samples")
            save_raw_dir = os.path.join(sub_save_dir, "raw_samples")
            print(n_sub_samples)
            for n, sample in enumerate(samples):
                num += 1
                # Destination index: train numbers from 0; test/val
                # continue from the start of their respective interval.
                if action == "train":
                    save_num = num_train
                    num_train += 1
                elif action == "test":
                    save_num = test_interval[0] + num_test
                    num_test += 1
                else:
                    save_num = val_interval[0] + num_val
                    num_val += 1
                # Progress log every 500 copies and on the last sample.
                if n % 500 == 0 or n == len(samples)-1:
                    print(action, sub_elem, n, num, num_train, num_test, num_val, save_num)
                original_copy_file = "sample{}".format(sample)
                save_copy_file = "sample{}".format(save_num)
                # Copy the modeled .txt file and the raw frame directory
                # under the new sequential name.
                copyfile(os.path.join(original_modeled_dir, original_copy_file + ".txt"),
                         os.path.join(save_modeled_dir, save_copy_file + ".txt"))
                copytree(os.path.join(original_raw_dir, original_copy_file),
                         os.path.join(save_raw_dir, save_copy_file))
|
998,910 | 7b8bf1ab9fb4aec6977df34ebe92e92b8c24612a | import json
import os
from time import sleep
from models.result_types import ResultType
from processing import pipeline_zoo
from processing.basic_pipelines import MergePipelineStep, OutputPipelineStep, SplitPipelineStep
from processing.output.file_output_pipeline import FileOutputPipeline
from processing.pipeline_zoo import _targeted_sentiment_analysis_pipeline, only_class, only_readable_class, \
get_data_pipeline, _tweet_length_pipeline
from processing.post_processing.SentimentDistributionPipeline import SentimentDistributionPipeline
from processing.utils.flatten_pipeline import FlattenPipeline
from processing.utils.map_pipeline import MapPipeline
import datetime
from processing.utils.to_json_pipeline import ToJsonPipeline
dir_path = os.path.dirname(os.path.realpath(__file__))
def _get_sentiment_for_person_pipeline(filename: str):
    """Build a pipeline that runs targeted sentiment analysis and writes
    the resulting sentiment distribution to *filename* as JSON.

    Returns the pipeline's input step, ready for ``feed_data()``.
    """
    pipeline, output = _targeted_sentiment_analysis_pipeline(custom_data_step=True)
    # Merge all analysis branches, map raw classes to readable labels,
    # aggregate into a distribution, serialize, persist, then expose the
    # result through an output step.
    output \
        .link(MergePipelineStep()) \
        .link(MapPipeline('only_readable_class', func=only_readable_class)) \
        .link(SentimentDistributionPipeline('sentiment_distribution')) \
        .link(ToJsonPipeline('to_json')) \
        .link(FileOutputPipeline(filename)) \
        .link(OutputPipelineStep('output_sentiment_distribution', lambda x: x))
    return pipeline
def _get_tweet_length(filename: str):
    """Build a pipeline that computes tweet lengths and writes them to
    *filename* as JSON. Returns the pipeline's input step."""
    pipeline, output = _tweet_length_pipeline(custom_data_step=True)
    output.link(ToJsonPipeline('to_json')) \
        .link(FileOutputPipeline(filename)) \
        .link(OutputPipelineStep('output_tweet_lengths', lambda x: x))
    return pipeline
def _get_raw_tweets(filename: str):
    """Build a pipeline that dumps each status's raw JSON payload to
    *filename*. Returns the pipeline's input step."""
    pipeline = FlattenPipeline('flatten_data')
    # NOTE(review): the output step name 'output_tweet_lengths' looks
    # copy-pasted from _get_tweet_length — presumably this should carry a
    # raw-tweets name; confirm before renaming (it may be referenced).
    pipeline.link(MapPipeline('status_to_text', lambda x: x._json)) \
        .link(ToJsonPipeline('to_json')) \
        .link(FileOutputPipeline(filename)) \
        .link(OutputPipelineStep('output_tweet_lengths', lambda x: x))
    return pipeline
def get_pipeline(filename_sentiment: str, filename_length: str, filename_raw: str, result_type: ResultType = None):
    """Build the combined fetch-and-analyze pipeline for one person.

    The shared data step fans out into three branches — sentiment
    distribution, tweet lengths, and raw tweet dumps — each writing to
    its own output file.

    Returns the input step; call ``.feed_data(person)`` to execute.
    """
    input_step, data_step = get_data_pipeline(
        with_handle=True, with_tags=True, with_keywords=True,
        result_type=result_type
    )
    data_step.link(SplitPipelineStep(outputs=[
        _get_sentiment_for_person_pipeline(filename_sentiment),
        _get_tweet_length(filename_length),
        _get_raw_tweets(filename_raw)
    ]))
    return input_step
def get_prominent_people():
    """Return the person names (top-level keys) from
    resources/prominent_people.json next to this module."""
    people_file = f'{dir_path}/resources/prominent_people.json'
    with open(people_file, 'r') as handle:
        data = json.load(handle)
    return data.keys()
# def run_pipeline(person:str, result_type:'ResultType'):
# _get_sentiment_for_person_pipeline(
# f'./data_trove/{person}_{str(datetime.datetime.now())}_{result_type.name}.json',
# result_type=result_type
# ).feed_data(person)
if __name__ == '__main__':
    people = get_prominent_people()
    for person in people:
        print(f'Fetching Person "{person}"')
        # Retry loop: re-attempt the whole fetch for this person, giving
        # up after 2 failed tries.
        error = True
        tries = 0
        while error:
            try:
                # _get_sentiment_for_person_pipeline(
                #     f'./data_trove/{person}_{str(datetime.datetime.now())}_mixed.json',
                #     result_type=ResultType.MIXED
                # ).feed_data(person)
                # NOTE(review): if the RECENT run succeeds and the POPULAR
                # run fails, the retry repeats both runs — confirm that
                # the duplicate work/output files are intended.
                get_pipeline(
                    f'./data_trove/{person}_{str(datetime.datetime.now())}_recent.json',
                    f'./data_trove/tweetlength/{person}_{str(datetime.datetime.now())}_recent.json',
                    f'./data_trove/raw/{person}_{str(datetime.datetime.now())}_recent.json',
                    result_type=ResultType.RECENT
                ).feed_data(person)
                get_pipeline(
                    f'./data_trove/{person}_{str(datetime.datetime.now())}_popular.json',
                    f'./data_trove/tweetlength/{person}_{str(datetime.datetime.now())}_popular.json',
                    f'./data_trove/raw/{person}_{str(datetime.datetime.now())}_popular.json',
                    result_type=ResultType.POPULAR
                ).feed_data(person)
                error = False
            except Exception as e:
                print(f'Error occured for {person}')
                print(e)
                error = True
                tries += 1
                if tries == 2:
                    print('Maximum amount of tries, breaking')
                    break
|
998,911 | 4d4db6fcd77154604ee44978bad9a59af4db1797 | import logging
import shutil
import os
from rest_framework import mixins
from rest_framework import viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.decorators import action
from django_filters.rest_framework import DjangoFilterBackend
from goods2.dl import imagedetection
from goods2 import common
from goods2 import util
from tradition.hand.hand_detect import HandDetect
from goods2.serializers import *
logger = logging.getLogger("django2")
class DefaultMixin:
    """Shared pagination defaults for the viewsets in this module."""
    paginate_by = 25  # default page size
    paginate_by_param = 'page_size'  # query parameter that overrides the page size
    max_paginate_by = 100  # hard cap on the client-requested page size
class DeviceidViewSet(DefaultMixin, viewsets.ReadOnlyModelViewSet):
    """Read-only API over Deviceid records, newest first."""
    queryset = Deviceid.objects.order_by('-id')
    serializer_class = DeviceidSerializer
class DeviceidTrainViewSet(DefaultMixin, viewsets.ModelViewSet):
    """Full CRUD API over DeviceidTrain records, newest first."""
    queryset = DeviceidTrain.objects.order_by('-id')
    serializer_class = DeviceidTrainSerializer
class UserImageViewSet(DefaultMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,
                       viewsets.GenericViewSet, mixins.DestroyModelMixin):
    """API over user-captured images that already have ground truth.

    Only images that are not yet in the training set and were not flagged
    as hand shots are exposed. Supports listing, retrieval and deletion,
    plus extra endpoints for distinct device ids, distinct UPCs, and
    promoting an image into the training dataset.
    """
    queryset = Image.objects.exclude(image_ground_truth_id=None).filter(is_train=False).filter(is_hand=False).order_by('-id')
    serializer_class = UserImageSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('deviceid', 'upc')

    @action(methods=['get'], detail=False)
    def device_list(self, request):
        """Return the distinct device ids that have uploaded images."""
        devices = Image.objects.values('deviceid').distinct()
        return Response([entry['deviceid'] for entry in devices])

    @action(methods=['get'], detail=False)
    def upc_list(self, request):
        """Return distinct UPCs of eligible images, optionally narrowed
        to a single device via the ?deviceid= query parameter."""
        qs = Image.objects.exclude(image_ground_truth_id=None).filter(is_train=False).filter(is_hand=False)
        if 'deviceid' in request.query_params:
            qs = qs.filter(deviceid=request.query_params['deviceid'])
        upcs = qs.values('upc').distinct()
        return Response([entry['upc'] for entry in upcs])

    @action(methods=['put'], detail=True)
    def add_to_train(self, request, pk=None):
        """Copy this image into the training dataset and record a
        TrainImage row pointing at the copy."""
        instance = self.get_object()
        # Path stored on the TrainImage row (non-local dataset dir) and
        # the on-disk destination (local dataset dir).
        train_source = '{}/{}/{}/{}'.format(common.get_dataset_dir(), instance.deviceid, instance.upc,
                                            'image_' + os.path.basename(instance.source.path))
        train_source_dir = '{}/{}/{}'.format(common.get_dataset_dir(True), instance.deviceid,
                                             instance.upc)
        # Local import: tensorflow is heavy and only needed here for gfile.
        import tensorflow as tf
        if not tf.gfile.Exists(train_source_dir):
            tf.gfile.MakeDirs(train_source_dir)
        train_source_path = '{}/{}'.format(train_source_dir, 'image_' + os.path.basename(instance.source.path))
        shutil.copy(instance.source.path, train_source_path)
        # Mark the source image as used for training so it drops out of
        # this viewset's queryset.
        instance.is_train = True
        instance.save()
        TrainImage.objects.create(
            deviceid=instance.deviceid,
            source=train_source,
            upc=instance.upc,
            source_image_id=instance.pk,
            source_from=2,  # NOTE(review): presumably "from user image" — confirm enum meaning
            score=1.0,
        )
        return Response(util.wrap_ret([]), status=status.HTTP_200_OK)

    # (A disabled list() override that capped results to 5 per ground
    # truth used to live here; removed as dead commented-out code.)

    def destroy(self, request, *args, **kwargs):
        """Delete the DB row and best-effort remove the file from disk."""
        instance = self.get_object()
        try:
            os.remove(instance.source.path)
        # FIX: catch only OSError instead of a bare except — a missing or
        # inaccessible file should not abort the row deletion, but other
        # unexpected errors should no longer be silently swallowed.
        except OSError:
            pass
        self.perform_destroy(instance)
        return Response(util.wrap_ret(None), status=status.HTTP_200_OK)
class ImageViewSet(DefaultMixin, mixins.CreateModelMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,
                   viewsets.GenericViewSet):
    """Upload endpoint for capture images.

    ``create`` stores the image, runs detection via ``ai_detect`` and answers
    with the confident candidates (score >= 0.85, excluding 'hand'/'bag').
    """
    queryset = Image.objects.order_by('-id')
    serializer_class = ImageSerializer

    def create(self, request, *args, **kwargs):
        """Store the uploaded image, detect, and return upc candidates."""
        # TODO: only deviceid=485 is let into the storage/detection pipeline.
        if request.data['deviceid'] != '485':
            return Response([], status=status.HTTP_201_CREATED)
        try:
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            headers = self.get_success_headers(serializer.data)
        except Exception as e:
            logger.error(e)
            raise e
        try:
            device = Deviceid.objects.get(deviceid=serializer.instance.deviceid)
        except Deviceid.DoesNotExist as e:
            # First sighting of this device: register it on the fly.
            device = Deviceid.objects.create(
                deviceid=serializer.instance.deviceid,
            )
        # hand_detect = HandDetect(serializer.instance.source.path, debug_type=1)
        # is_hand = hand_detect.detect()
        # if is_hand:
        #     serializer.instance.is_hand = True
        #     serializer.instance.save()
        #     logger.info('[{}]detect result: {}'.format(serializer.instance.deviceid, 'has hand'))
        #     return Response([], status=status.HTTP_201_CREATED, headers=headers)
        if device.deviceid not in ['485','200']:
            return Response([],status=status.HTTP_201_CREATED, headers=headers)
        # NOTE: ai_detect returns (scores, upcs) — scores first.
        scores, upcs = self.ai_detect(serializer)
        if len(upcs)>0 and upcs[0] == 'hand':
            serializer.instance.is_hand = True
            serializer.instance.save()
        ret = []
        if True: #device.deviceid in ['485','200'] or device.state >= common.DEVICE_STATE_COMMERCIAL:
            # Devices that are not commercial would get no results returned.
            # upc_to_scores = {}
            # weight = 0.5
            # image_qs = Image.objects.filter(identify=serializer.instance.identify).order_by('-id')
            # for image in image_qs:
            #     image_result_qs = image.image_results.all()
            #     for image_result in image_result_qs:
            #         upc = image_result.upc
            #         score = image_result.score
            #         if upc in upc_to_scores:
            #             upc_to_scores[upc] = upc_to_scores[upc]*(1-weight) + score*weight
            #         else:
            #             upc_to_scores[upc] = score
            #     weight -= 0.05  # decay for earlier attempts
            #     if weight <= 0:
            #         break
            #
            # upcs, scores = sort_upc_to_scores(upc_to_scores)
            # logger.info(scores)
            for i in range(len(upcs)):
                # TODO: only recommend the correct candidates.
                if scores[i]>=0.85 and upcs[i] not in ['hand', 'bag']:
                    ret.append(
                        {
                            'upc': upcs[i],
                            'score': scores[i],
                        }
                    )
        # elif device.deviceid == '36':
        #     ret = [
        #         {'upc': '2000000000103', 'score': 0.99},
        #         {'upc': '2000000000097', 'score': 0.99},
        #         {'upc': '2000000000093', 'score': 0.99}
        #     ]
        # elif device.deviceid == '3061':  # apple
        #     ret = [
        #         {'upc': '2000000001540', 'score': 0.07},
        #         {'upc': '2000000001598', 'score': 0.25},
        #         {'upc': '3287583', 'score': 0.04},
        #     ]
        # elif device.deviceid == '3062':  # banana
        #     ret = [
        #         {'upc': '2000000001541', 'score': 0.95},
        #         {'upc': '3960271', 'score': 0.03},
        #         {'upc': '3283458', 'score': 0.01},
        #     ]
        logger.info('[{}]detect result: {}'.format(serializer.instance.deviceid, ret))
        return Response(ret, status=status.HTTP_201_CREATED, headers=headers)

    def ai_detect(self, serializer):
        """Run the latest completed model for this device and persist results.

        Devices in the good-neighbour group share one jointly trained model;
        all other devices use their own latest non-'TC' completed train.
        At most five (upc, score) pairs are stored as ImageResult rows.

        Returns:
            (scores, upcs) — NOTE the order: scores first, upcs second.
        """
        upcs = []
        scores = []
        # Detection phase.
        if serializer.instance.deviceid in common.good_neighbour_bind_deviceid_list:
            # Good-neighbour group: joint model across the bound device list.
            last_normal_train_qs = TrainAction.objects.filter(state=common.TRAIN_STATE_COMPLETE).filter(
                deviceid__in=common.good_neighbour_bind_deviceid_list).exclude(action='TC').order_by('-id')
            if len(last_normal_train_qs) > 0:
                logger.info('[{}]begin detect image:{}'.format(serializer.instance.deviceid, serializer.instance.identify))
                last_train = last_normal_train_qs[0]
                last_normal_train_model = \
                    TrainModel.objects.filter(train_action_id=last_train.pk).exclude(model_path='').order_by('-id')[0]
                detector = imagedetection.ImageDetectorFactory.get_static_detector(
                    last_normal_train_model)
                upcs, scores = detector.detect(serializer.instance)
                # Record which model produced this image's detection.
                ImageTrainModel.objects.create(
                    train_model_id=last_normal_train_model.pk,
                    image_id=serializer.instance.pk
                )
        else:
            last_normal_train_qs = TrainAction.objects.filter(state=common.TRAIN_STATE_COMPLETE).filter(
                deviceid=serializer.instance.deviceid).exclude(action='TC').order_by('-id')
            if len(last_normal_train_qs) > 0:
                logger.info('[{}]begin detect image:{}'.format(serializer.instance.deviceid, serializer.instance.identify))
                last_train = last_normal_train_qs[0]
                last_normal_train_model = \
                    TrainModel.objects.filter(train_action_id=last_train.pk).exclude(model_path='').order_by('-id')[0]
                detector = imagedetection.ImageDetectorFactory.get_static_detector(
                    last_normal_train_model)
                upcs, scores = detector.detect(serializer.instance)
                ImageTrainModel.objects.create(
                    train_model_id=last_normal_train_model.pk,
                    image_id=serializer.instance.pk
                )
                # last_tc_train_qs = TrainAction.objects.filter(state=common.TRAIN_STATE_COMPLETE).filter(
                #     deviceid=serializer.instance.deviceid).filter(action='TC').filter(
                #     complete_time__gt=last_normal_train_model.create_time).order_by('-id')
                # if len(last_tc_train_qs) > 0:
                #     last_tc_train = last_tc_train_qs[0]
                #     last_tc_train_model = \
                #         TrainModel.objects.filter(train_action_id=last_tc_train.pk).exclude(model_path='').order_by('-id')[0]
                #     detector2 = imagedetection.ImageDetectorFactory.get_static_detector(
                #         last_tc_train_model)
                #     upcs2, scores2 = detector2.detect(serializer.instance)
                #     ImageTrainModel.objects.create(
                #         train_model_id=last_tc_train_model.pk,
                #         image_id=serializer.instance.pk
                #     )
                #     # joint computation
                #     upc_to_scores = {}
                #     for i in range(len(upcs)):
                #         if upcs[i] in upc_to_scores:
                #             upc_to_scores[upcs[i]] = upc_to_scores[upcs[i]] * 0.5 + scores[i] * 0.5
                #         else:
                #             upc_to_scores[upcs[i]] = scores[i]
                #     for i in range(len(upcs2)):
                #         if upcs2[i] in upc_to_scores:
                #             upc_to_scores[upcs2[i]] = upc_to_scores[upcs2[i]] * 0.5 + scores2[i] * 0.5
                #         else:
                #             upc_to_scores[upcs2[i]] = scores2[i]
                #
                #     upcs, scores = sort_upc_to_scores(upc_to_scores)
        # Persist the results (at most five).
        for i in range(len(upcs)):
            if i < 5:  # cap at five
                ImageResult.objects.create(
                    image_id=serializer.instance.pk,
                    upc=upcs[i],
                    score=scores[i]
                )
        return scores, upcs
def sort_upc_to_scores(upc_to_scores):
    """Split a ``{upc: score}`` mapping into parallel, score-sorted lists.

    Args:
        upc_to_scores: mapping of upc -> score.

    Returns:
        (upcs, scores): both ordered by descending score; ties are broken by
        descending upc, matching the original ``[[score, upc]]`` list sort.
    """
    # One sorted() call replaces the manual build-list / in-place-sort /
    # double-comprehension dance; the (score, upc) key keeps tie order.
    ranked = sorted(upc_to_scores.items(), key=lambda kv: (kv[1], kv[0]), reverse=True)
    upcs = [upc for upc, _ in ranked]
    scores = [score for _, score in ranked]
    return upcs, scores
class ImageGroundTruthViewSet(DefaultMixin, mixins.CreateModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet):
    """Receives the ground-truth upc for a capture session ('identify') and
    back-fills it onto every Image of that session, updating hit stats."""
    queryset = ImageGroundTruth.objects.order_by('-id')
    serializer_class = ImageGroundTruthSerializer

    def create(self, request, *args, **kwargs):
        """Create (or, on a duplicate 'identify', update) the ground truth.

        After saving, every Image of the same session/device is linked to it
        and session-level truth_rate/precision are recomputed.
        """
        try:
            serializer = self.get_serializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            self.perform_create(serializer)
            headers = self.get_success_headers(serializer.data)
        except Exception as e:
            if isinstance(e, ValidationError):
                # Duplicate 'identify': treat the POST as an update in place.
                try:
                    logger.info(request.data)
                    logger.info(serializer.data)
                    image_ground_truth = ImageGroundTruth.objects.get(identify=serializer.data['identify'])
                    image_ground_truth.upc = serializer.data['upc']
                    image_ground_truth.save()
                    serializer.instance = image_ground_truth
                    headers = self.get_success_headers(serializer.data)
                    logger.info('[{}]update image ground truth:{},{}'.format(serializer.data['deviceid'], serializer.data['identify'], serializer.data['upc']))
                except Exception as e:
                    # NOTE(review): this rebinds `e`, so the original
                    # ValidationError is lost from the log — confirm intended.
                    logger.error(e)
                    raise e
            else:
                logger.error(e)
                raise e
        else:
            logger.info('[{}]create image ground truth:{},{}'.format(serializer.data['deviceid'], serializer.data['identify'], serializer.data['upc']))
        images = Image.objects.filter(identify=serializer.instance.identify).filter(deviceid=serializer.instance.deviceid)
        # Per image: did any confident (>0.85) result match / contradict the
        # ground truth?  'hand'/'bag' results never count as contradictions.
        truth_image_result_cnt = 0
        false_image_result_cnt = 0
        for image in images:
            truth_image_result_qs = image.image_results.filter(upc=serializer.instance.upc).filter(score__gt=0.85)
            if len(truth_image_result_qs) > 0:
                truth_image_result_cnt += 1
            false_image_result_qs = image.image_results.exclude(upc=serializer.instance.upc).exclude(upc='hand').exclude(upc='bag').filter(score__gt=0.85)
            if len(false_image_result_qs) > 0:
                false_image_result_cnt += 1
            image.image_ground_truth = serializer.instance
            image.upc = serializer.instance.upc
            image.save()
        serializer.instance.cnt = len(images)
        if truth_image_result_cnt + false_image_result_cnt > 0:
            if truth_image_result_cnt > 0:
                # At least one confident correct hit in the session.
                serializer.instance.truth_rate = 1.0
                serializer.instance.precision = 1.0
            else:
                serializer.instance.truth_rate = 0.0
                serializer.instance.precision = 0.0
        else:
            # No confident prediction at all: scored as "not wrong"
            # (truth_rate 1.0) but with zero precision.
            serializer.instance.truth_rate = 1.0
            serializer.instance.precision = 0.0
        serializer.instance.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
class UpcBindViewSet(DefaultMixin, viewsets.ModelViewSet):
    """Plain CRUD over upc bindings, filterable by deviceid."""
    queryset = UpcBind.objects.order_by('-id')
    serializer_class = UpcBindSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('deviceid',)
class TrainImageViewSet(DefaultMixin, viewsets.ModelViewSet):
    """CRUD for training images plus helper listings for the admin UI."""
    # TODO: for the temporary display, hide hand and bag samples.
    queryset = TrainImage.objects.filter(special_type=0).order_by('-id')
    serializer_class = TrainImageSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('deviceid', 'upc')

    # Numeric special type -> directory/file-prefix name.
    _SPECIAL_TYPE_NAMES = {1: 'hand', 2: 'bag'}

    @action(methods=['get'], detail=False)
    def device_list(self, request):
        """Return the distinct deviceids that own training images."""
        devices = TrainImage.objects.values('deviceid').distinct()
        return Response([entry['deviceid'] for entry in devices])

    @action(methods=['get'], detail=False)
    def device_to_precision(self, request):
        """Map each deviceid to its latest recorded truth rate (0.0 if none)."""
        devices = TrainImage.objects.values('deviceid').distinct()
        ret = {}
        for entry in devices:
            deviceid = entry['deviceid']
            try:
                device = Deviceid.objects.get(deviceid=deviceid)
            except Deviceid.DoesNotExist:
                # Unknown device: register it so precision rows can attach.
                device = Deviceid.objects.create(deviceid=deviceid)
            precision_qs = device.device_precisions.order_by('-id')
            ret[deviceid] = precision_qs[0].truth_rate if len(precision_qs) > 0 else 0.0
        return Response(ret)

    @action(methods=['get'], detail=False)
    def upc_list(self, request):
        """Return the distinct upcs, optionally restricted to ?deviceid=."""
        if 'deviceid' in request.query_params:
            upcs = TrainImage.objects.filter(deviceid=request.query_params['deviceid']).values('upc').distinct()
        else:
            upcs = TrainImage.objects.values('upc').distinct()
        return Response([entry['upc'] for entry in upcs])

    @action(methods=['put'], detail=True)
    def set_special_type(self, request, pk=None):
        """Mark/unmark an image as special (?type= 1=hand, 2=bag, 0=normal).

        Marking copies the image file into the matching special dataset
        directory; unmarking removes that copy.  The previous implementation
        raised an unhandled NameError (``special_type_name`` unbound) for any
        type outside {0, 1, 2} and when clearing an already-normal image; an
        unknown type now yields an explicit 400 and clearing a normal image
        is a no-op.  It also shadowed the ``type`` builtin.
        """
        instance = self.get_object()
        special_type = int(request.query_params.get('type', 0))
        if special_type > 0:
            special_type_name = self._SPECIAL_TYPE_NAMES.get(special_type)
            if special_type_name is None:
                return Response(util.wrap_ret([]), status=status.HTTP_400_BAD_REQUEST)
            special_source_dir = '{}/{}'.format(common.get_dataset_dir(True), special_type_name)
            # The special directory may not exist yet on a fresh deployment.
            os.makedirs(special_source_dir, exist_ok=True)
            special_source_path = '{}/{}_{}'.format(special_source_dir, special_type_name,
                                                    os.path.basename(instance.source.path))
            shutil.copy(instance.source.path, special_source_path)
        else:
            # Clearing: drop the previously copied special file, if any.
            special_type_name = self._SPECIAL_TYPE_NAMES.get(instance.special_type)
            if special_type_name is not None:
                special_source_dir = '{}/{}'.format(common.get_dataset_dir(True), special_type_name)
                special_source_path = '{}/{}_{}'.format(special_source_dir, special_type_name,
                                                        os.path.basename(instance.source.path))
                if os.path.isfile(special_source_path):
                    os.remove(special_source_path)
        instance.special_type = special_type
        instance.save()
        return Response(util.wrap_ret([]), status=status.HTTP_200_OK)

    def create(self, request, *args, **kwargs):
        """Create a training image, defaulting source_from=1 and score=1.0."""
        if 'source_from' not in request.data:
            request.data['source_from'] = 1
        if 'score' not in request.data:
            request.data['score'] = 1.0
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    def destroy(self, request, *args, **kwargs):
        """Delete the record and, best-effort, its file on disk."""
        instance = self.get_object()
        try:
            os.remove(instance.source.path)
        except (OSError, ValueError):
            # Missing file / empty FileField must not block row deletion.
            pass
        self.perform_destroy(instance)
        return Response(util.wrap_ret(None), status=status.HTTP_200_OK)
class TrainActionViewSet(DefaultMixin, viewsets.ModelViewSet):
    """CRUD for training actions, filterable by deviceid."""
    queryset = TrainAction.objects.order_by('-id')
    serializer_class = TrainActionSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('deviceid',)


class TrainModelViewSet(DefaultMixin, viewsets.ReadOnlyModelViewSet):
    """Read-only listing of trained models, newest first."""
    queryset = TrainModel.objects.order_by('-id')
    serializer_class = TrainModelSerializer


class TaskLogViewSet(DefaultMixin, viewsets.ReadOnlyModelViewSet):
    """Read-only listing of background-task logs, newest first."""
    queryset = TaskLog.objects.order_by('-id')
    serializer_class = TaskLogSerializer
class CreateTrain(APIView):
    """Manually trigger a training run for one device (or its bound group)."""

    def get(self, request):
        """Start a train of ?action= ('TA' default) for ?deviceid=.

        Returns 201 when a train was created, 204 when a non-'TC' train is
        already waiting for the same device/group.
        """
        if 'action' not in request.query_params:
            action = 'TA'
        else:
            action = request.query_params['action']
        deviceid = request.query_params['deviceid']
        if deviceid == 'bag':
            # Pseudo-device: retrain the bag classifier.
            from goods2.cron import do_create_train_bag
            train_action = do_create_train_bag(action,deviceid)
            logger.info('[{}]create_train_bag by menu: {}'.format(deviceid, action))
            return Response(util.wrap_ret(None), status=status.HTTP_201_CREATED)
        if deviceid in common.good_neighbour_bind_deviceid_list:
            # Good-neighbour devices are trained jointly as one group.
            waiting_ta_tf = TrainAction.objects.exclude(action='TC').filter(deviceid__in=common.good_neighbour_bind_deviceid_list).filter(
                state__lte=common.TRAIN_STATE_WAITING)
            if len(waiting_ta_tf) == 0:
                from goods2.cron import do_create_train_bind
                train_action = do_create_train_bind(action, deviceid, None, common.good_neighbour_bind_deviceid_list)
                logger.info('[{}]create_train_bind by menu: {}'.format(deviceid, action))
                return Response(util.wrap_ret(None), status=status.HTTP_201_CREATED)
            else:
                return Response(status=status.HTTP_204_NO_CONTENT)
        else:
            waiting_ta_tf = TrainAction.objects.exclude(action='TC').filter(deviceid=deviceid).filter(state__lte=common.TRAIN_STATE_WAITING)
            if len(waiting_ta_tf) == 0:
                from goods2.cron import do_create_train
                train_action = do_create_train(action, deviceid, None)
                logger.info('[{}]create_train by menu: {}'.format(deviceid, action))
                return Response(util.wrap_ret(None), status=status.HTTP_201_CREATED)
            else:
                return Response(status=status.HTTP_204_NO_CONTENT)
class TestTrain(APIView):
    """Manually kick off a test training run when no TA/TF train is active."""

    def get(self, request):
        """Start a test train unless a non-'TC' train is waiting or training."""
        action = 'TA'
        doing_ta_tf = TrainAction.objects.exclude(action='TC').filter(
            state__lte=common.TRAIN_STATE_TRAINING)
        if len(doing_ta_tf) == 0:
            from goods2.cron import do_test_train
            train_action = do_test_train(action)
            logger.info('begin test_train')
            return Response(util.wrap_ret(None), status=status.HTTP_201_CREATED)
        else:
            return Response(status=status.HTTP_204_NO_CONTENT)
class ClearData(APIView):
    """Delete every Image (its results and its file) without a ground truth."""

    def get(self, request):
        """Purge ground-truth-less images; always answers 201."""
        # deviceid = request.query_params['deviceid']
        # train_image_qs = TrainImage.objects.filter(source_image_id__gt=0).filter(deviceid=deviceid).order_by('id')
        # for train_image in train_image_qs:
        #     os.remove(train_image.source.path)
        #     train_image.delete()
        image_qs = Image.objects.filter(image_ground_truth=None)
        for image in image_qs:
            if os.path.isfile(image.source.path):
                logger.info('delete image: {}'.format(image.source.path))
                os.remove(image.source.path)
            # Delete detection results first, then the image row itself.
            for result in image.image_results.all():
                result.delete()
            image.delete()
        return Response([], status=status.HTTP_201_CREATED)
|
998,912 | bfa70c449d93d6b19ea8a46c0e604ec610515d4a | from django.contrib.auth.models import User
from tastypie import fields
from tastypie.authorization import Authorization
from tastypie.resources import ModelResource
import butternut.account.api as AccountAPI
from models import Match
import datetime
class MatchResource(ModelResource):
    """Tastypie REST resource for Match rows.

    On creation it stamps the create date, recomputes the players' ratings,
    and echoes the updated winner/loser stats back in the response payload.
    """
    winner = fields.ForeignKey(AccountAPI.ProfileResource, 'winner', full=False)
    loser = fields.ForeignKey(AccountAPI.ProfileResource, 'loser', full=False)
    # winner_id = fields.IntegerProperty()

    class Meta:
        queryset = Match.objects.order_by('-create_date').all()
        resource_name = 'match'
        list_allowed_methods = ['get', 'post', 'put', 'delete']
        always_return_data = True
        authorization = Authorization()

    def obj_create(self, bundle, **kwargs):
        """Create the match, update ratings, and enrich the response data."""
        bundle.data['create_date'] = datetime.datetime.now()
        result = super(MatchResource, self).obj_create(bundle, **kwargs)
        # calc_result mutates the winner/loser profiles (elo/mu/sigma) as a
        # side effect; its return value is not used further here.
        match_result = result.obj.calc_result(save_rating=True, update_profiles=True)
        result.data['winner_last_name'] = result.obj.winner.last_name
        result.data['winner_first_name'] = result.obj.winner.first_name
        result.data['new_winner_elo'] = result.obj.winner.elo
        result.data['new_winner_mu'] = result.obj.winner.mu
        result.data['new_winner_sigma'] = result.obj.winner.sigma
        result.data['loser_last_name'] = result.obj.loser.last_name
        result.data['loser_first_name'] = result.obj.loser.first_name
        result.data['new_loser_elo'] = result.obj.loser.elo
        result.data['new_loser_mu'] = result.obj.loser.mu
        result.data['new_loser_sigma'] = result.obj.loser.sigma
        return result

    def dehydrate(self, bundle):
        """Expose the raw winner/loser ids alongside the resource URIs."""
        bundle.data['winner_id'] = bundle.obj.winner_id
        bundle.data['loser_id'] = bundle.obj.loser_id
        return bundle
998,913 | 3cc85aee5cd82f213d3ef2eace078662b2cc5494 | from dataclasses import dataclass, is_dataclass
from tkinter import *
from tkinter.ttk import *
from typing import Union
@dataclass
class NetworkSettings:
    """Schema of Tk variables backing the network-settings panel."""
    # Parent widget the Tk variables are bound to.
    network_settings: Widget
    # Filled in __post_init__; the None defaults exist only so the dataclass
    # can be constructed with the parent widget as the sole argument.
    gradient_multiplier: Union[DoubleVar, None] = None
    selection_method: Union[StringVar, None] = None

    def __post_init__(self):
        # Defaults: multiplier 1.1, selection method 'def'.
        self.gradient_multiplier = DoubleVar(self.network_settings, 1.1)
        self.selection_method = StringVar(self.network_settings, 'def')
@dataclass
class EmptySettings:
    """Placeholder schema used when SettingsGUI is given no real schema."""
    # The field name itself doubles as the section label shown in the UI.
    no_settings_structure_given: Widget
class LabeledWidget(Frame):
    """A frame holding a left-packed label and one companion widget beside it.

    Keyword arguments are forwarded to the Label (not to the Frame).
    """

    def __init__(self, master=None, **kw):
        super().__init__(master)
        caption = Label(self, **kw)
        caption.pack(side=LEFT)
        self.label = caption
        self.widget = None

    def set_widget(self, widget):
        """Attach *widget* to the right of the label."""
        self.widget = widget
        widget.pack(side=LEFT)
class SettingsGUI(Frame):
    """Tk frame that renders widgets for a dataclass-based settings schema.

    Tk-variable fields become labeled entries/checkbuttons, nested dataclass
    fields become nested frames, and anything else becomes a section header.
    """

    def __init__(self, master=None, schema=None, **kw):
        super().__init__(master, **kw)
        self.schema = schema if schema is not None else EmptySettings(self)
        self._parse_schema(self, self.schema)

    def change_schema(self, new_schema):
        """Render an additional schema.

        NOTE(review): previously rendered widgets are not destroyed first,
        so the old schema stays visible alongside — confirm intended.
        """
        self.schema = new_schema
        self._parse_schema(self, self.schema)

    def wait_for(self, function, is_not=False):
        """Busy-wait, pumping Tk events, until function() is not `is_not`.

        Returns the first value of function() that differs (by identity)
        from `is_not`.
        """
        value = False
        while value is is_not:
            value = function()
            self.update_idletasks()
            self.update()
        return value

    @staticmethod
    def try_get(get_function):
        """Call get_function(), returning None when the Tk variable cannot be
        read (e.g. an empty Entry backing a DoubleVar raises TclError)."""
        try:
            return get_function()
        except TclError:
            return None

    @staticmethod
    def _parse_schema(root, schema):
        """Recursively build widgets under *root* for each schema field."""
        attributes = schema.__dict__
        for name, value in attributes.items():
            # Field names use underscores; display them with spaces.
            name = name.replace('_', ' ')
            if is_dataclass(value):
                frame = Frame(root)
                SettingsGUI._parse_schema(frame, value)
                frame.pack(fill='x', pady=10)
            elif isinstance(value, (DoubleVar, StringVar, IntVar)):
                widget = LabeledWidget(root, text=name)
                widget.set_widget(Entry(widget, textvariable=value))
                widget.pack()
            elif isinstance(value, BooleanVar):
                widget = LabeledWidget(root, text=name)
                widget.set_widget(Checkbutton(widget, variable=value))
                widget.pack()
            else:
                # Non-variable field (e.g. the parent Widget): render its
                # name as a section header.
                widget = Label(root, text=name.capitalize(), background='#232829', anchor="center")
                widget.pack(fill='x')
998,914 | 61f02f7b2fd90918e75498200d8310602b9ac1a7 | import time
import copy
import sys
import a3dc_module_interface as a3
from modules.packages.a3dc.utils import error, warning
from modules.packages.a3dc.ImageClass import VividImage
from modules.packages.a3dc.constants import SEPARATOR
def module_main(ctx):
    """A3DC process-module entry point: extract one channel from the input.

    Reads the 'Image' and 'MetaData' inputs, pulls out the channel selected
    by the 'Channel' parameter, adjusts the metadata to describe a single
    channel, and writes the result to the 'Channel 1' output.

    Raises:
        The wrapped a3dc error for any failure inside the module body.
    """
    try:
        # Initialization. time.perf_counter() replaces time.clock(), which
        # was removed in Python 3.8 and raised AttributeError at runtime.
        tstart = time.perf_counter()
        print(SEPARATOR)

        # Load and reshape image
        ##TempTempTemp##
        img = VividImage(a3.inputs['Image'], copy.deepcopy(a3.inputs['MetaData']))
        #img = Image(a3.inputs['Image'], a3.inputs['MetaData'])
        img.reorder('XYZCT')

        # Get channel from image. Validate BEFORE indexing metadata['Name'],
        # so an out-of-range channel gets the informative message instead of
        # a raw IndexError from the print below.
        ch = a3.inputs['Channel']
        if ch >= img.metadata['SizeC']:
            raise Exception('Image has %s channels! Invalid channel %s' % (str(img.metadata['SizeC']), str(ch)))
        print('Loading the following channel: ', img.metadata['Name'][ch])

        # Check if image is time series
        if img.metadata['SizeT'] > 1:
            warning("Image is a time series! Only the first time step will be extracted!", file=sys.stderr)

        # Modify metadata so it describes the single extracted channel.
        img.metadata['SamplesPerPixel'] = img.metadata['SamplesPerPixel'][ch]
        img.metadata['Name'] = img.metadata['Name'][ch]
        #img.metadata['Path']=filename

        # Create output: extract the channel from the image array.
        # NOTE(review): the module config declares an Output named 'Channel',
        # but this writes 'Channel 1' — confirm which name the framework
        # actually expects.
        a3.outputs['Channel 1'] = img.get_dimension(a3.inputs['Channel'], 'C').to_multidimimage()
        #to_multidimimage(Image(array.astype(np.float),copy.deepcopy(img.metadata)))

        # Finalization
        tstop = time.perf_counter()
        print('Processing finished in ' + str((tstop - tstart)) + ' seconds! ')
        print('Image loaded successfully!')
        print(SEPARATOR)
    except Exception as e:
        raise error("Error occured while executing '"+str(ctx.type())+"' module '"+str(ctx.name())+"' !", exception=e)
# Module registration: one int8 'Channel' parameter, image + metadata inputs,
# and a float-image output.
# NOTE(review): module_main writes a3.outputs['Channel 1'], but the output
# here is declared as 'Channel' — one of the two names looks wrong; confirm.
config = [a3.Parameter('Channel', a3.types.int8)
          .setFloatHint('default', 0)
          .setFloatHint('unusedValue', 0),
          a3.Input('Image', a3.types.GeneralPyType),
          a3.Input('MetaData', a3.types.GeneralPyType),
          a3.Output('Channel', a3.types.ImageFloat)
          ]

a3.def_process_module(config, module_main)
998,915 | 6e314590828dccd434eb65de23b8652070f485c1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 16:15:28 2019
Predictions with models of type Ratings and Genres with Chronological Data
@author: nicholas
"""
######## IMPORTS ########
import sys
import json
import torch
import numpy as np
import matplotlib.pyplot as plt
# Personnal imports
import AutoEncoders
import Utils
import Settings
from Arguments import args
# To use Orion
if args.orion:
from orion.client import report_results
######## INIT ########

# Print the args that will be used (kept in logs for reproducibility).
print(sys.argv)

# Add 1 to nb_movies_in_total because index of movies starts at 1
nb_movies = Settings.nb_movies_in_total + 1

# Cuda availability check
if args.DEVICE == "cuda" and not torch.cuda.is_available():
    raise ValueError("DEVICE specify a GPU computation but CUDA is not available")

# Seed everything for deterministic evaluation runs.
if args.seed:
    manualSeed = 1
    # Python
    # random.seed(manualSeed)
    # Numpy
    np.random.seed(manualSeed)
    # Torch
    torch.manual_seed(manualSeed)
    # Torch with GPU
    if args.DEVICE == "cuda":
        torch.cuda.manual_seed(manualSeed)
        torch.cuda.manual_seed_all(manualSeed)
        torch.backends.cudnn.enabled = False
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


######## DATA ########

######## LOAD DATA
# R (ratings) - Format [ [UserID, [movies uID], [ratings 0-1]] ]
print('******* Loading SAMPLES from *******', args.dataPATH + args.dataTrain)
#train_data = json.load(open(args.dataPATH + args.dataTrain))
valid_data = json.load(open(args.dataPATH + args.dataValid))
# Use only samples where there is a genres mention
valid_g_data = [[c,m,g,tbm] for c,m,g,tbm in valid_data if g != []]
# NOTE(review): DEBUG truncates valid_data only, and only AFTER valid_g_data
# was built from the full list — confirm this asymmetry is intended.
if args.DEBUG:
    # train_data = train_data[:128]
    valid_data = valid_data[:128]

# G (genres) - Format [ [UserID, [movies uID of genres mentionned]] ]
print('******* Loading GENRES from *******', args.genresDict)
dict_genresInter_idx_UiD = json.load(open(args.dataPATH + args.genresDict))

# Getting the popularity vector
print('** Including popularity')
popularity = np.load(args.dataPATH + 'popularity_vector.npy')
popularity = torch.from_numpy(popularity).float()

######## CREATING DATASET ListRatingDataset
print('******* Creating torch datasets *******')
#train_dataset = Utils.RnGChronoDataset(train_data, dict_genresInter_idx_UiD, \
#                                       nb_movies, popularity, args.DEVICE, args.exclude_genres, \
#                                       args.no_data_merge, args.noiseTrain, args.top_cut)
#valid_dataset = Utils.RnGChronoDataset(valid_data, dict_genresInter_idx_UiD, \
#                                       nb_movies, popularity, args.DEVICE, args.exclude_genres, \
#                                       args.no_data_merge, args.noiseEval, args.top_cut)
# FOR CHRONO (hence no_data_merge is True). With genres mentions or not
valid_chrono_dataset = Utils.RnGChronoDataset(valid_data, dict_genresInter_idx_UiD, \
                                              nb_movies, popularity, args.DEVICE, args.exclude_genres, \
                                              True, args.noiseEval, args.top_cut)
# FOR CHRONO (hence no_data_merge is True) + use only samples where there is a genres mention
valid_g_chrono_dataset = Utils.RnGChronoDataset(valid_g_data, dict_genresInter_idx_UiD, \
                                                nb_movies, popularity, args.DEVICE, args.exclude_genres, \
                                                True, args.noiseEval, args.top_cut)

######## CREATE DATALOADER
print('******* Creating dataloaders *******\n\n')
kwargs = {}
if(args.DEVICE == "cuda"):
    kwargs = {'num_workers': 0, 'pin_memory': False}
#train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch,\
#                                           shuffle=True, drop_last=True, **kwargs)
#valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch,\
#                                           shuffle=True, drop_last=True, **kwargs)
# For PredChrono
valid_chrono_loader = torch.utils.data.DataLoader(valid_chrono_dataset, batch_size=args.batch, shuffle=True, **kwargs)
valid_g_chrono_loader = torch.utils.data.DataLoader(valid_g_chrono_dataset, batch_size=args.batch, shuffle=True, **kwargs)


######## MODEL ########

# Organize args layers (unused when checkpoints carry their own 'layers').
if args.layer2 == 0:
    layers = [nb_movies, args.layer1]
else:
    layers = [nb_movies, args.layer1, args.layer2]

# Load Model 1 — architecture hyper-parameters come from its checkpoint.
print('******* Loading Model 1 *******')
# model_base = AutoEncoders.AsymmetricAutoEncoder(layers, nl_type=args.activations, \
#                                                 is_constrained=False, dp_drop_prob=0.0, \
#                                                 last_layer_activations=False, \
#                                                 lla = args.last_layer_activation).to(args.DEVICE)
# model1 = AutoEncoders.GenresWrapperChrono(model_base, args.g_type).to(args.DEVICE)
# checkpoint = torch.load(args.M1_path, map_location=args.DEVICE)
# model1.load_state_dict(checkpoint['state_dict'])
checkpoint1 = torch.load(args.M1_path, map_location=args.DEVICE)
model_base1 = AutoEncoders.AsymmetricAutoEncoder(checkpoint1['layers'], \
                                                 nl_type=checkpoint1['activations'], \
                                                 is_constrained=False, dp_drop_prob=0.0, \
                                                 last_layer_activations=False, \
                                                 lla = checkpoint1['last_layer_activation']).to(args.DEVICE)
model1 = AutoEncoders.GenresWrapperChrono(model_base1, checkpoint1['g_type']).to(args.DEVICE)
model1.load_state_dict(checkpoint1['state_dict'])
# Loss function mirrors the one the checkpoint was trained with.
if checkpoint1['loss_fct'] == 'BCEWLL':
    criterion1 = torch.nn.BCEWithLogitsLoss(reduction='none')
elif checkpoint1['loss_fct'] == 'BCE':
    criterion1 = torch.nn.BCELoss(reduction='none')

# For print: Liked or not predictions
print_not_liked = ''
if args.pred_not_liked: print_not_liked = 'NOT '


# CHRONO EVALUATION

# If one model (do with and without genres)
if args.M2_path == 'none':
    # Make predictions
    print("\n\nPrediction Chronological...")
    l1, l0, e1, e0, a1, a0, mr1, mr0, r1, r0, d1, d0 = \
        Utils.EvalPredictionRnGChrono(valid_g_chrono_loader, model1, criterion1, \
                                      args.zero12, True, args.pred_not_liked, \
                                      args.completionPredChrono, args.topx)
    # without_genres is True because only one model, so do "without genres" pred

    # Print results
    print("\n ====> RESULTS <==== \n")
    print("Global avrg pred error with {:.4f} and without {:.4f}".format(l1, l0))
    print("\n ==> BY Nb of mentions, on to be mentionned <== \n")

    # Histogram of how many movies were mentioned before each prediction.
    histo1 = []
    histo0 = []
    for k, v in sorted(e1.items()):
        histo1 += [k for i in v]
    for k, v in sorted(e0.items()):
        histo0 += [k for i in v]
    plt.hist(histo1, len(e1), alpha=0.3)
    plt.hist(histo0, len(e0), alpha=0.3)
    plt.xlabel('Nb of mentionned movies before prediction')
    # NOTE(review): no artist here carries a label, so this legend will be
    # empty (matplotlib warns) — consider label= on the hist calls.
    plt.legend()
    plt.show()

    # List of metrics to evaluate and graph
    graphs_titles = ['Avrg Pred Error', 'MMRR','NDCG'] # 'Avrg Rank', 'MRR'
    graphs_data = [[e0, e1], [mr0, mr1], [d0, d1]] # [a0, a1], [r0, r1]
    # Evaluate + graph
    for i in range(len(graphs_titles)):
        avrgs = Utils.ChronoPlot(graphs_data[i], graphs_titles[i])
        print(graphs_titles[i]+" on {}liked ReDial movies: {}={:.4f} and {}={:.4f}"\
              .format(print_not_liked, \
                      'withOUT genres', avrgs[0], \
                      'with genres', avrgs[1]))

# If two models
else:
    # Load Model 2 — same checkpoint-driven construction as Model 1.
    print('\n******* Loading Model 2 *******')
    checkpoint2 = torch.load(args.M2_path, map_location=args.DEVICE)
    model_base2 = AutoEncoders.AsymmetricAutoEncoder(checkpoint2['layers'], \
                                                     nl_type=checkpoint2['activations'], \
                                                     is_constrained=False, dp_drop_prob=0.0, \
                                                     last_layer_activations=False, \
                                                     lla = checkpoint2['last_layer_activation']).to(args.DEVICE)
    model2 = AutoEncoders.GenresWrapperChrono(model_base2, checkpoint2['g_type']).to(args.DEVICE)
    model2.load_state_dict(checkpoint2['state_dict'])
    if checkpoint2['loss_fct'] == 'BCEWLL':
        criterion2 = torch.nn.BCEWithLogitsLoss(reduction='none')
    elif checkpoint2['loss_fct'] == 'BCE':
        criterion2 = torch.nn.BCELoss(reduction='none')

    # Make predictions
    print("\n\nPrediction Chronological Model1...")
    # Make prediction with and without genres in input
    l1, l0, e1, e0, a1, a0, mr1, mr0, r1, r0, d1, d0 = \
        Utils.EvalPredictionRnGChrono(valid_chrono_loader, model1, criterion1, \
                                      args.zero12, True, args.pred_not_liked, \
                                      args.completionPredChrono, args.topx)
    # without_genres True because do the "without genres" pred
    print("Prediction Chronological Model2...")
    l2, _, e2, _, a2, _, mr2, _, r2, _, d2, _ = \
        Utils.EvalPredictionRnGChrono(valid_chrono_loader, model2, criterion2, \
                                      args.zero12, False, args.pred_not_liked, \
                                      args.completionPredChrono, args.topx)
    # without_genres False because don't do the "without genres" pred

    # Print results
    print("\n ====> RESULTS <==== \n")
    # print("Global avrg pred error with {:.4f} and without {:.4f}".format(l1, l2))
    print("\n ==> BY Nb of mentions, on to be mentionned <== \n")

    histo1 = []
    histo0 = []
    for k, v in sorted(e1.items()):
        histo1 += [k for i in v]
    for k, v in sorted(e2.items()):
        histo0 += [k for i in v]
    plt.hist(histo1, len(e1), alpha=0.3)
    plt.hist(histo0, len(e2), alpha=0.3)
    plt.xlabel('Nb of mentionned movies before prediction')
    plt.legend()
    plt.show()

    # List of metrics to evaluate and graph
    graphs_titles = ['Avrg Pred Error', 'MMRR', 'NDCG'] # 'Avrg Rank', 'MRR'
    graphs_data = [[e0, e1, e2], [mr0, mr1, mr2], [d0, d1, d2]] # [a0, a1, a2], [r0, r1, r2]
    # Evaluate + graph
    for i in range(len(graphs_titles)):
        avrgs = Utils.ChronoPlot(graphs_data[i], graphs_titles[i] , \
                                 [args.M1_label+'(out)', args.M1_label, args.M2_label])
        print(graphs_titles[i]+" on {}liked ReDial movies: {}={:.4f}, {}={:.4f} and {}={:.4f}"\
              .format(print_not_liked, \
                      args.M1_label+'(out)', avrgs[0], \
                      args.M1_label, avrgs[1], \
                      args.M2_label, avrgs[2]))
998,916 | 9425a7a69684cec9583621dff27b961ddf3ea146 | """
faster_rcnn_chainercv
train_SUNRGBD
created by Kazunari on 2018/06/26
"""
from __future__ import division
import argparse
import numpy as np
import os.path as osp
import datetime
import matplotlib
matplotlib.use('Agg')
import chainer
from chainer.datasets import TransformDataset
from chainer import training
from chainer.training import extensions
from chainer.training.triggers import ManualScheduleTrigger
from chainercv.datasets import voc_bbox_label_names
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links import FasterRCNNVGG16
from chainercv.links.model.faster_rcnn import FasterRCNNTrainChain
from chainercv import transforms
import sys
sys.path.append(osp.curdir)
from SUNRGBD_dataset import SUNRGBDDataset
class Transform(object):
    """Training-time transform: model-specific resize + random horizontal flip.

    Produces (img, bbox, label, scale) as expected by FasterRCNNTrainChain.
    """

    def __init__(self, faster_rcnn):
        self.faster_rcnn = faster_rcnn

    def __call__(self, in_data):
        img, bbox, label = in_data
        _, H, W = img.shape
        # Rescale the image exactly the way the model does at predict time,
        # then resize the boxes by the same factor.
        img = self.faster_rcnn.prepare(img)
        _, o_H, o_W = img.shape
        scale = o_H / H
        bbox = transforms.resize_bbox(bbox, (H, W), (o_H, o_W))

        # horizontally flip image and boxes together
        img, params = transforms.random_flip(
            img, x_random=True, return_param=True)
        bbox = transforms.flip_bbox(
            bbox, (o_H, o_W), x_flip=params['x_flip'])

        return img, bbox, label, scale
def main():
    """Train Faster R-CNN (VGG16 backbone) on the SUN RGB-D 2D-bbox dataset."""
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: Faster R-CNN')
    parser.add_argument('--dataset_path', '-path', type=str, default="/home/takagi.kazunari/projects/datasets/SUNRGBD_2DBB_fixed")
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--lr', '-l', type=float, default=1e-3)
    parser.add_argument('--out', '-o', default='sunrgbd_result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=0)
    parser.add_argument('--step_size', '-ss', type=int, default=50000)
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    args = parser.parse_args()

    np.random.seed(args.seed)

    train_data = SUNRGBDDataset(args.dataset_path,mode="train")
    test_data = SUNRGBDDataset(args.dataset_path, mode="test")

    sunrgbd_bbox_label_names = train_data.get_dataset_label()

    # ImageNet-pretrained backbone; head sized to this dataset's classes.
    faster_rcnn = FasterRCNNVGG16(n_fg_class=len(sunrgbd_bbox_label_names),
                                  pretrained_model='imagenet')
    faster_rcnn.use_preset('evaluate')
    model = FasterRCNNTrainChain(faster_rcnn)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer_hooks.WeightDecay(rate=0.0005))

    # Wrap training data with resize + random-flip augmentation.
    train_data = TransformDataset(train_data, Transform(faster_rcnn))

    train_iter = chainer.iterators.MultiprocessIterator(
        train_data, batch_size=1, n_processes=None, shared_mem=100000000)
    test_iter = chainer.iterators.SerialIterator(
        test_data, batch_size=1, repeat=False, shuffle=False)
    updater = chainer.training.updaters.StandardUpdater(
        train_iter, optimizer, device=args.gpu)

    # Each run writes into its own timestamped output directory.
    now_time = str(datetime.datetime.today()).replace(" ","_")
    save_dir = osp.join(args.out, now_time)

    trainer = training.Trainer(
        updater, (args.iteration, 'iteration'), out=save_dir)

    #save_iteration = [i for i in range(100, args.iteration, args.step_size)]
    weight_save_interval = 5000, 'iteration'
    evaluation_interval = 10000, 'iteration'

    trainer.extend(
        extensions.snapshot_object(model.faster_rcnn, 'sunrgbd_model_{.updater.iteration}.npz'),
        trigger=weight_save_interval)
    # Drop the learning rate by 10x every step_size iterations.
    trainer.extend(extensions.ExponentialShift('lr', 0.1),
                   trigger=(args.step_size, 'iteration'))

    log_interval = 20, 'iteration'
    plot_interval = 10, 'iteration'
    print_interval = 20, 'iteration'

    trainer.extend(chainer.training.extensions.observe_lr(),
                   trigger=log_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'epoch', 'elapsed_time', 'lr',
         'main/loss',
         'main/roi_loc_loss',
         'main/roi_cls_loss',
         'main/rpn_loc_loss',
         'main/rpn_cls_loss',
         'validation/main/map',
         ]), trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))

    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(
                ['main/loss'],
                file_name='loss.png', trigger=plot_interval
            ),
            trigger=plot_interval
        )

    #do_evaluation_iteration = [i for i in range(0, args.iteration, 500)]
    # Pascal-VOC-style mAP on the test split every evaluation_interval.
    trainer.extend(
        DetectionVOCEvaluator(
            test_iter, model.faster_rcnn, use_07_metric=True,
            label_names=sunrgbd_bbox_label_names),
        trigger=evaluation_interval)

    trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()


if __name__ == '__main__':
    main()
|
998,917 | cba7d08af2b3ab07bc39b55c5f748f77f9dcdd35 | import pytest
from phub_crypto import PhubCrypto
class TestPhubCrypto:
    """Unit tests for PhubCrypto construction and the encrypt/decrypt round trip."""

    def test_init(self):
        # Constructing with a salt and passphrase must yield a truthy object.
        instance = PhubCrypto(b'saltsalt', 'badpass')
        assert instance

    def test_encrypt_decrypt(self):
        # Decrypting an encrypted string must recover the original plaintext.
        instance = PhubCrypto(b'saltsalt', 'badpass')
        plaintext = "This is the target string"
        roundtrip = instance.decrypt(instance.encrypt(plaintext))
        assert roundtrip == plaintext
|
998,918 | dd5426f1bc7c81bf9807d4178cbd9b38aaaa4c4b | import numpy as np
import matplotlib.pyplot as plt
def Euclid_norm(x):
    """Return the Euclidean (L2) norm of a 1-D sequence or NumPy array *x*.

    Vectorized with NumPy instead of a Python-level accumulation loop; as
    before, the result is a NumPy floating-point scalar (np.sqrt was already
    used in the original) and an empty input yields 0.0.
    """
    x = np.asarray(x)
    return np.sqrt((x * x).sum())
def scalar(x, y):
    """Return the dot (scalar) product of equal-length sequences *x* and *y*.

    Uses zip instead of index arithmetic; sum starts at 0, so empty inputs
    still yield 0.  NOTE: unlike the original, unequal lengths silently
    truncate (zip) instead of raising IndexError — callers here always pass
    equal-length flattened grids.
    """
    return sum(a * b for a, b in zip(x, y))
if __name__ == "__main__":
    # Power iteration for the extreme eigenvalues of the finite-difference
    # operator A = -a*d2/dx2 - b*d2/dy2 on an L-shaped subdomain of the unit
    # square (n x n grid, zero boundary).
    a = 1
    b = 1.2
    n = 10
    h = 1/n
    delta = 0.000001  # relative convergence tolerance
    Y = np.zeros((n+1, n+1))
    Y_n = np.zeros((n+1, n+1))
    # Y0 - indicator of the domain (starting vector for the iteration)
    for i in range(int(n/2)+1, n):
        for j in range(1,n):
            Y[j][i] = 1
    for i in range(int(n/2), 0, -1):
        for j in range(i-1):
            Y[int(n/2)+j+1][i] = 1
    Psi = Y.copy()
    # Y1 - first application of A (5-point finite-difference stencil)
    for j in range(1, n):
        for i in range(1, n):
            Y_n[j][i] = -1*(a/h**2)*(Y[j][i-1] - 2*Y[j][i] + Y[j][i+1]) -\
                (b/h**2)*(Y[j-1][i] - 2*Y[j][i] + Y[j+1][i])
    lambda_max_n = scalar(Y_n.reshape(-1)/\
        Euclid_norm(Y.reshape(-1)), Y.reshape(-1)/Euclid_norm(Y.reshape(-1)))
    # compute the largest eigenvalue of A by power iteration
    # NOTE(review): Y_n is normalized by the norm of the *previous* Y, not of
    # Y_n itself - kept as written; confirm against the course notes.
    while True:
        lambda_max_0 = lambda_max_n
        Y = Y_n.copy()/Euclid_norm(Y.reshape(-1))
        for j in range(1, n):
            for i in range(1, n):
                Y_n[j][i] = -1*(a/h**2)*(Y[j][i-1] - 2*Y[j][i] + Y[j][i+1]) -\
                    (b/h**2)*(Y[j-1][i] - 2*Y[j][i] + Y[j+1][i])
        lambda_max_n = scalar(Y_n.reshape(-1)/\
            Euclid_norm(Y.reshape(-1)), Y.reshape(-1)/Euclid_norm(Y.reshape(-1)))
        if np.abs(lambda_max_n - lambda_max_0)/lambda_max_0 < delta:
            break
    # largest eigenvalue of A
    print(lambda_max_n)
    Psi_n = Psi.copy()
    # Psi - first application of B = lambda_max(A)*I - A
    for j in range(1, n):
        for i in range(1, n):
            Psi_n[j][i] = lambda_max_n*Psi[j][i] + \
                (a/h**2)*(Psi[j][i-1] - 2*Psi[j][i] + Psi[j][i+1]) + \
                (b/h**2)*(Psi[j-1][i] - 2*Psi[j][i] + Psi[j+1][i])
    lambda_max_B_n = scalar(Psi_n.reshape(-1)/Euclid_norm(Psi.reshape(-1)), \
        Psi.reshape(-1)/Euclid_norm(Psi.reshape(-1)))
    # compute the largest eigenvalue of B by power iteration
    while True:
        lambda_max_B_0 = lambda_max_B_n
        Psi = Psi_n.copy()/Euclid_norm(Psi.reshape(-1))
        for j in range(1, n):
            for i in range(1, n):
                Psi_n[j][i] = lambda_max_n*Psi[j][i] + \
                    (a/h**2)*(Psi[j][i-1] - 2*Psi[j][i] + Psi[j][i+1]) + \
                    (b/h**2)*(Psi[j-1][i] - 2*Psi[j][i] + Psi[j+1][i])
        # Bug fix: the original assigned this Rayleigh quotient to
        # lambda_max_n, so lambda_max_B_n never changed inside the loop (the
        # convergence test always passed after one pass) and the stored
        # eigenvalue of A used below was clobbered.
        lambda_max_B_n = scalar(Psi_n.reshape(-1)/Euclid_norm(Psi.reshape(-1)), \
            Psi.reshape(-1)/Euclid_norm(Psi.reshape(-1)))
        if np.abs(lambda_max_B_n - lambda_max_B_0)/lambda_max_B_0 < delta:
            break
    # smallest eigenvalue of A = lambda_max(A) - lambda_max(B)
    print(lambda_max_n - lambda_max_B_n)
998,919 | 3db8a1625d846ea0d3d0e3184ef4af0964c188b1 | from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.response import Response
from rest_framework import status
from Discussion_Forum.models import Post
from .serializers import PostSerializer, ResultSerializer
from rest_framework.decorators import api_view
from django.contrib.auth.models import User
# Create your views here.
def get_mean(marks):
    """Return the arithmetic mean of *marks*.

    Each element is coerced with int() first (the API posts marks as
    strings).  As before, an empty list raises ZeroDivisionError.
    """
    marks = [int(mark) for mark in marks]
    return sum(marks) / len(marks)
@api_view(['GET','POST'])
@csrf_exempt
def post_list(request):
    """List all forum posts (GET) or create a new one (POST).

    On POST, if the submitted 'author' matches an existing username, the
    validated author field is replaced with that User instance before saving.
    Returns 201 with the created data, or 400 with serializer errors.
    """
    if request.method == 'GET':
        posts = Post.objects.all()
        serializer = PostSerializer(posts, many=True)
        return Response(serializer.data)
    if request.method == 'POST':
        serializer = PostSerializer(data=request.data)
        if serializer.is_valid():
            # NOTE(review): debug print left in production code.
            print(serializer.validated_data['author'])
            if User.objects.filter(username=serializer.validated_data['author']).exists():
                user = User.objects.get(username=serializer.validated_data['author'])
                serializer.validated_data['author'] = user
            serializer.save()
            return Response(serializer.data, status=201)
        return Response(serializer.errors, status=400)
@api_view(['GET', 'PUT', 'DELETE', 'POST'])
@csrf_exempt
def post_detail(request, id):
    """Retrieve (GET), update (PUT) or delete (DELETE) a single post by id.

    NOTE(review): 'POST' is allowed by @api_view but no branch handles it,
    so a POST request falls through and the view returns None — confirm
    whether POST should be dropped from the allowed methods.
    """
    try:
        post = Post.objects.get(id=id)
    except Post.DoesNotExist:
        # Unknown id: plain 404 with no body.
        return HttpResponse(status=404)
    if request.method == 'GET':
        serializer = PostSerializer(post)
        return Response(serializer.data)
    if request.method == 'PUT':
        serializer = PostSerializer(post, data=request.data)
        if serializer.is_valid():
            # Same author-resolution behaviour as post_list.
            if User.objects.filter(username=serializer.validated_data['author']).exists():
                user = User.objects.get(username=serializer.validated_data['author'])
                serializer.validated_data['author'] = user
            serializer.save()
            return Response(serializer.data, status=201)
        return Response(serializer.errors, status=400)
    if request.method == 'DELETE':
        post.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['POST'])
@csrf_exempt
def result(request):
    """Compute the mean of the 'marks' values in a posted list of results.

    Returns 201 with the mean on success, or 400 with the literal string
    'error' when the payload does not validate.
    """
    serializer = ResultSerializer(data=request.data, many=True)
    if not serializer.is_valid():
        return Response('error',status=400)
    entries = [dict(item) for item in serializer.validated_data]
    marks = [entry['marks'] for entry in entries]
    return Response(get_mean(marks), status=201)
|
998,920 | 4b3b97e59a62c67b7cf20d68e3029e38b11054dd | #!/usr/bin/env python
"""
File: normalize.py
Date: 5/8/18
Author: Jon Deaton (jdeaton@stanford.edu)
"""
import sys
sys.path.append('..')
print(sys.path)
import argparse
import multiprocessing as mp
import BraTS
from BraTS.structure import *
from normalization import normalize_patient_images
import logging
pool_size = 8
logger = None
def normalize_brats(brats_root, year, output_directory):
    """Normalize every patient image in a BraTS data set.

    Creates the train/HGG/LGG/validation sub-directories under
    *output_directory*, then normalizes each patient's images in parallel
    with a pool of *pool_size* workers.
    """
    brats = BraTS.DataSet(brats_root=brats_root, year=year)
    train_corrected = get_brats_subset_directory(output_directory, DataSubsetType.train)
    hgg_corrected = get_brats_subset_directory(output_directory, DataSubsetType.hgg)
    lgg_corrected = get_brats_subset_directory(output_directory, DataSubsetType.lgg)
    validation_corrected = get_brats_subset_directory(output_directory, DataSubsetType.validation)
    # Make the directories
    for directory in (train_corrected, hgg_corrected, lgg_corrected, validation_corrected):
        try:
            os.mkdir(directory)
        except FileExistsError:
            logger.debug("Directory exists: %s" % directory)
    pool = mp.Pool(pool_size)
    # Convert each of the sets
    for patient_set in (brats.hgg, brats.lgg, brats.validation):
        if patient_set is None:
            continue  # Missing data set (e.g. validation is not present)
        logger.info("Processing set: %s" % patient_set.type)
        # Make a list of original-dir -> new-dir outputs
        arg_list = []
        for patient_id in patient_set.ids:
            # NOTE(review): directories are always resolved through the
            # *train* map, even for the validation set — confirm intended.
            original_dir = brats.train.directory_map[patient_id]
            new_dir = os.path.join(get_brats_subset_directory(output_directory, patient_set.type), patient_id)
            arg_list.append((original_dir, new_dir, patient_id))
        # Do the conversion in parallel
        pool.map(convert_wrapper, arg_list)
# For converting in parallel
def convert_wrapper(args):
    """Normalize one patient's images; *args* is (source_dir, dest_dir, patient_id).

    Packed into a single tuple argument so it can be used with Pool.map.
    """
    source_dir, dest_dir, patient_id = args
    try:
        os.mkdir(dest_dir)
    except FileExistsError:
        pass  # already created on a previous run
    logger.debug("Processing: %s" % patient_id)
    normalize_patient_images(source_dir, dest_dir)
def parse_args():
    """
    Parse command line arguments and configure the module-level logger.

    :return: An argparse object containing parsed arguments
    """
    parser = argparse.ArgumentParser(description="Normalize the BraTS data set",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    input_options = parser.add_argument_group("Input")
    input_options.add_argument('--brats', required=True, help="BraTS root data set directory")
    input_options.add_argument('--year', required=True, type=int, default=2018, help="BraTS year")
    output_options = parser.add_argument_group("Output")
    output_options.add_argument('--output', required=True, help="Output directory of normalized data set")
    general_options_group = parser.add_argument_group("General")
    general_options_group.add_argument("--pool-size", type=int, default=8, help="Size of worker pool")
    logging_options_group = parser.add_argument_group("Logging")
    logging_options_group.add_argument('--log', dest="log_level", default="WARNING", help="Logging level")
    logging_options_group.add_argument('--log-file', default="normalize.log", help="Log file")
    args = parser.parse_args()
    # Setup the logger
    global logger
    logger = logging.getLogger('root')
    # Logging level configuration: map e.g. "debug" -> logging.DEBUG and
    # reject names that do not resolve to a numeric level.
    log_level = getattr(logging, args.log_level.upper())
    if not isinstance(log_level, int):
        raise ValueError('Invalid log level: %s' % args.log_level)
    logger.setLevel(log_level)
    log_formatter = logging.Formatter('[%(asctime)s][%(levelname)s][%(funcName)s] - %(message)s')
    # For the log file...
    file_handler = logging.FileHandler(args.log_file)
    file_handler.setFormatter(log_formatter)
    logger.addHandler(file_handler)
    # For the console
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_formatter)
    logger.addHandler(console_handler)
    return args
def main():
    """Entry point: parse args, ensure input/output directories exist, then
    run the BraTS normalization."""
    args = parse_args()
    brats_root = os.path.expanduser(args.brats)
    output_dir = os.path.expanduser(args.output)
    # Let the worker functions pick up the configured pool size.
    global pool_size
    pool_size = args.pool_size
    if not os.path.isdir(brats_root):
        raise FileNotFoundError(brats_root)
    logger.debug("BraTS root: %s" % brats_root)
    if not os.path.exists(output_dir):
        logger.debug("Creating output directory: %s" % output_dir)
        try:
            os.mkdir(output_dir)
        except FileExistsError:
            # Race: another process created it between the check and mkdir.
            logger.debug("Output directory exists.")
    else:
        logger.debug("Output directory: %s" % output_dir)
    normalize_brats(brats_root, args.year, output_dir)
|
998,921 | 93d0bd5ef130e4340ed39b6dd7f43d38860034ac |
class Scene():
    """A scene record: its own GUID, a display name, and the GUID of the
    channel that controls it."""

    def __init__(self, guid, name, control_channel_guid):
        # Plain data holder: store the identifying attributes verbatim.
        self.guid = guid
        self.name = name
        self.control_channel_guid = control_channel_guid
998,922 | bf3f82ce5532d4578fad0039f5edfcc6c5af133c | import sched, time
from Analytics import Analytics
from datetime import datetime
import os
from analyticConfigs import config
scheduler_obj = sched.scheduler(time.time, time.sleep)
def start_App(sc):
    """Run one analytics pass and re-schedule itself on the shared scheduler."""
    print("******************************************* Job Started at {} **".format(datetime.now()))
    Analytics().run_Analytic_Engine()
    # NOTE(review): this print passes two arguments (no .format), so the
    # literal "{}" is printed followed by the timestamp — confirm intent.
    print("******************************************* Job Finished at {} **",datetime.now())
    # NOTE(review): sched.scheduler.enter(delay, priority, action, argument) —
    # the second argument is a *priority*, so config['Interval'] is used as a
    # priority, not as a repeat interval; the re-run delay is Start_Delay.
    scheduler_obj.enter(int(config['Start_Delay']),int( config['Interval']), start_App, (sc,))
# Schedule the first run and start the (blocking) scheduler loop.
scheduler_obj.enter(int(config['Start_Delay']),int( config['Interval']), start_App, (scheduler_obj,))
scheduler_obj.run()
|
998,923 | e8c6522bab1db58b3e3def517f156cef212419b3 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Nov 6 2013)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
from Terminal import Terminal
###########################################################################
## Class MyDialog1
###########################################################################
class MyDialog1 ( wx.Dialog ):
    """700x500 dialog hosting a single Terminal panel that fills the client area.

    Generated by wxFormBuilder; layout is a one-slot vertical box sizer.
    """

    def __init__( self, parent ):
        wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 700,500 ), style = wx.DEFAULT_DIALOG_STYLE )
        self.SetSizeHintsSz( wx.DefaultSize, wx.DefaultSize )
        bSizer1 = wx.BoxSizer( wx.VERTICAL )
        # The Terminal panel stretches with the dialog (proportion 1, 5px border).
        self.m_panel1 = Terminal( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL|wx.WANTS_CHARS )
        bSizer1.Add( self.m_panel1, 1, wx.EXPAND |wx.ALL, 5 )
        self.SetSizer( bSizer1 )
        self.Layout()
        self.Centre( wx.BOTH )

    def __del__( self ):
        pass
if __name__ == "__main__":
    # Standalone demo: show the dialog as the application's top window.
    # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython — confirm
    # the targeted wx version.
    app = wx.PySimpleApp(0)
    wx.InitAllImageHandlers()
    frame_1 = MyDialog1(None)
    app.SetTopWindow(frame_1)
    frame_1.Show()
    app.MainLoop()
998,924 | 9c24d5938628d7accec0537cb0d3e1d95e3157f5 | #将一个列表的数据复制到另一个列表中。
l1 = []
i = 0
while i <= 9:
a = input('请输入10个数据:')
l1.append(a)
i += 1
l2 = l1[:]
print('原先的列表是:{}')
print('复制出来的数据是:{}'.format(l2))
|
998,925 | dac62c3994ffccb5f064e4a7503388240cab2b60 | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
    """Per-user profile with city, phone number and avatar image."""
    city = models.ForeignKey("City", null=True, on_delete=models.CASCADE)
    phone_number = models.CharField(max_length=50)
    image = models.ImageField(upload_to='profiles/images')
    # One profile per user; nullable so a Profile can exist before linking.
    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return str(self.user)
# create a profile once a user has registered an account using signals
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    """On the first save of a User (created=True), attach an empty Profile."""
    if created:
        Profile.objects.create(user=instance)
class City(models.Model):
    """Lookup table of city names referenced by Profile.city."""
    name = models.CharField(max_length=50, default='Cairo')

    def __str__(self):
        return self.name
|
998,926 | 5dda4d2a6ff3abd19e23ec590f3a92a499525f49 | #! /usr/bin/python
import os
import argparse
import sys
import get_GFSX025_grib2 as grib
from datetime import datetime, timedelta
import time
# Default values. Editable by user
MAX_OFFSET = 168 # MAX_OFFSET == 168hs(7 days)
MIN_NODES_AMOUNT = 2
MAX_NODES_AMOUNT = 9
SEPARATOR = '-' * 80
def update_namelist_wps(environment):
    """Rewrite start_date/end_date lines of namelist.wps in ENSAMBLE_DIR
    with the forecast window from *environment* (Python 2 code).

    NOTE(review): the file is rewritten in place through a second 'r+'
    handle while it is still being read and is never truncated; this only
    stays consistent if every replacement line matches the original line's
    length — confirm.
    """
    try:
        print SEPARATOR
        print "Set date for namelist.wps"
        ENSAMBLE_DIR = environment["ENSAMBLE_DIR"]
        os.chdir(ENSAMBLE_DIR)
        start_date = " start_date = {0}\n".format(environment["start_date"])
        end_date = " end_date = {0}\n".format(environment["end_date"])
        patterns = {"start_date": start_date, "end_date": end_date}
        namelist_wps = "namelist.wps"
        with open(namelist_wps) as infile:
            with open(namelist_wps, 'r+') as outfile:
                for line in infile:
                    # Replace the whole line when it mentions a known key.
                    for k, v in patterns.iteritems():
                        if k in line:
                            line = v
                            break
                    outfile.write(line)
        # Redundant: the with-blocks already closed both handles.
        infile.close()
        outfile.close()
        os.system("head -15 " + namelist_wps)
    except Exception:
        raise
def update_namelist_input_output(ensamble_path, environment):
    """Rewrite namelist.input and namelist.ARWpost of one ensemble member
    with the run window from *environment* (Python 2 code).

    NOTE(review): same fragile in-place 'r+' rewrite as
    update_namelist_wps — only safe when line lengths are preserved.
    """
    try:
        print SEPARATOR
        print "Set date for namelist.input in {0}".format(ensamble_path)
        ENSAMBLE_DIR = environment["ENSAMBLE_DIR"]
        os.chdir(ENSAMBLE_DIR + "/" + ensamble_path)
        # Map of namelist keys -> full replacement lines.
        patterns = {
            "run_days" : " run_days = {0}\n".format(environment["run_days"] ),
            "run_hours" : " run_hours = {0}\n".format(environment["run_hours"] ),
            "run_minutes" : " run_minutes = {0}\n".format(environment["run_minutes"] ),
            "run_seconds" : " run_seconds = {0}\n".format(environment["run_seconds"] ),
            "start_year" : " start_year = {0}\n".format(environment["start_year"] ),
            "start_month" : " start_month = {0}\n".format(environment["start_month"] ),
            "start_day" : " start_day = {0}\n".format(environment["start_day"] ),
            "start_hour" : " start_hour = {0}\n".format(environment["start_hour"] ),
            "start_minute" : " start_minute = {0}\n".format(environment["start_minute"] ),
            "start_second" : " start_second = {0}\n".format(environment["start_second"] ),
            "end_year" : " end_year = {0}\n".format(environment["end_year"] ),
            "end_month" : " end_month = {0}\n".format(environment["end_month"] ),
            "end_day" : " end_day = {0}\n".format(environment["end_day"] ),
            "end_hour" : " end_hour = {0}\n".format(environment["end_hour"] ),
            "end_minute" : " end_minute = {0}\n".format(environment["end_minute"] ),
            "end_second" : " end_second = {0}\n".format(environment["end_second"] )
        }
        with open('namelist.input') as infile:
            with open('namelist.input', 'r+') as outfile:
                for line in infile:
                    for k, v in patterns.iteritems():
                        if k in line:
                            line = v
                            break
                    outfile.write(line)
        infile.close()
        outfile.close()
        os.system("head -15 namelist.input")
        print "Set date for namelist.ARWpost {0}".format(ensamble_path)
        start_date = " start_date = {0}\n".format(environment["start_date"])
        end_date = " end_date = {0}\n".format(environment["end_date"])
        # ARWpost reads the wrfout file named after the start date.
        input_root_name = " input_root_name = '../wrf_run/wrfout_d01_{0}',\n".format(environment["start_date"])
        patterns = {
            "start_date": start_date,
            "end_date": end_date,
            "input_root_name": input_root_name
        }
        namelist_awr = "namelist.ARWpost"
        with open(namelist_awr) as infile:
            with open(namelist_awr, 'r+') as outfile:
                for line in infile:
                    for k, v in patterns.iteritems():
                        if k in line:
                            line = v
                            break
                    outfile.write(line)
        infile.close()
        outfile.close()
        os.system("head -15 {0}".format(namelist_awr))
    except Exception:
        raise
def download_grib_files(environment, offset):
    """Download the GFS GRIB2 files for the forecast start date into
    GFS_DIR/<start_date>, creating that directory when needed (Python 2)."""
    try:
        print SEPARATOR
        GFS_DIR = environment["GFS_DIR"]
        os.chdir(GFS_DIR)
        start_date_dir = environment["start_date"]
        start_date = environment["start_date_int_format"]
        if not os.path.exists(start_date_dir):
            os.system("mkdir {0}".format(start_date_dir))
        gfs_path = GFS_DIR + "/" + start_date_dir
        # Delegates the actual HTTP fetching to get_GFSX025_grib2.
        grib.download_grib_files(start_date, offset, gfs_path)
    except Exception:
        raise
def load_configuration(environment, offset):
    """Rewrite namelist.wps and each ensemble member's namelist files with
    the forecast dates.  NOTE(review): *offset* is accepted but unused."""
    try:
        update_namelist_wps(environment)
        ensamble_names = environment["ENSAMBLE"]
        for ensamble in ensamble_names:
            update_namelist_input_output(ensamble, environment)
    except Exception:
        raise
def run_process_model(environment, nodes):
    """Patch the --nodes count into job_wrf.sh and submit one SLURM job per
    ensemble member, then display the user's queue (Python 2).

    NOTE(review): job_wrf.sh is rewritten in place through a second 'r+'
    handle (same fragile pattern as the namelist updaters); the single-digit
    regex also limits --nodes to 0-9.
    """
    try:
        os.chdir(environment["WRF_BASE"])
        ensamble_names = environment["ENSAMBLE"]
        start_date = environment["start_date"]
        end_date = environment["end_date"]
        import re
        with open('job_wrf.sh') as infile:
            with open('job_wrf.sh', 'r+') as outfile:
                for line in infile:
                    if re.search(r'(--nodes=[0-9]{1})', line):
                        line = re.sub(r'(--nodes=[0-9]{1})', '--nodes={0}'.format(nodes), line)
                    outfile.write(line)
        infile.close()
        outfile.close()
        for ensamble in ensamble_names:
            print SEPARATOR
            execute_command = "sbatch job_wrf.sh {0} {1} {2} {3}".format(ensamble, start_date, end_date, nodes)
            print execute_command
            os.system(execute_command)
        check_command = "squeue -u $USER"
        print check_command
        os.system(check_command)
    except Exception:
        raise
def get_ensamble_names(environment):
    """
    This function return a list of ensamble's names:
    [
    ensamble1,
    ensamble2,
    .
    .
    .
    ensambleN,
    ]
    """
    try:
        ENSAMBLE_DIR = environment["ENSAMBLE_DIR"]
        os.chdir(ENSAMBLE_DIR)
        ensamble_names = []
        # os.walk yields ENSAMBLE_DIR itself first, then every subdirectory
        # (recursively); keep only the leaf directory names.
        subdirs = [x[0] for x in os.walk(ENSAMBLE_DIR)]
        for subdir in subdirs:
            ensamble_names.append(subdir.split("/")[-1])
        # Drop the first entry, which is ENSAMBLE_DIR itself.
        return ensamble_names[1:]
    except Exception:
        raise
def define_environment(start_date, offset):
    """
    Format of start and end date:
    start_date = YYYY-MM-DD_HH:MM:SS
    end_date = YYYY-MM-DD_HH:MM:SS
    Example:
    start_date = 2015-02-24_18:00:00

    Builds the dict of dates, run lengths and directory paths used by the
    rest of the pipeline; exits if the WRF environment variables are unset.
    """
    try:
        start_date_int_format = int(start_date)
        start_date = datetime.strptime(start_date, "%Y%m%d%H")
        start_year = "{0:02d}".format(start_date.year)
        start_month = "{0:02d}".format(start_date.month)
        start_day = "{0:02d}".format(start_date.day)
        start_hour = "{0:02d}".format(start_date.hour)
        start_minute = "{0:02d}".format(start_date.minute)
        start_second = "{0:02d}".format(start_date.second)
        # End of the run = start + forecast offset in hours.
        end_date = start_date + timedelta(hours = int(offset))
        end_year = "{0:02d}".format(end_date.year)
        end_month = "{0:02d}".format(end_date.month)
        end_day = "{0:02d}".format(end_date.day)
        end_hour = "{0:02d}".format(end_date.hour)
        end_minute = "{0:02d}".format(end_date.minute)
        end_second = "{0:02d}".format(end_date.second)
        #TODO ASK about these parameters
        run_days = "0"
        run_hours = offset
        run_minutes = "0"
        run_seconds = "0"
        start_date = start_date.strftime("%Y-%m-%d_%H:%M:%S")
        end_date = end_date.strftime("%Y-%m-%d_%H:%M:%S")
        print "Start forecast date: {0}".format(start_date)
        print "End forecast date: {0}".format(end_date)
        # NOTE(review): the "end_date" key appears twice in this literal;
        # the later entry wins (same value) but one should be removed.
        environment = {
            "start_date_int_format" : start_date_int_format,
            "start_date" : start_date,
            "end_date" : end_date,
            "offset" : offset,
            "start_year" : start_year,
            "start_month" : start_month,
            "start_day" : start_day,
            "start_hour" : start_hour,
            "start_minute" : start_minute,
            "start_second" : start_second,
            "end_date" : end_date,
            "end_year" : end_year,
            "end_month" : end_month,
            "end_day" : end_day,
            "end_hour" : end_hour,
            "end_minute" : end_minute,
            "end_second" : end_second,
            "run_days" : run_days,
            "run_hours" : run_hours,
            "run_minutes" : run_minutes,
            "run_seconds" : run_seconds
        }
        if not os.getenv("WRF_BASE"):
            print SEPARATOR
            print "Before run this script you should run: . ./set_configuration.sh"
            print SEPARATOR
            sys.exit(1)
        print "ENVIRONMENT VARIABLE LOADED: {0}".format(os.getenv("WRF_BASE"))
        environment["WRF_BASE"] = os.getenv("WRF_BASE")
        print "ENVIRONMENT VARIABLE LOADED: {0}".format(os.getenv("GFS_DIR"))
        environment["GFS_DIR"] = os.getenv("GFS_DIR")
        print "ENVIRONMENT VARIABLE LOADED: {0}".format(os.getenv("ENSAMBLE_DIR"))
        environment["ENSAMBLE_DIR"] = os.getenv("ENSAMBLE_DIR")
        environment["ENSAMBLE"] = get_ensamble_names(environment)
        return environment
    except Exception:
        raise
def usage(msg):
    """Print *msg* framed by separators, then the usage banner, and exit(1)."""
    print SEPARATOR
    print SEPARATOR
    print msg
    print SEPARATOR
    print SEPARATOR
    print """
    Execution of WRF model:
    ./run_wrf_model.py -i=STARTDATE -o=OFFSET -n=2
    or:
    ./run_wrf_model.py --start_date=STARTDATE --offset=OFFSET --nodes=2
    Where STARTDATE has the follow format: YYYYMMDDHH
    and OFFSET is an integer value that represent the forecast hours
    starting from the STARTDATE and defined in the range [0-MAX_OFFSET]hs.
    The MAX_OFFSET is currently defined in 168hs(a week),
    but it can be editable by the user, changing it in this file.
    The nodes flag is the number of:
    nodes in multi partition, with nodes in [2,8].
    This values can also be changed in this file editing the
    MIN_NODES_AMOUNT/MAX_NODES_AMOUNT variables
    Example:
    ./run_wrf_model.py -i=2018020218 -o=36 -n=2
    means Forecast of 36 hs starting from the date:
    year: 2018
    month: 02
    day: 02
    hour: 18
    forecast time: 36 hs
    running in 2 nodes of multi partition
    Warning: The date is valid only until 14 days behind
    This is a constrain from the GFS site
    """
    print SEPARATOR
    print SEPARATOR
    sys.exit(1)
def check_parameter(init_date, offset, nodes, force=None):
    """Validate the CLI parameters; on any violation print usage and exit.

    - init_date must parse as YYYYMMDDHH and (unless *force*) be at most
      14 days in the past
    - offset must lie in [0, MAX_OFFSET]
    - nodes must lie in [MIN_NODES_AMOUNT, MAX_NODES_AMOUNT) — the upper
      bound is exclusive, matching the documented range [2, 8]
    """
    try:
        date = datetime.strptime(init_date, '%Y%m%d%H')
        if date < datetime.now() - timedelta(days=14):
            if not force:
                usage(msg="Date available until 14 days ago")
        if not int(offset) in range(0, MAX_OFFSET + 1):
            usage(msg="Forecast's hours grater than 168hs")
        if not int(nodes) in range(MIN_NODES_AMOUNT, MAX_NODES_AMOUNT):
            usage(msg="Mendieta nodes out of allowed range")
    except ValueError:
        # Raised by strptime/int on malformed values.
        usage(msg="Error in the date format")
def main():
    """Entry point: parse CLI args, validate them, download GFS data,
    rewrite the namelists and submit the WRF ensemble jobs (Python 2)."""
    print """
     __ _______ ______
     \ \ / / __ \| ____|
     \ \ /\ / /| |__) | |__
     \ \/ \/ / | _ /| __|
     \ /\ / | | \ \| |
     \/ \/ |_| \_\_|
    """
    time.sleep(1)
    start_date = None
    offset = None
    nodes = 2 # Default value in multi partition
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--start_date', help='Start date for a forecast')
    parser.add_argument('-o', '--offset', help='Amount of forecast hs')
    parser.add_argument('-n', '--nodes', help='Mendieta nodes' )
    parser.add_argument('-f', '--force', action='store_true', help='force execution for any date' )
    args = parser.parse_args()
    if args.start_date and args.offset and args.nodes:
        start_date = args.start_date
        offset = args.offset
        nodes = args.nodes
        force = args.force
        check_parameter(start_date, offset, nodes, force)
    else:
        # All three of -i/-o/-n are required together.
        usage("Insert all the parameters")
    try:
        environment = define_environment(start_date, offset)
        download_grib_files(environment, offset)
        load_configuration(environment, offset)
        run_process_model(environment, nodes)
    except Exception:
        raise
|
998,927 | f852a029ed501c9b99422b3db96e062d241646bb | import re,json
raw_html = open("coursesearch.html", "r").read()
#html_regex = "<OPTION VALUE=\"(.*)\">(.*)"
html_regex = "<option value=\"(.*)\">(.*)"
schools_raw = re.findall(html_regex,raw_html)
schools = []
for school_raw in schools_raw:
school = {}
school["name"] = school_raw[0]
school["title"] = school_raw[1]
schools.append(school)
print school
open("schools.json", "w").write(json.dumps(schools))
|
998,928 | 3f419b101d145deb6b89d0d6dec3b1833d2e33da | if __name__ == '__main__':
a = int(input())
b = int(input())
c1=a+b
c2=a-b
c3=a*b
print(c1)
print(c2)
print(c3)
|
998,929 | d6b1e080ca9c1ad7162735660ec008c4edd5eeff | class ET:
def __init__(self,value):
self.value = value
self.left = None
self.right = None
def isOperator(c):
    """Return True when *c* is one of the supported binary operators."""
    return c in ('+', '-', '*', '/', '^')
def inorder(t):
    """Print the expression tree rooted at *t* in in-order (left, node, right).

    Bug fix: the original recursed on the undefined name ``self`` instead of
    the parameter ``t`` (``inorder(self.left)``), raising NameError on any
    non-empty tree.
    """
    if t is not None:
        inorder(t.left)
        print(t.value)
        inorder(t.right)
def constructionTree(postfix):
    """Build an expression tree from a postfix string and return its root.

    Operands become leaf nodes; each operator pops two subtrees off the
    stack (right child first, then left) and becomes their parent.
    """
    nodes = []
    for symbol in postfix:
        node = ET(symbol)
        if isOperator(symbol):
            node.right = nodes.pop()
            node.left = nodes.pop()
        nodes.append(node)
    return nodes.pop()
postfix = "ab+fe*g*-"
r = constructionTree(postfix)
inorder(r)
|
998,930 | dc53b5eedd83f6b1c9b1ef49b4354ae6cbee995f | class Solution:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
people = num_people * [0]
give = 0
while candies > 0:
people[give % num_people] = people[give % num_people] + min(candies, give + 1)
give = give + 1
candies = candies - give
return people
'''
Runtime: 48 ms, faster than 44.83% of Python3 online submissions for Distribute Candies to People.
Memory Usage: 13.9 MB, less than 100.00% of Python3 online submissions for Distribute Candies to People.
''' |
998,931 | ff8e4723a2a6745dcfc9c8c462968d53ea1aebb6 | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template
from c7n_azure.session import Session
from c7n.utils import local_session
from mock import patch
class ResourceGroupTest(BaseTest):
    """Tests for the azure.resourcegroup resource: policy schema validation,
    the empty-group filter, and the delete action (SDK call mocked)."""

    def setUp(self):
        super(ResourceGroupTest, self).setUp()

    def test_resource_group_schema_validate(self):
        # The policy must validate without hitting a live Azure session.
        with self.sign_out_patch():
            p = self.load_policy({
                'name': 'test-azure-resource-group',
                'resource': 'azure.resourcegroup',
                'filters': [
                    {'type': 'empty-group'}
                ],
                'actions': [
                    {'type': 'delete'}
                ]
            }, validate=True)
            self.assertTrue(p)

    @arm_template('emptyrg.json')
    def test_empty_group(self):
        # The ARM template provisions test_emptyrg with no resources in it,
        # so the empty-group filter must match exactly that group.
        p = self.load_policy({
            'name': 'test-azure-resource-group',
            'resource': 'azure.resourcegroup',
            'filters': [
                {'type': 'value',
                 'key': 'name',
                 'op': 'eq',
                 'value': 'test_emptyrg'},
                {'type': 'empty-group'}]})
        resources = p.run()
        self.assertEqual(len(resources), 1)
        self.assertEqual(resources[0]['name'], 'test_emptyrg')

    @arm_template('emptyrg.json')
    def test_delete_resource_group(self):
        # Patch the management client's begin_delete so nothing is deleted.
        with patch(self._get_mgmt_client_string() + '.begin_delete') \
                as begin_delete_mock:
            p = self.load_policy({
                'name': 'test-azure-resource-group',
                'resource': 'azure.resourcegroup',
                'filters': [
                    {'type': 'value',
                     'key': 'name',
                     'op': 'eq',
                     'value': 'test_emptyrg'}],
                'actions': [
                    {'type': 'delete'}]})
            p.run()
            begin_delete_mock.assert_called()

    def _get_mgmt_client_string(self):
        # Resolve the fully-qualified name of the resource-groups operations
        # class so the patch target survives SDK module reorganizations.
        client = local_session(Session) \
            .client('azure.mgmt.resource.ResourceManagementClient').resource_groups
        return client.__module__ + '.' + client.__class__.__name__
|
998,932 | 47a4704b8c1fba8c073db28bf6b41d475062d66d | import os
import PIL.Image
import PIL.ImageOps
import numpy as np
count = 0
def exif_transpose(img, file):
    """Return *img* with its EXIF orientation tag (274) applied as an actual
    rotation/mirror; images without EXIF data pass through unchanged.

    Side effects: increments the module-level ``count`` for every non-None
    image and prints debug lines for the orientations encountered.
    """
    global count
    if not img:
        return img
    count += 1
    exif_orientation_tag = 274
    # Check for EXIF data (only present on some files)
    if hasattr(img, "_getexif") and isinstance(img._getexif(), dict) and exif_orientation_tag in img._getexif():
        exif_data = img._getexif()
        orientation = exif_data[exif_orientation_tag]
        # print('hasattr')
        # Handle EXIF Orientation
        if orientation == 1:
            # Normal image - nothing to do!
            print(file, '1', count, img.size)
            pass
        elif orientation == 2:
            # Mirrored left to right
            print('2')
            img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 3:
            print('3')
            # Rotated 180 degrees
            img = img.rotate(180)
        elif orientation == 4:
            print('4')
            # Mirrored top to bottom
            img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 5:
            print('5')
            # Mirrored along top-left diagonal
            img = img.rotate(-90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 6:
            print(file,'6', count, img.size)
            # Rotated 90 degrees
            img = img.rotate(-90, expand=True)
        elif orientation == 7:
            print('7')
            # Mirrored along top-right diagonal
            img = img.rotate(90, expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 8:
            print('8')
            # Rotated 270 degrees
            img = img.rotate(90, expand=True)
    return img
def load_image_file(file, mode='RGB'):
    """Load an image file, apply its EXIF orientation, and return it as a
    NumPy array converted to *mode* (default 'RGB')."""
    # Load the image with PIL
    img = PIL.Image.open(file)
    # if hasattr(PIL.ImageOps, 'exif_transpose'):
    # Very recent versions of PIL can do exit transpose internally
    # print('if')
    # img = PIL.ImageOps.exif_transpose(img)
    # else:
    # Otherwise, do the exif transpose ourselves
    # print('else')
    img = exif_transpose(img, file=file)
    img = img.convert(mode)
    return np.array(img)
folder = 'data/rafeeq/test1/images/'
# Sanity-check every entry in the folder by loading it (printing EXIF
# orientation handling along the way).
# NOTE(review): assumes every directory entry is a readable image file;
# PIL will raise on anything else.
for i in os.listdir(folder):
    load_image_file(folder+i)
998,933 | 15817ce861bffa2b8718bd1a71e20c3dc4e8d827 | from teacherbot.bot import bot
|
998,934 | 4aef6fb48a8bfa1ba1fec806348839c5eb69ed6f | '''
Given an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.
The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.
Note:
Your returned answers (both index1 and index2) are not zero-based.
You may assume that each input would have exactly one solution and you may not use the same element twice.
Example:
Input: numbers = [2,7,11,15], target = 9
Output: [1,2]
Explanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.
'''
# 1 o(n^2), Basic method
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Brute-force O(n^2): try every index pair and return the first
        (1-based) pair whose values sum to *target*."""
        size = len(numbers)
        for i in range(size - 1):
            for j in range(i + 1, size):
                if numbers[i] + numbers[j] == target:
                    return [i + 1, j + 1]
# 2 o(n), Close from both sides
class Solution:
    def twoSum(self, numbers: List[int], target: int) -> List[int]:
        """Two-pointer O(n): exploit the sorted order by closing in from
        both ends until the pair summing to *target* is found."""
        lo, hi = 0, len(numbers) - 1
        while numbers[lo] + numbers[hi] != target:
            if numbers[lo] + numbers[hi] > target:
                hi -= 1
            else:
                lo += 1
        return [lo + 1, hi + 1]
998,935 | 7008b4c43731306002ba5144d15a726b30c7d37e | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.7.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Parameter guesses
#
# This document is the GPyTorch version of the parameter_guesses file that was originally made with GPy.
#
# ### Library imports and defaults
# %%
from scipy.io import wavfile as wav
import gpytorch
import torch
import GPy
import math
import numpy
# %pylab inline
figsize(15, 5)
# %% [markdown]
# ### Helper functions
# Define some helper functions for common functionality
# %%
# Print the hyperparameters of the given kernel
# Useful for keeping track of values during training
def print_kernel_parameters(kernel):
    """Recursively print the (constraint-transformed) hyperparameters of a
    GPyTorch kernel, descending through wrapper kernels (.base_kernel) and
    composite kernels (.kernels)."""
    def composition_decorator(k):
        # Wrapper kernel: print its own params, then recurse into the child.
        if hasattr(k, 'base_kernel'):
            standard(k)
            composition_decorator(k.base_kernel)
        # Composite kernel (e.g. sum/product): recurse into each component.
        elif hasattr(k, 'kernels'):
            for i in k.kernels:
                composition_decorator(i)
        else: standard(k)
    def standard(k):
        print(' ' + type(k).__name__)
        for parameter_name, parameter in k.named_parameters():
            # Stop before the nested kernel's parameters; the recursion
            # above prints those separately.
            if parameter_name.startswith('base_kernel'):
                break
            constraint = k.constraint_for_parameter_name(parameter_name)
            # Raw parameters live in an unconstrained space; show the
            # transformed (actual) value without the 'raw_' prefix.
            true_name = parameter_name.replace('raw_', '')
            true_value = str(round(constraint.transform(parameter).item(), 4))
            print(' ' + true_name + ': ' + true_value)
    composition_decorator(kernel)
# Training loop for training a model
def train_model(model, likelihood, verbose=False, training_iter=1000):
    """Optimize the hyperparameters of an exact GP model with Adam.

    Uses the module-level `train_T` / `train_X` tensors as training data
    (kept for backward compatibility with the surrounding notebook cells).

    Args:
        model: a gpytorch ExactGP model.
        likelihood: the Gaussian likelihood attached to the model.
        verbose: if True, print loss and hyperparameters every 100 steps.
        training_iter: number of Adam steps (new parameter; defaults to the
            previously hard-coded 1000, so existing calls are unchanged).
    """
    model.train()
    likelihood.train()
    optimizer = torch.optim.Adam([
        {'params': model.parameters()},
    ], lr=0.1)
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
    print('--- Optimizing hyperparameters ---')
    for i in range(training_iter):
        optimizer.zero_grad()
        output = model(train_T)
        loss = -mll(output, train_X)
        loss.backward()
        # Bug fix: the original used `(i + 1) % 100 is 0`, an identity
        # comparison on an int (unreliable and a SyntaxWarning on modern
        # Python); value equality is what was intended.
        if verbose and (i + 1) % 100 == 0:
            print('Iter %d/%d - Loss: %.3f   noise: %.3f' % (
                i + 1, training_iter, loss.item(), model.likelihood.noise.item()
            ))
            print_kernel_parameters(model.covar_module)
            print('')
        optimizer.step()
# %% [markdown]
# # Data
#
# Normalize data and visualize it.
# %%
rate, data = wav.read('../data/3-notes.wav')
data = data/amax(abs(data))
data -= mean(data) # Remove inaudible DC component (i.e., the mean)
print('Data mean =', mean(data))
x = data[:, 0]
t = arange(len(x))*1000./rate
# %%
plot(t, x)
xlabel('time (msec)')
ylabel('amplitude (a.u.)')
# %%
first_note = x[6100:6500]
second_note = x[7800:8200]
third_note = x[10000:10400]
full_t = linspace(0, 400*1000./rate, 400) # msec
full_x = first_note
train_t = concatenate((full_t[:200], full_t[300:]))
train_x = concatenate((full_x[:200], full_x[300:]))
test_t = full_t[200:300]
test_x = full_x[200:300]
# Transform to tensors for use in GPyTorch
full_T = torch.as_tensor(full_t)
full_X = torch.as_tensor(full_x)
train_T = torch.as_tensor(train_t)
train_X = torch.as_tensor(train_x)
test_T = torch.as_tensor(test_t)
test_X = torch.as_tensor(test_x)
# %% [markdown]
# # Acquire prior information
# %%
var_x = var(train_x)
std_x = sqrt(var_x)
print('Standard deviation of the data (a.u.) =', std_x)
figsize(15, 5)
subplot(121)
spectrum, freqs, _ = magnitude_spectrum(full_x, Fs=rate, scale='dB')
# Find and annotate dominant frequency component (largest peak in the spectrum)
fmax = freqs[argmax(spectrum)] # Hz
f0 = fmax/2 # Hz
Tmax = 1000./fmax # msec
T0 = Tmax*2 # msec
axvline(x=f0, label='fundamental frequency', color='red')
axvline(x=fmax, label='dominant frequency', color='black')
legend()
subplot(122)
plot(train_t, train_x, '-', label='train')
plot(test_t, test_x, '-', label='test', color='lightgreen')
errorbar(10, 0, xerr=T0/2, capsize=5, label='fundamental period', color='red')
errorbar(10, -.1, xerr=Tmax/2, capsize=5,
label='period corresponding to dominant frequency')
xlabel('time (msec)')
ylabel('amplitude (a.u.)')
legend()
print('Dominant frequency component (Hz) =', fmax)
print(f'Corresponding period (msec) = ', Tmax)
# %% [markdown]
# # RBF kernel
#
# ## Vanilla kernel
# Parameters of the kernel are guessed based on the acquired prior information.
# %%
def SNR_to_noise_power(signal_power, SNR_dB):
    """Invert SNR_dB = 10*log10(signal_power / noise_power) for noise_power."""
    attenuation = 10.0 ** (-SNR_dB / 10.0)
    return signal_power * attenuation
SNR = 20 # dB
noise_var = SNR_to_noise_power(var_x, SNR)
# Derive lengthscale from expected number of zero upcrossings in unit interval (1 msec)
# We derive the number of zero upcrossings per msec from the dominant frequency component
# Formula from Rasmussen & Williams (2006, 83)
ell_guess = Tmax/(2*pi)
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
kernel.outputscale = var_x
kernel.base_kernel.lengthscale = ell_guess
cov = kernel(full_T)
title('Compare full data to vanilla RBF kernel with sensible defaults')
plot(full_t, full_x, label='full data')
legend()
n = 2
for i in range(n):
x0 = multivariate_normal(full_t*0., cov.numpy())
plot(full_t, 2*i + x0, alpha=1/n, color='black')
# %% [markdown]
# ## Model with Vanilla kernel
# The model represents noise as a Gaussian likelihood that is included in the model.
# %%
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, kernel, likelihood):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = kernel
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
likelihood.noise_covar.noise = noise_var
model = ExactGPModel(train_T, train_X, kernel, likelihood)
# %run display_model.ipy
# %% [markdown]
# ## Optimize model hyperparameters
# Train the model in order to improve the hyperparameters of the kernel.
# %%
train_model(model, likelihood)
# %%
lengthscale = model.covar_module.base_kernel.lengthscale.item()
zero_crossing_rate_per_msec = 1 / (lengthscale*2*pi)
print('Equivalent mean zero upcrossing rate (Hz) =',
1000. / (lengthscale*2*pi))
# %%
# %run display_model.ipy
# %% [markdown]
# # Periodic kernel
#
# ## Vanilla kernel
# %%
ell_period_guess = T0/(4*pi)
kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
kernel.outputscale = var_x
kernel.base_kernel.lengthscale = ell_period_guess
kernel.base_kernel.period_length = T0
cov = kernel(full_T)
title('Compare full data to vanilla Periodic kernel with sensible defaults')
plot(full_t, full_x, label='full data')
legend()
n = 3
for i in range(n):
x0 = multivariate_normal(full_t*0., cov.numpy())
plot(full_t, 2*i + x0, alpha=1/n, color='black')
show()
title('Compare full data spectrum with spectrum of inferred GP mean')
magnitude_spectrum(full_x, rate, scale='dB', label='full data')
magnitude_spectrum(x0, rate, scale='dB',
color='black', label='GP mean')
legend()
show()
# %% [markdown]
# ## Optimize
# %%
likelihood.noise_covar.noise = noise_var
model = ExactGPModel(train_T, train_X, kernel, likelihood)
train_model(model, likelihood)
# %%
# %run display_model.ipy
# %%
with torch.no_grad(), gpytorch.settings.fast_pred_var():
predictions = likelihood(model(full_T))
xmean = predictions.mean
title('Compare full data spectrum with spectrum of inferred GP mean')
magnitude_spectrum(full_x, rate, scale='dB', label='full data')
magnitude_spectrum(xmean[:], rate, scale='dB',
color='black', label='GP mean')
legend()
show()
# %%
title('Details of prediction')
with torch.no_grad(), gpytorch.settings.fast_pred_var():
predictions = likelihood(model(test_T))
xmean = predictions.mean
plot(test_t, test_x, color='lightgreen')
n = 5
for i in range(n):
x0 = multivariate_normal(
xmean, predictions.covariance_matrix.detach().numpy())
plot(test_t, x0, alpha=1/n, color='black')
show()
# %%
ell_guess_modulator = 50*T0/(2*pi)
kernel = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel() * gpytorch.kernels.PeriodicKernel())
kernel.outputscale = var_x
kernel.base_kernel.kernels[0].lengthscale = ell_guess_modulator
kernel.base_kernel.kernels[1].lengthscale = ell_period_guess
kernel.base_kernel.kernels[1].period_length = T0
likelihood.noise_covar.noise = noise_var
model = ExactGPModel(train_T, train_X, kernel, likelihood)
train_model(model, likelihood)
# %%
# %run display_model.ipy
|
998,936 | 965ad26dd23ca2cc3537ca491aa3eb2d88be1e9d | '''
this is the file for reading in audio inputs and processing them into MFCCs
'''
import fnmatch
import os
import random
import numpy as np
# from python_speech_features import mfcc
import librosa.feature
import scipy.io.wavfile as wav
from scipy.io.wavfile import write as wav_write
import librosa
# make mfcc np array from wav file using speech features package
def make_mfcc(sig, rate=8000):
    """Compute an MFCC matrix of shape (n_coefficients, n_frames) for a signal.

    Bug fix: the original called `mfcc(sig, rate)` from python_speech_features,
    but that import is commented out at the top of this file, so every call
    raised NameError.  Use librosa (already imported) instead; n_mfcc=13
    matches make_librosa_mfcc() below.  NOTE(review): librosa's coefficient
    values differ numerically from python_speech_features' — confirm
    downstream consumers only rely on the (13, n_frames) shape.
    """
    return librosa.feature.mfcc(y=sig, sr=rate, n_mfcc=13)
def make_split_audio_array(folder, num_splits=5):
    """Build a 2-D array of overlapping audio windows from every .wav in `folder`.

    Each file is normalized to a standard length, then cut into
    `num_splits - 1` overlapping windows, each two chunks long.
    Returns an array with one flattened window per row.

    Bug fixes:
    - `chunk` was computed with true division (a float on Python 3), making
      the slice indices invalid; use integer division;
    - the bare filename was passed to make_standard_length(), so any folder
      other than the working directory failed; join it with `folder`.
    """
    windows = []
    for filename in os.listdir(folder):
        if filename.endswith('wav'):
            normed_sig = make_standard_length(os.path.join(folder, filename))
            chunk = normed_sig.shape[0] // num_splits
            for i in range(num_splits - 1):
                windows.append(normed_sig[i * chunk:(i + 2) * chunk])
    out = np.array(windows)
    return out.reshape(out.shape[0], -1)
def downsample(filename, outrate=8000, write_wav=False):
    """Load a wav at 22050 Hz and resample it to `outrate`.

    Returns (signal, outrate) when write_wav is False; otherwise writes
    '<filename>_down_<outrate>.wav' to disk and returns None.
    """
    audio, native_rate = librosa.load(filename, sr=22050)
    resampled = librosa.core.resample(audio, native_rate, outrate, scale=True)
    if write_wav:
        wav_write('{}_down_{}.wav'.format(filename, outrate), outrate, resampled)
        return None
    return resampled, outrate
def make_standard_length(filename, n_samps=240000):
    """Downsample a wav to 8 kHz, wrap-pad/trim it to n_samps samples, z-score it.

    Returns a 1-D numpy array of exactly n_samps samples with zero mean and
    unit variance.  (Removed the unused local `outrate = 8000`, dead code
    left over from an earlier version.)
    """
    down_sig, rate = downsample(filename)
    normed_sig = librosa.util.fix_length(down_sig, n_samps, mode="wrap")
    return (normed_sig - np.mean(normed_sig)) / np.std(normed_sig)
# make mfcc np array from wav file using librosa package
def make_librosa_mfcc(filename):
    """Return a flattened 13-coefficient MFCC feature vector for one audio file."""
    signal = make_standard_length(filename)
    coefficients = librosa.feature.mfcc(y=signal, sr=8000, n_mfcc=13)
    return coefficients.flatten()
def randomize_files(files):
    """Yield len(files) picks drawn uniformly at random from `files`.

    NOTE(review): this samples *with replacement* (some files may repeat,
    others never appear), which is kept for backward compatibility — use
    random.sample(files, len(files)) if a true shuffle is wanted.
    Rewritten with random.choice, without shadowing the name `file`.
    """
    for _ in files:
        yield random.choice(files)
def find_files(directory, pattern='*.wav'):
    """Recursively collect paths under `directory` whose basename matches `pattern`."""
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(directory)
        for name in fnmatch.filter(names, pattern)
    ]
def load_single_language_audio(directory):
    """Stack MFCC feature vectors for every .wav under one accent directory.

    Files are visited in the (with-replacement) random order produced by
    randomize_files.  Returns a 1-D vector when a single file is drawn,
    otherwise a 2-D array with one row per drawn file.
    """
    root = "../../../../../Downloads/cslu_fae/speech/"
    # root = "../test/"
    X = None
    for filename in randomize_files(find_files(root + directory)):
        features = make_librosa_mfcc(filename)
        X = features if X is None else np.vstack((X, features))
    return X
def load_audio(typeA, typeB):
    """Load MFCC matrices for two accent classes and build (X, y).

    Rows of class `typeA` are labeled 1, rows of `typeB` 0.  As a side
    effect, caches the per-class matrices to ../AR_mfcc and ../JA_mfcc.
    """
    features_a = load_single_language_audio(typeA)
    print(features_a.shape)
    print("A done")
    features_b = load_single_language_audio(typeB)
    X = np.vstack((features_a, features_b))
    y = np.append(np.ones(len(features_a)), np.zeros(len(features_b)))
    np.savetxt("../AR_mfcc", features_a)
    np.savetxt("../JA_mfcc", features_b)
    return X, y
def load_preprocessed_audio():
    """Rebuild (X, y) from the cached MFCC matrices without re-reading audio."""
    class_a = np.loadtxt("../AR_mfcc")
    class_b = np.loadtxt("../JA_mfcc")
    X = np.vstack((class_a, class_b))
    print(X.shape)
    y = np.append(np.ones(len(class_a)), np.zeros(len(class_b)))
    return X, y
def preprocess():
    """Generate and cache one MFCC matrix per accent class under ../data/."""
    for i in ["AR","CA","FR","GE","HI","JA","MA","MY","RU","SP","IT","KO"]:
        print("Generating......" + i)
        A = load_single_language_audio(i)
        new_path = "../data/"+i+"_mfcc"
        np.savetxt(new_path, A)
# NOTE(review): this runs the full (slow) preprocessing at *import* time;
# consider an `if __name__ == "__main__":` guard — confirm nothing relies on
# the import side effect before changing it.
preprocess()
|
998,937 | 97d9f07e40a09f7b178c6a235bd9c06f8d2756cf | from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Book, Rating
from rest_framework.authtoken.views import Token
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django users that also issues a DRF auth token on signup."""

    class Meta:
        model = User
        fields = ['id', 'username', 'password']
        # Hide the password from GET responses while still accepting it on POST.
        extra_kwargs = {'password': {'write_only': True, 'required': True}}

    def create(self, validated_data):
        """Create the user (create_user hashes the password) plus its token.

        Bug fix: `Token.objects.create(user)` passed the user positionally,
        which raises TypeError — Django model managers only take keyword
        arguments, i.e. `Token.objects.create(user=user)`.
        """
        user = User.objects.create_user(**validated_data)
        Token.objects.create(user=user)
        return user
class BookSerializer(serializers.ModelSerializer):
    """Serializer exposing a book's basic fields and its rating aggregates."""
    class Meta:
        model = Book
        fields = ['id', 'title', 'description' , 'total_no_rating', 'avg_no_rating']
class RatingSerializer(serializers.ModelSerializer):
    """Serializer exposing every field of the Rating model."""
    class Meta:
        model = Rating
        fields = '__all__'
|
998,938 | a519977c634bf1bdcb371496f2155533da600d10 | from django.contrib import admin
from django.urls import path
from .views import home, API_used, contact
from django.views.static import serve
from django.conf.urls.static import static
# Bug fixes: `url` lives in django.conf.urls (not django.conf.urls.static),
# and `settings` was referenced below without ever being imported.
from django.conf.urls import url
from django.conf import settings

urlpatterns = [
    path('', home, name="home"),
    path('api/', API_used, name="api"),
    path('admin/', admin.site.urls),
    # Serve uploaded media and collected static files (development use).
    url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
    url(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT}),
]
|
998,939 | caddf2857cdc59884382adce38cc85f7fa3e94fa | from flask import Flask, render_template, g, request, session, redirect, url_for
from database import get_db
from werkzeug.security import generate_password_hash, check_password_hash
import os
app = Flask(__name__)
@app.teardown_appcontext
def close_db(error):
    """App-context teardown: close the SQLite handle if this request opened one."""
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
@app.route('/', methods=['GET', 'POST'])
def login():
    """Render the login form; on POST, persist the submitted credentials.

    Security fix: the password was stored in plain text even though
    werkzeug's generate_password_hash is imported at the top of this file —
    hash it before it touches the database.  The INSERT stays parameterized,
    so there is no SQL-injection exposure.
    """
    if request.method == 'POST':
        db = get_db()
        hashed_password = generate_password_hash(request.form['xpassword'])
        db.execute('insert into bec (xemail, xpassword) values (?, ?)',
                   [request.form['xemail'], hashed_password])
        db.commit()
        return '<h1>Download pin: 90234</h1>'
    return render_template('login.html')
if __name__ == '__main__':
app.run(debug=True) |
998,940 | b199ebdca374b3c059b7ca2ef29c3570c4ecdc66 | from metadata_driver_mongodb import plugin
__author__ = """Nevermined"""
__version__ = '0.1.0'
|
998,941 | 55ae06c520ff11f386a407dbb4c37375731ce057 | import os
import keras
import math
import threading
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import pandas as pd
import numpy as np
class DataGenerator(keras.utils.Sequence):
    """Generates batches of ([image, metadata], target) samples for Keras."""

    def __init__(self, imgs_dir, metadata_dataframe, output_dataframe, batch_size=32):
        """Initialization.

        Args:
            imgs_dir: directory containing the input images.
            metadata_dataframe: per-image metadata with 'Name',
                'Spectral_Center' and 'Cross_Rate' columns.
            output_dataframe: per-image targets with 'Nothing', 'BP1', 'BP2'.
            batch_size: samples per batch.
        """
        self.imgs_dir = imgs_dir
        self.metadata_dataframe = metadata_dataframe
        self.output_dataframe = output_dataframe
        self.batch_size = batch_size

    def __len__(self):
        """Number of full batches per epoch, based on the image-file count."""
        file_count = len([name for name in os.listdir(self.imgs_dir)
                          if os.path.isfile(self.imgs_dir + '//' + name)])
        return math.floor(file_count / self.batch_size)

    def __getitem__(self, index):
        """Generate one batch of data.

        Bug fix: the original returned `np.array(img)` — only the *last*
        image loaded in the loop — instead of the accumulated batch list.
        Stale debug prints were removed.
        """
        rows = self.metadata_dataframe.iloc[index * self.batch_size:(index + 1) * self.batch_size]
        names = rows['Name']
        # NOTE(review): indexing `names[k]` with absolute positions assumes
        # metadata_dataframe keeps its default RangeIndex — confirm.
        rng = range(index * self.batch_size, (index + 1) * self.batch_size)
        img_files_temp = [names[k] for k in rng]
        img_batch_list = []
        meta_batch_list = []
        y_batch_list = []
        for img_file in img_files_temp:
            img, meta, y = self.__data_generation(img_file)
            img_batch_list.append(img)
            meta_batch_list.append(meta)
            y_batch_list.append(y)
        return [np.array(img_batch_list), np.array(meta_batch_list)], np.array(y_batch_list)

    def __data_generation(self, img_file):
        """Load one sample: (image array, [spectral center, cross rate], targets)."""
        img = load_img(self.imgs_dir + '//' + img_file)
        img_array = img_to_array(img)
        metaData = np.array([
            self.metadata_dataframe.loc[self.metadata_dataframe['Name'] == img_file]['Spectral_Center'].values[0],
            self.metadata_dataframe.loc[self.metadata_dataframe['Name'] == img_file]['Cross_Rate'].values[0]
        ])
        output_data = np.array([
            self.output_dataframe.loc[self.metadata_dataframe['Name'] == img_file]['Nothing'].values[0],
            self.output_dataframe.loc[self.metadata_dataframe['Name'] == img_file]['BP1'].values[0],
            self.output_dataframe.loc[self.metadata_dataframe['Name'] == img_file]['BP2'].values[0]
        ])
        return img_array, metaData, output_data
|
998,942 | 7c9e7ca42decfc20e5ffa8722cc9acab747cf6f2 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Test matcher
# ---------------------------------------------------------------------
# Copyright (C) 2007-2017 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.matcher import match
def test_zero():
    # An empty expression matches any context.
    assert match({}, {}) == True
    # Typo fix: the original passed the *set* {"k", "v"}; the matcher takes
    # a dict of context variables, i.e. {"k": "v"}.
    assert match({"k": "v"}, {}) == True
def test_eq():
    # A plain key/value pair in the expression is an exact-equality match.
    ctx = {"x": "y"}
    assert match(ctx, {"x": "y"}) == True
def test_eq_and():
    # Several keys in one expression must all match (logical AND).
    ctx = {"x": "y", "m": "n"}
    assert match(ctx, {"x": "y", "m": "k"}) == False
    assert match(ctx, {"x": "y", "m": "n"}) == True
def test_regex():
    # $regex matches the context value against a regular expression;
    # extra plain keys still participate as equality conditions.
    cases = [
        ({"platform": "S50N", "vendor": "Force10"},
         {"platform": {"$regex": "^S"}}, True),
        ({"platform": "E600", "vendor": "Force10"},
         {"platform": {"$regex": "^S"}}, False),
        ({"platform": "S50N", "vendor": "Force10"},
         {"platform": {"$regex": "^S"}, "vendor": "Force10"}, True),
        ({"platform": "S50N", "vendor": "Force10"},
         {"platform": {"$regex": "^S"}, "vendor": "Dell"}, False),
    ]
    for ctx, expr, expected in cases:
        assert match(ctx, expr) == expected
def test_in():
    # $in succeeds when the context value is one of the listed alternatives.
    cases = [
        ({"platform": "S50N", "vendor": "Force10"},
         {"platform": {"$in": ["S50N", "S50P"]}}, True),
        ({"platform": "S50N", "vendor": "Force10"},
         {"platform": {"$in": ["S50N", "S50P"]},
          "vendor": {"$in": ["Force10", "Dell"]}}, True),
        ({"platform": "S25N", "vendor": "Force10"},
         {"platform": {"$in": ["S50N", "S50P"]}}, False),
    ]
    for ctx, expr, expected in cases:
        assert match(ctx, expr) == expected
def test_gt():
    # $gt is a strict greater-than comparison on version strings.
    for bound, expected in [("12.2(48)SE", True),
                            ("12.2(50)SE", False),
                            ("12.2(51)SE", False)]:
        assert match({"version": "12.2(50)SE"},
                     {"version": {"$gt": bound}}) == expected
def test_gte():
    # $gte is an inclusive greater-or-equal comparison on version strings.
    for bound, expected in [("12.2(48)SE", True),
                            ("12.2(50)SE", True),
                            ("12.2(51)SE", False)]:
        assert match({"version": "12.2(50)SE"},
                     {"version": {"$gte": bound}}) == expected
def test_lt():
    # $lt is a strict less-than comparison on version strings.
    for bound, expected in [("12.2(48)SE", False),
                            ("12.2(50)SE", False),
                            ("12.2(51)SE", True)]:
        assert match({"version": "12.2(50)SE"},
                     {"version": {"$lt": bound}}) == expected
def test_lte():
    # $lte is an inclusive less-or-equal comparison on version strings.
    for bound, expected in [("12.2(48)SE", False),
                            ("12.2(50)SE", True),
                            ("12.2(51)SE", True)]:
        assert match({"version": "12.2(50)SE"},
                     {"version": {"$lte": bound}}) == expected
def test_between():
    # Combining $gte and $lte on one key checks an inclusive range.
    window = {"$gte": "12.2(48)SE", "$lte": "12.2(52)SE"}
    for version, expected in [("12.2(33)SE", False),
                              ("12.2(48)SE", True),
                              ("12.2(50)SE", True),
                              ("12.2(52)SE", True),
                              ("12.2(60)SE", False)]:
        assert match({"version": version}, {"version": window}) == expected
|
998,943 | e824acc82b083e6dde558ebcf51602616c3a0831 | squares = [1, 4, 9, 16, 25]
print("当前列表是: ", squares)
# Note:
# Every slice operation returns a *new* list containing the requested
# elements — i.e. a slice is a shallow copy of part of the list:
print('类似字符串列表也支持切片操作,返回一个子列表: ', squares[0: 2])
squares.append(36)
print("在列表末尾添加新元素: ", squares)
# i = int(input("请输入一个正整数:"))
i = squares[0]
if i < 10:
    print("这是一个一位数: ", i)
elif i == 10:
    print("这是一个最小的两位数")
else:
    print("这不是一个一位数: ", i)
for s in squares:
    print(s)
print("--------------------------分割线--------------------------")
# range(start, end, step)
# The object returned by range() behaves like a list in many ways but is
# not one: it yields consecutive items lazily as you iterate, without ever
# materializing the whole list, which saves memory.
for i in range(0, 20, 5):
    print(i)
def initlog(*args):
    """Stub for logging setup; accepts any arguments and does nothing yet."""
    pass # Remember to implement this!
# The keyword `def` introduces a function definition. It must be followed by
# the function name and a parenthesized list of formal parameters; the
# statements that form the body start on the next line and must be indented.
# The first statement of the body may optionally be a string literal — that
# string is the function's documentation string, or docstring.
# Even a function without a `return` statement returns a value: the built-in
# `None` (the interpreter normally suppresses printing a bare None result).
def fib(n):
    """Print, on one line, the Fibonacci numbers below n (starting 1 1)."""
    previous, current = 1, 1
    while previous < n:
        print(previous, end=' ')
        previous, current = current, previous + current
    print()
fib(100)
print("--------------------------分割线--------------------------")
# The `in` keyword tests whether a sequence contains a given value.
def f(a, value=None):
    """Append `a` to `value` and return it, defaulting to a fresh list.

    Uses the None-sentinel idiom so consecutive calls never share one
    default list.
    """
    target = [] if value is None else value
    target.append(a)
    return target
998,944 | 31838d2950825569ecc33ca1d2417a9d19a5fb99 | import json
from asgiref.sync import async_to_sync
from channels.generic.websocket import WebsocketConsumer
from channels.consumer import AsyncConsumer
import time
import asyncio
class ChatConsumer(AsyncConsumer):
    """Async websocket consumer that echoes every received text frame.

    Improvement: removed the dead, commented-out countdown block (a bare
    triple-quoted string) that sat inside websocket_connect.
    """

    async def websocket_connect(self, event):
        """Accept the incoming websocket handshake."""
        print("connected", event)
        await self.send({
            "type": "websocket.accept"
        })

    async def websocket_receive(self, event):
        """Echo the received text payload straight back to the client."""
        print("receive", event)
        text_data_json = event['text']
        print(text_data_json)
        await self.send({
            'type': 'websocket.send',
            'text': text_data_json,
        })

    async def websocket_disconnect(self, event):
        """Log the disconnect; no explicit cleanup is required."""
        print("disconnected", event)
998,945 | b22698777640a5c5d9cca5c6e9bfc13ef789abe1 | #coding=utf-8
import os
import sys
# Python 2 only: reload() and sys.setdefaultencoding() do not exist on
# Python 3 — this whole script targets Python 2.
reload(sys)
sys.setdefaultencoding("utf-8")
import pickle
import pandas as pd
import numpy as np
from glob import glob
from sklearn.neighbors import NearestNeighbors
from config import *
# Training manifest: one image path per row plus its class label.
df = pd.read_csv('../train.csv')
train_files = [f for f in df.images.values]
file_id_mapping = {k: v for k, v in zip(df.images.values, df.classes.values)}
# Pre-extracted feature vectors (text-mode 'r' is Python-2 pickle usage).
with open('feature_3.pkl', 'r') as f:
    train_preds, test_preds, test_file_names = pickle.load(f)
# Index the training features and fetch the 22 nearest training neighbours
# for every test sample.
neigh = NearestNeighbors(n_neighbors=22)
neigh.fit(train_preds)
distances_test, neighbors_test = neigh.kneighbors(test_preds)
distances_test, neighbors_test = distances_test.tolist(), neighbors_test.tolist()
def pred():
    """Write submission.csv: one probability per (test file, defect class).

    For every test sample, translate its 22 nearest training neighbours'
    classes into defect labels and use the clipped, renormalized label
    frequencies as predicted probabilities.

    NOTE(review): Python 2 syntax (print statements); `preds_str` is unused
    scaffolding left in place.
    """
    preds_str = []
    with open('submission.csv', 'w') as f:
        f.write('filename|defect,probability\n')
        for filepath, distance, neighbour_ in zip(test_file_names, distances_test, neighbors_test):
            # print filepath, distance, neighbour_
            sample_result = []
            sample_classes = []
            # Pair each neighbour's class with its distance.
            for d, n in zip(distance, neighbour_):
                train_file = train_files[n]
                class_train = file_id_mapping[train_file]
                sample_classes.append(class_train)
                sample_result.append((class_train, d))
            # print sample_classes, sample_result
            # Sort neighbours by distance, closest first.
            sample_result.sort(key=lambda x: x[1])
            labels, scores = zip(*sample_result)
            print ' '.join(labels), scores
            # Map raw classes to defect labels; unknowns fall back to defect_10.
            labels = [defect_dict[l] if l in defect_dict else 'defect_10' for l in labels]
            # Frequency of each defect among the 22 neighbours, clipped away
            # from exact 0/1 before renormalizing to a distribution.
            ps = [np.clip(labels.count(d) / 22.0, 0.001, 0.999) for d in defect_list]
            # print ps
            ps = ps / np.sum(ps)
            # print ps
            for d, p in zip(defect_list, ps):
                f.write('{}|{}, {}\n'.format(filepath,d,p))
pred()
998,946 | 39abf81d9d579a8959478e181dec42e312ba3f2d | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Liblognorm(AutotoolsPackage):
    """Liblognorm is a fast-samples based normalization library."""
    homepage = "https://www.liblognorm.com/"
    url = "https://github.com/rsyslog/liblognorm/archive/v2.0.6.tar.gz"
    # Upstream releases, pinned by tarball sha256 checksum.
    version("2.0.6", sha256="594ea3318ef419cb7f21cf81c513db35a838b32207999a11a82b709da9ff9a2b")
    version("2.0.5", sha256="dd779b6992de37995555e1d54caf0716a694765efc65480eed2c713105ab46fe")
    version("2.0.4", sha256="af4d7d8ce11fb99514169f288163f87cb9ade1cb79595656d96b51b2482c493d")
    version("2.0.3", sha256="fac2a6a5adbeb63d06a63ab2e398b3fac8625d0ea69db68f1d81196897a9d687")
    version("2.0.2", sha256="bdd08e9837e8fcca5029ec12c9fb9f16593313f9d743625bab062250e0daa5d8")
    # Build-time autotools toolchain.
    depends_on("autoconf", type="build")
    depends_on("automake", type="build")
    depends_on("libtool", type="build")
    depends_on("m4", type="build")
    # Link-time library dependencies.
    depends_on("libestr")
    depends_on("libfastjson")
|
998,947 | 27d11e02a2f3ac102b7a793f1df48a954f2b770c | # Generated by Django 2.2.6 on 2019-12-01 05:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the generic `Flag` model —
    # a (content_type, object_id) pair flagged by a user, with an optional
    # note and status; uniqueness prevents duplicate flags per status.
    initial = True
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Flag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('note', models.TextField(blank=True, verbose_name='Note')),
                ('status', models.IntegerField(blank=True, db_index=True, null=True, verbose_name='Status')),
                ('time_created', models.DateTimeField(auto_now_add=True, verbose_name='Date created')),
                ('object_id', models.UUIDField(db_index=True, verbose_name='Object ID')),
                ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='siteflags_flag_flags', to='contenttypes.ContentType', verbose_name='Content type')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='flag_users', to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'verbose_name': 'Flag',
                'verbose_name_plural': 'Flags',
                'abstract': False,
                'unique_together': {('content_type', 'object_id', 'user', 'status')},
            },
        ),
    ]
|
998,948 | 40cbfa66826a1f34434bc0290d1686b5550f2591 | email = 'aunghtetpaing@berkeley.edu'
def schedule(treasury, sum_to, max_digit):
    """Return all completions of `treasury` whose digits sum to `sum_to`.

    A 'treasury' is a string of digits and '?'s; a completion replaces each
    '?' with a digit in 0..max_digit.  Results come back in ascending
    (generation) order.

    Fix: the original partial solution still referenced unfilled `______`
    placeholders and iterated `for x in max_digit` over an int (TypeError);
    this is a complete recursive implementation.

    >>> schedule('?????', 25, 5)
    ['55555']
    >>> schedule('???', 5, 2)
    ['122', '212', '221']
    >>> schedule('?2??11?', 5, 3)
    ['0200111', '0201110', '0210110', '1200110']
    """
    def helper(index, prefix, remaining):
        # All positions filled: keep the string iff the digits hit sum_to.
        if index == len(treasury):
            return [prefix] if remaining == 0 else []
        # Prune branches that already overshoot the target sum.
        if remaining < 0:
            return []
        ch = treasury[index]
        if ch != '?':
            # Fixed digit: consume it and continue.
            return helper(index + 1, prefix + ch, remaining - int(ch))
        # '?': try every allowed digit in ascending order.
        completions = []
        for digit in range(max_digit + 1):
            completions += helper(index + 1, prefix + str(digit), remaining - digit)
        return completions
    return helper(0, '', sum_to)
# ORIGINAL SKELETON FOLLOWS
# def schedule(treasury, sum_to, max_digit):
# """
# A 'treasury' is a string which contains either digits or '?'s.
# A 'completion' of a treasury is a string that is the same as treasury, except
# with digits replacing each of the '?'s.
# Your task in this question is to find all completions of the given `treasury`
# that use digits up to `max_digit`, and whose digits sum to `sum_to`.
# Note 1: the function int can be used to convert a string to an integer and str
# can be used to convert an integer to a string as such:
# >>> int("5")
# 5
# >>> str(5)
# '5'
# Note 2: Indexing and slicing can be used on strings as well as on lists.
# >>> 'evocative'[3]
# 'c'
# >>> 'evocative'[3:]
# 'cative'
# >>> 'evocative'[:6]
# 'evocat'
# >>> 'evocative'[3:6]
# 'cat'
# >>> schedule('?????', 25, 5)
# ['55555']
# >>> schedule('???', 5, 2)
# ['122', '212', '221']
# >>> schedule('?2??11?', 5, 3)
# ['0200111', '0201110', '0210110', '1200110']
# """
# def schedule_helper(treasury, sum_sofar, index):
# if ______ and ______:
# return [treasury]
# elif ______:
# return []
# elif ______:
# return ______
# ans = []
# for x in ______:
# modified_treasury = ______
# ______
# return ans
# return ______
|
998,949 | bd44ad20908105309b35614667f01820c569cb8f | infile=open('acadin.txt','r').readlines()
# Count 'i' (in) and 'o' (out) events until the terminating 'x' line, then
# write the net count (never negative) to acadout.txt.
a, b = 0, 0
for e in infile:
    i = e.strip()
    if i == 'i':
        a += 1
    elif i == 'o':
        b += 1
    if i == 'x':
        break
total = max(0, a - b)
# Improvement: use a context manager so the output file is closed even if
# the write raises (the original open/write/close left it open on error).
with open('acadout.txt', 'w') as outfile:
    outfile.write(str(total))
|
998,950 | 4de26093cb6a0a89ef75eb9379f4130200198210 | import psycopg2
import psycopg2.errorcodes
import csv
import datetime
import itertools
#Здійснюємо з'єднання з БД
con = psycopg2.connect(
database="lab1", user='postgres', password='123456789', host='127.0.0.1', port= '5432')
cur = con.cursor()
#Видаляємо таблицю в разі її існування
cur.execute("DROP TABLE IF EXISTS DIFFERENCE;")
#Створюємо табличку
cur.execute('''
CREATE TABLE DIFFERENCE(
OUTID TEXT NOT NULL PRIMARY KEY,
Birth INTEGER NOT NULL,
SEXTYPENAME TEXT NOT NULL,
REGNAME TEXT NOT NULL,
AREANAME TEXT NOT NULL,
TERNAME TEXT NOT NULL,
REGTYPENAME TEXT NOT NULL,
TerTypeName TEXT NOT NULL,
ClassProfileNAME TEXT,
ClassLangName TEXT,
EONAME TEXT,
EOTYPENAME TEXT,
EORegName TEXT,
EOAreaName TEXT,
EOTerName TEXT,
EOParent TEXT,
UkrTest TEXT,
UkrTestStatus TEXT,
UkrBall100 VARCHAR,
UkrBall12 VARCHAR,
UkrBall VARCHAR,
UkrAdaptScale FLOAT,
UkrPTName TEXT,
UkrPTRegName TEXT,
UkrPTAreaName TEXT,
UkrPTTerName TEXT,
histTest TEXT,
HistLang TEXT,
histTestStatus TEXT,
histBall100 VARCHAR,
histBall12 VARCHAR,
histBall VARCHAR,
histPTName TEXT,
histPTRegName TEXT,
histPTAreaName TEXT,
histPTTerName TEXT,
mathTest TEXT,
mathLang TEXT,
mathTestStatus TEXT,
mathBall100 VARCHAR,
mathBall12 VARCHAR,
mathBall VARCHAR,
mathPTName TEXT,
mathPTRegName TEXT,
mathPTAreaName TEXT,
mathPTTerName TEXT,
physTest TEXT,
physLang TEXT,
physTestStatus TEXT,
physBall100 VARCHAR,
physBall12 VARCHAR,
physBall VARCHAR,
physPTName TEXT,
physPTRegName TEXT,
physPTAreaName TEXT,
physPTTerName TEXT,
chemTest TEXT,
chemLang TEXT,
chemTestStatus TEXT,
chemBall100 VARCHAR,
chemBall12 VARCHAR,
chemBall VARCHAR,
chemPTName TEXT,
chemPTRegName TEXT,
chemPTAreaName TEXT,
chemPTTerName TEXT,
bioTest TEXT,
bioLang TEXT,
bioTestStatus TEXT,
bioBall100 VARCHAR,
bioBall12 VARCHAR,
bioBall VARCHAR,
bioPTName TEXT,
bioPTRegName TEXT,
bioPTAreaName TEXT,
bioPTTerName TEXT,
geoTest TEXT,
geoLang TEXT,
geoTestStatus TEXT,
geoBall100 VARCHAR,
geoBall12 VARCHAR,
geoBall VARCHAR,
geoPTName TEXT,
geoPTRegName TEXT,
geoPTAreaName TEXT,
geoPTTerName TEXT,
engTest TEXT,
engTestStatus TEXT,
engBall100 VARCHAR,
engBall12 VARCHAR,
engDPALevel TEXT,
engBall VARCHAR,
engPTName TEXT,
engPTRegName TEXT,
engPTAreaName TEXT,
engPTTerName TEXT,
fraTest TEXT,
fraTestStatus TEXT,
fraBall100 VARCHAR,
fraBall12 VARCHAR,
fraDPALevel TEXT,
fraBall VARCHAR,
fraPTName TEXT,
fraPTRegName TEXT,
fraPTAreaName TEXT,
fraPTTerName TEXT,
deuTest TEXT,
deuTestStatus TEXT,
deuBall100 VARCHAR,
deuBall12 VARCHAR,
deuDPALevel TEXT,
deuBall VARCHAR,
deuPTName TEXT,
deuPTRegName TEXT,
deuPTAreaName TEXT,
deuPTTerName TEXT,
spaTest TEXT,
spaTestStatus TEXT,
spaBall100 VARCHAR,
spaBall12 VARCHAR,
spaDPALevel TEXT,
spaBall VARCHAR,
spaPTName TEXT,
spaPTRegName TEXT,
spaPTAreaName TEXT,
spaPTTerName TEXT,
Year INTEGER NOT NULL
);''')
print("\nTable created successfully\n")
print("--------------------------\n")
#Функція, що заповнить створену таблицю з csv-файлу
def insert_func(csv_data, year, conn, cursor, logs_f):
    """Bulk-load one year's results CSV into DIFFERENCE, with timing logs.

    Rows are inserted in batches of 100; if the database connection drops
    (ADMIN_SHUTDOWN), the function reconnects and keeps going.  Returns the
    (possibly re-established) connection and cursor.

    Bug fixes versus the original:
    - the handler read `psycopg2.OperationalError.pgcode` on the *class*
      (a property object, never equal to any error code), so the reconnect
      branch could never trigger; bind the exception with `as exc` and read
      `exc.pgcode`;
    - after reconnecting, only `cursor` was rebound while commits (and the
      return value) kept using the dead `conn`; rebind both and return
      `conn` instead of the module-level `con`.

    NOTE(review): values are interpolated into the SQL text with manual
    quote escaping rather than passed as parameters, and the csv reader is
    not rewound on reconnect (the in-flight batch may be lost) — both kept
    as-is to preserve the original loading semantics, but worth revisiting.
    """
    start_time = datetime.datetime.now()
    logs_f.write(str(start_time) + " - час, коли почалось відкриття " + csv_data + '\n')
    with open(csv_data, "r", encoding="cp1251") as csv_file:
        columns = ["OUTID", "Birth", "SEXTYPENAME", "REGNAME", "AREANAME", "TERNAME", "REGTYPENAME", "TerTypeName",
                   "ClassProfileNAME", "ClassLangName", "EONAME", "EOTYPENAME", "EORegName", "EOAreaName", "EOTerName",
                   "EOParent", "UkrTest", "UkrTestStatus", "UkrBall100", "UkrBall12", "UkrBall", "UkrAdaptScale",
                   "UkrPTName", "UkrPTRegName", "UkrPTAreaName", "UkrPTTerName", "histTest", "HistLang",
                   "histTestStatus", "histBall100", "histBall12", "histBall", "histPTName", "histPTRegName",
                   "histPTAreaName", "histPTTerName", "mathTest", "mathLang", "mathTestStatus", "mathBall100",
                   "mathBall12", "mathBall", "mathPTName", "mathPTRegName", "mathPTAreaName", "mathPTTerName",
                   "physTest", "physLang", "physTestStatus", "physBall100", "physBall12", "physBall",
                   "physPTName", "physPTRegName", "physPTAreaName", "physPTTerName", "chemTest", "chemLang",
                   "chemTestStatus", "chemBall100", "chemBall12", "chemBall", "chemPTName", "chemPTRegName",
                   "chemPTAreaName", "chemPTTerName", "bioTest", "bioLang", "bioTestStatus", "bioBall100", "bioBall12",
                   "bioBall", "bioPTName", "bioPTRegName", "bioPTAreaName", "bioPTTerName", "geoTest ", "geoLang",
                   "geoTestStatus", "geoBall100", "geoBall12", "geoBall", "geoPTName", "geoPTRegName", "geoPTAreaName",
                   "geoPTTerName", "engTest", "engTestStatus", "engBall100", "engBall12", "engDPALevel", "engBall",
                   "engPTName", "engPTRegName", "engPTAreaName", "engPTTerName", "fraTest", "fraTestStatus",
                   "fraBall100", "fraBall12", "fraDPALevel", "fraBall", "fraPTName", "fraPTRegName", "fraPTAreaName",
                   "fraPTTerName", "deuTest", "deuTestStatus", "deuBall100", "deuBall12", "deuDPALevel", "deuBall",
                   "deuPTName", "deuPTRegName", "deuPTAreaName", "deuPTTerName", "spaTest", "spaTestStatus",
                   "spaBall100",
                   "spaBall12", "spaDPALevel", "spaBall", "spaPTName", "spaPTRegName", "spaPTAreaName", "spaPTTerName"]
        print(csv_data + " - на опрацюванні...\n")
        csv_reader = csv.DictReader(csv_file, delimiter=';')
        batches_inserted = 0
        batch_size = 100
        inserted_all = False
        # Keep trying until every remaining row has been committed.
        while not inserted_all:
            try:
                insert_query = '''INSERT INTO DIFFERENCE (year, ''' + ', '.join(columns) + ') VALUES '
                count = 0
                for row in csv_reader:
                    count += 1
                    for key in row:
                        if row[key] == 'null':
                            # Left unquoted on purpose so it becomes SQL NULL.
                            pass
                        elif key.lower() != 'birth' and 'ball' not in key.lower():
                            # Quote text values, doubling embedded quotes.
                            row[key] = "'" + row[key].replace("'", "''") + "'"
                        elif 'ball100' in key.lower():
                            # Score columns use a decimal comma in the source.
                            row[key] = row[key].replace(',', '.')
                    insert_query += '\n\t(' + str(year) + ', ' + ','.join(row.values()) + '),'
                    if count == batch_size:
                        count = 0
                        insert_query = insert_query.rstrip(',') + ';'
                        cursor.execute(insert_query)
                        conn.commit()
                        batches_inserted += 1
                        insert_query = '''INSERT INTO DIFFERENCE (year, ''' + ', '.join(columns) + ') VALUES '
                # Flush the final partial batch, then we are done.
                if count != 0:
                    insert_query = insert_query.rstrip(',') + ';'
                    cursor.execute(insert_query)
                    conn.commit()
                inserted_all = True
            except psycopg2.OperationalError as exc:
                # Database went down: poll until the connection comes back.
                if exc.pgcode == psycopg2.errorcodes.ADMIN_SHUTDOWN:
                    print("Немає з'єднання...")
                    connection_restored = False
                    while not connection_restored:
                        try:
                            connection = psycopg2.connect(host="localhost",
                                                          database="lab1",
                                                          user="postgres",
                                                          password="123456789",
                                                          port="5432")
                            cursor = connection.cursor()
                            # Rebind so later commits use the live connection.
                            conn = connection
                            connection_restored = True
                        except psycopg2.OperationalError:
                            pass
                    print("З'єднання відновлено!")
                # Other operational errors: fall through and retry the
                # remaining rows, matching the original loop behavior.
    end_time = datetime.datetime.now()
    logs_f.write(str(end_time) + " - час, коли файл був повністю оброблений\n")
    logs_f.write('Витрачено часу: ' + str(end_time - start_time) + '\n\n')
    return conn, cursor
# Create the file that will hold the timing measurements.
logs_file = open('log_of_time.txt', 'w')
# Load both yearly CSV files, timing each one. `con`/`cur` are the psycopg2
# connection and cursor created earlier in this file -- TODO confirm.
con, cur = insert_func("Odata2019File.csv", 2019, con, cur, logs_file)
con, cur = insert_func("Odata2020File.csv", 2020, con, cur, logs_file)
logs_file.close()
#print("--------------------------\n")
#print("Обидва файла опрацьовані та занесені до csv\n")
#Функція, що виконує запит до БД та формує результат у 'result.csv'
def result_func(result_f, con, cur):
    """Run the best-history-score-per-region query and dump it to CSV.

    Args:
        result_f: path of the CSV file to write.
        con, cur: live DB connection and cursor; returned unchanged so the
            caller keeps its handles.
    """
    query = '''
        SELECT REGNAME AS "Область", Year AS "Рік", max(histBall100) AS "Максимальний бал"
        FROM DIFFERENCE
        WHERE histTestStatus = 'Зараховано'
        GROUP BY REGNAME, Year;
        '''
    cur.execute(query)
    print("Запит сформовано")
    header = ['Область', 'Рік', 'Найкращий бал з Історії України']
    with open(result_f, 'w', newline='', encoding="utf-8") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(header)
        # The cursor is iterable; stream every result row straight out.
        writer.writerows(cur)
    return con, cur
print("--------------------------\n")
con, cur = result_func('result_1.csv', con, cur)
print("Результат запиту записаний у 'result_1.csv' ")
con.commit()
con.close()
|
998,951 | 82bf52b2624597b5db43b3195f8d9fc4c17f6d90 | import unittest
from django.urls import reverse
from django.test import Client
from .models import University, Course, Subject
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.contrib.contenttypes.models import ContentType
def create_django_contrib_auth_models_user(**kwargs):
    """Persist a User with stock username/email, overridable via kwargs."""
    params = {
        "username": "username",
        "email": "username@tempurl.com",
    }
    params.update(**kwargs)
    return User.objects.create(**params)
def create_django_contrib_auth_models_group(**kwargs):
    """Persist a Group named "group" unless overridden via kwargs."""
    params = {"name": "group"}
    params.update(**kwargs)
    return Group.objects.create(**params)
def create_django_contrib_contenttypes_models_contenttype(**kwargs):
    """Persist a ContentType built entirely from the supplied kwargs."""
    params = {}
    params.update(**kwargs)
    return ContentType.objects.create(**params)
def create_university(**kwargs):
    """Persist a University from stock field values, overridable via kwargs."""
    params = {
        "name": "name",
        "university_code": "university_code",
        "description": "description",
        "founded": "founded",
        "address": "address",
        "phone": "phone",
        "logo": "logo",
    }
    params.update(**kwargs)
    return University.objects.create(**params)
def create_course(**kwargs):
    """Persist a Course; creates a parent University unless one is supplied."""
    params = {
        "name": "name",
        "course_type": "course_type",
        "degree_type": "degree_type",
        "years": "years",
        "description": "description",
        "course_code": "course_code",
        "slug": "slug",
        "cover": "cover",
    }
    params.update(**kwargs)
    # Only build a University when the caller did not pass one (lazily, so no
    # extra DB row is created otherwise).
    if "university" not in params:
        params["university"] = create_university()
    return Course.objects.create(**params)
def create_subject(**kwargs):
    """Persist a Subject; creates a parent Course unless one is supplied."""
    params = {
        "name": "name",
        "year": "year",
        "subject_code": "subject_code",
        "slug": "slug",
        "cover": "cover",
    }
    params.update(**kwargs)
    if "course" not in params:
        params["course"] = create_course()
    return Subject.objects.create(**params)
class UniversityViewTest(unittest.TestCase):
    """Smoke tests for the University list/create/detail/update views."""

    def setUp(self):
        self.client = Client()

    def test_list_university(self):
        resp = self.client.get(reverse('universities_university_list'))
        self.assertEqual(resp.status_code, 200)

    def test_create_university(self):
        payload = {
            "name": "name",
            "university_code": "university_code",
            "description": "description",
            "founded": "founded",
            "address": "address",
            "phone": "phone",
            "logo": "logo",
        }
        resp = self.client.post(reverse('universities_university_create'), data=payload)
        # A successful create redirects.
        self.assertEqual(resp.status_code, 302)

    def test_detail_university(self):
        university = create_university()
        resp = self.client.get(reverse('universities_university_detail', args=[university.pk, ]))
        self.assertEqual(resp.status_code, 200)

    def test_update_university(self):
        university = create_university()
        payload = {
            "name": "name",
            "university_code": "university_code",
            "description": "description",
            "founded": "founded",
            "address": "address",
            "phone": "phone",
            "logo": "logo",
        }
        resp = self.client.post(reverse('universities_university_update', args=[university.pk, ]), payload)
        self.assertEqual(resp.status_code, 302)
class CourseViewTest(unittest.TestCase):
    """Smoke tests for the Course list/create/detail/update views."""

    def setUp(self):
        self.client = Client()

    def test_list_course(self):
        resp = self.client.get(reverse('universities_course_list'))
        self.assertEqual(resp.status_code, 200)

    def test_create_course(self):
        payload = {
            "name": "name",
            "course_type": "course_type",
            "degree_type": "degree_type",
            "years": "years",
            "description": "description",
            "course_code": "course_code",
            "slug": "slug",
            "cover": "cover",
            "university": create_university().pk,
        }
        resp = self.client.post(reverse('universities_course_create'), data=payload)
        self.assertEqual(resp.status_code, 302)

    def test_detail_course(self):
        course = create_course()
        # Course detail URLs are keyed by slug, not pk.
        resp = self.client.get(reverse('universities_course_detail', args=[course.slug, ]))
        self.assertEqual(resp.status_code, 200)

    def test_update_course(self):
        course = create_course()
        payload = {
            "name": "name",
            "course_type": "course_type",
            "degree_type": "degree_type",
            "years": "years",
            "description": "description",
            "course_code": "course_code",
            "slug": "slug",
            "cover": "cover",
            "university": create_university().pk,
        }
        resp = self.client.post(reverse('universities_course_update', args=[course.slug, ]), payload)
        self.assertEqual(resp.status_code, 302)
class SubjectViewTest(unittest.TestCase):
    """Smoke tests for the Subject list/create/detail/update views."""

    def setUp(self):
        self.client = Client()

    def test_list_subject(self):
        resp = self.client.get(reverse('universities_subject_list'))
        self.assertEqual(resp.status_code, 200)

    def test_create_subject(self):
        payload = {
            "name": "name",
            "year": "year",
            "subject_code": "subject_code",
            "slug": "slug",
            "cover": "cover",
            "course": create_course().pk,
        }
        resp = self.client.post(reverse('universities_subject_create'), data=payload)
        self.assertEqual(resp.status_code, 302)

    def test_detail_subject(self):
        subject = create_subject()
        # Subject detail URLs are keyed by slug, not pk.
        resp = self.client.get(reverse('universities_subject_detail', args=[subject.slug, ]))
        self.assertEqual(resp.status_code, 200)

    def test_update_subject(self):
        subject = create_subject()
        payload = {
            "name": "name",
            "year": "year",
            "subject_code": "subject_code",
            "slug": "slug",
            "cover": "cover",
            "course": create_course().pk,
        }
        resp = self.client.post(reverse('universities_subject_update', args=[subject.slug, ]), payload)
        self.assertEqual(resp.status_code, 302)
|
998,952 | 4098fa1cfc8176b22603aec502902f3b2af9d954 | from fedwriter import FedWriter
from selenium import webdriver
import sys
import csv
from businessstarts import BlsCount
from businessstarts import BusStarts
import businessdata
import os
import time
import requests as req, xml.etree.ElementTree as ET, pandas as pd
# Query the BLS QCEW maps endpoint (Alabama) for 2016-Q3 establishment data
# as XML. NOTE(review): period/industry/ownerType are hard-coded -- confirm
# whether they should come from the commented-out sys.argv handling below.
payload = {'period':'2016-Q3','industry':'10','ownerType':'5',\
           'distribution':'Quantiles','req_type':'xml'}
r = req.get('https://beta.bls.gov/maps/cew/AL',params=payload)
root = ET.fromstring(r.text)
# Sanity check: print one (establishment count, FIPS code) pair per record.
for c in root.findall('record'):
    est = c.find('noOfEstablishments').text
    fips = c.find('fips').text
    print(est, fips)
def xml_df(xml_data):
    """Flatten a one-level XML document into a pandas DataFrame.

    Each child of the root becomes a row; each grandchild's text becomes a
    cell. Column names are the grandchild tags, in first-seen order across
    the whole document.
    """
    root = ET.fromstring(xml_data)
    headers = []
    records = []
    for child in root:
        row = [field.text for field in child]
        # Collect any tags not seen yet, preserving document order.
        for field in child:
            if field.tag not in headers:
                headers.append(field.tag)
        records.append(row)
    return pd.DataFrame(records, columns=headers)
test = xml_df(r.text)
########################
# Get arguments
# inlocation = sys.argv[2]
# inmeasure = sys.argv[1]
dllocation = os.path.join(os.getcwd(), 'data')
outputlocation = os.path.join(os.getcwd(), 'output')
inmeasure = 'newbiz'
# Accumulates one BusStarts record per (county, quarter) pair.
measurelist = []
# BUG FIX: the original probed os.stat() inside a bare try/except and called
# os.mkdir() on any failure, hiding real errors (e.g. permissions) and racing
# with concurrent runs; makedirs(exist_ok=True) creates the directory
# idempotently.
os.makedirs(dllocation, exist_ok=True)
print(dllocation)
print(inmeasure)
# print(inyear)
#########################################
# Looping through States
# For every state: drive the BLS QCEW map page with Selenium, download the
# CSV feed for each quarter, parse it into BlsCount records, then compute
# quarter-over-quarter establishment-count differences per county (FIPS).
# NOTE(review): nesting below was reconstructed from the logic (the diff pass
# uses the per-state blslist, so it sits inside the state loop) -- confirm
# against the original file.
for url in businessdata.states:
    print(url)
    blslist = []
    # NOTE(review): `set` shadows the builtin; presumably each entry is a
    # (current, previous) quarter-label pair -- verify in businessdata.
    for set in businessdata.quarters:
        print(set)
        ########################
        # Create Profile for Firefox
        profile = webdriver.FirefoxProfile()
        profile.accept_untrusted_certs = True
        profile.set_preference('browser.download.folderList', 2) # custom location
        profile.set_preference('browser.download.manager.showWhenStarting', False)
        profile.set_preference('browser.download.dir',dllocation)
        # Suppress the save dialog so the CSV lands directly in dllocation.
        profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "'text/plain, application/vnd.ms-excel, text/csv, text/comma-separated-values, application/octet-stream, application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        profile.set_preference("plugin.disable_full_page_plugin_for_types", "'text/plain, application/vnd.ms-excel, text/csv, text/comma-separated-values, application/octet-stream, application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
        #########################
        # Open Web driver
        driver = webdriver.Firefox(profile)
        # driver = webdriver.Chrome()
        driver.get(url[2])
        # Pick the quarter in the page's <select> and refresh the map data.
        driver.find_element_by_xpath("//select[@name='period']//option[contains(.,'" + set[0] + "')]").click()
        driver.find_element_by_name("Update").click()
        # Poll until the CSV download link appears, then click it.
        linknotthere=True
        while linknotthere:
            time.sleep(1)
            try:
                driver.find_element_by_link_text('CSV Data Feed').click()
                linknotthere=False
            except:
                linknotthere=True
        # Poll until the browser has finished writing the download.
        test=True
        path=dllocation + '\\bls_qcew_maps.csv'
        while test:
            time.sleep(1)
            if os.path.isfile(path):
                test=False
                with open(path) as csvfile:
                    reader = csv.reader(csvfile, delimiter=',', quotechar='"')
                    print('CSV OPEN')
                    counter=1
                    # Skip the header row (counter 1) and blank rows.
                    for row in reader:
                        print('csvrow:'+str(counter))
                        if counter>1 and len(row[0])>1:
                            blsCount=BlsCount()
                            print(row)
                            blsCount.set_yearquarter(row[1])
                            blsCount.set_businesscount(row[2])
                            blsCount.set_FIPS(row[8])
                            blsCount.set_state(row[9])
                            blslist.append(blsCount)
                            blsCount.print()
                        counter+=1
        driver.quit()
        # Remove the download so the next quarter's file gets the same name.
        test=True
        while test:
            time.sleep(1)
            try:
                test=False
                os.remove(path)
            except:
                test=False
    # Difference pass: for each quarter pair, subtract the previous quarter's
    # establishment count from the current one, county by county.
    counter=0
    for set in businessdata.quarters:
        # NOTE(review): only pairs after the first two are processed
        # (counter>1) -- presumably the leading entries have no predecessor.
        if counter>1:
            for rec in blslist:
                if (str(rec.get_quarter())+" "+str(rec.get_year()))==set[0]:
                    #print("into loop")
                    for rec2 in blslist:
                        # print(str(rec2.get_quarter()) + " " + str(rec2.get_year()))
                        # print(set[1])
                        # print(rec.get_FIPS()+"|"+rec2.get_FIPS())
                        if (str(rec2.get_quarter()) + " " + str(rec2.get_year())) == set[1] and rec.get_FIPS()==rec2.get_FIPS():
                            result=(int(rec.get_businesscount())-int(rec2.get_businesscount()))
                            bussstart = BusStarts()
                            bussstart.set_FIPS(rec.get_FIPS())
                            bussstart.set_measure(result)
                            bussstart.set_date(rec.get_date())
                            measurelist.append(bussstart)
                            break
        counter+=1
# NOTE(review): this counter is never read again -- appears to be dead code.
counter=0
#################################
# create writing object
writer = FedWriter(inmeasure, outputlocation)
#################################
# push list into writer and write
for obj in measurelist:
    writer.add(obj.get_date(), obj.get_measure(), obj.get_FIPS())
    print(obj.get_date()+str(obj.get_measure())+obj.get_FIPS())
writer.output_msr_file()
998,953 | e03bb58b24774d695efaaa1e9efd72944748a01d | from .. import errors
from .. import utils
from ..types import CancellableStream
class ExecApiMixin:
    """Mixin implementing the Docker Engine /exec HTTP endpoints:
    create, inspect, resize and start exec instances in running containers."""

    @utils.check_resource('container')
    def exec_create(self, container, cmd, stdout=True, stderr=True,
                    stdin=False, tty=False, privileged=False, user='',
                    environment=None, workdir=None, detach_keys=None):
        """
        Sets up an exec instance in a running container.

        Args:
            container (str): Target container where exec instance will be
                created
            cmd (str or list): Command to be executed
            stdout (bool): Attach to stdout. Default: ``True``
            stderr (bool): Attach to stderr. Default: ``True``
            stdin (bool): Attach to stdin. Default: ``False``
            tty (bool): Allocate a pseudo-TTY. Default: False
            privileged (bool): Run as privileged.
            user (str): User to execute command as. Default: root
            environment (dict or list): A dictionary or a list of strings in
                the following format ``["PASSWORD=xxx"]`` or
                ``{"PASSWORD": "xxx"}``.
            workdir (str): Path to working directory for this exec session
            detach_keys (str): Override the key sequence for detaching
                a container. Format is a single character `[a-Z]`
                or `ctrl-<value>` where `<value>` is one of:
                `a-z`, `@`, `^`, `[`, `,` or `_`.
                ~/.docker/config.json is used by default.

        Returns:
            (dict): A dictionary with an exec ``Id`` key.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        # Environment support requires Engine API >= 1.25; fail fast otherwise.
        if environment is not None and utils.version_lt(self._version, '1.25'):
            raise errors.InvalidVersion(
                'Setting environment for exec is not supported in API < 1.25'
            )
        # A shell-style command string is tokenised into an argv list.
        if isinstance(cmd, str):
            cmd = utils.split_command(cmd)
        # Dict env is converted to the API's ["KEY=value", ...] list form.
        if isinstance(environment, dict):
            environment = utils.utils.format_environment(environment)
        data = {
            'Container': container,
            'User': user,
            'Privileged': privileged,
            'Tty': tty,
            'AttachStdin': stdin,
            'AttachStdout': stdout,
            'AttachStderr': stderr,
            'Cmd': cmd,
            'Env': environment,
        }
        if workdir is not None:
            if utils.version_lt(self._version, '1.35'):
                raise errors.InvalidVersion(
                    'workdir is not supported for API version < 1.35'
                )
            data['WorkingDir'] = workdir
        # Explicit detach_keys wins; otherwise fall back to the value from
        # the loaded docker config file, if any.
        if detach_keys:
            data['detachKeys'] = detach_keys
        elif 'detachKeys' in self._general_configs:
            data['detachKeys'] = self._general_configs['detachKeys']
        url = self._url('/containers/{0}/exec', container)
        res = self._post_json(url, data=data)
        return self._result(res, True)

    def exec_inspect(self, exec_id):
        """
        Return low-level information about an exec command.

        Args:
            exec_id (str): ID of the exec instance

        Returns:
            (dict): Dictionary of values returned by the endpoint.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        # Accept the dict returned by exec_create() as well as a plain ID.
        if isinstance(exec_id, dict):
            exec_id = exec_id.get('Id')
        res = self._get(self._url("/exec/{0}/json", exec_id))
        return self._result(res, True)

    def exec_resize(self, exec_id, height=None, width=None):
        """
        Resize the tty session used by the specified exec command.

        Args:
            exec_id (str): ID of the exec instance
            height (int): Height of tty session
            width (int): Width of tty session
        """
        # Accept the dict returned by exec_create() as well as a plain ID.
        if isinstance(exec_id, dict):
            exec_id = exec_id.get('Id')
        params = {'h': height, 'w': width}
        url = self._url("/exec/{0}/resize", exec_id)
        res = self._post(url, params=params)
        self._raise_for_status(res)

    @utils.check_resource('exec_id')
    def exec_start(self, exec_id, detach=False, tty=False, stream=False,
                   socket=False, demux=False):
        """
        Start a previously set up exec instance.

        Args:
            exec_id (str): ID of the exec instance
            detach (bool): If true, detach from the exec command.
                Default: False
            tty (bool): Allocate a pseudo-TTY. Default: False
            stream (bool): Return response data progressively as an iterator
                of strings, rather than a single string.
            socket (bool): Return the connection socket to allow custom
                read/write operations. Must be closed by the caller when done.
            demux (bool): Return stdout and stderr separately

        Returns:
            (generator or str or tuple): If ``stream=True``, a generator
            yielding response chunks. If ``socket=True``, a socket object for
            the connection. A string containing response data otherwise. If
            ``demux=True``, a tuple with two elements of type byte: stdout and
            stderr.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        # we want opened socket if socket == True
        data = {
            'Tty': tty,
            'Detach': detach
        }
        # For attached runs, ask the daemon to upgrade the HTTP connection
        # so the raw stream can be read back; detached runs need no upgrade.
        headers = {} if detach else {
            'Connection': 'Upgrade',
            'Upgrade': 'tcp'
        }
        res = self._post_json(
            self._url('/exec/{0}/start', exec_id),
            headers=headers,
            data=data,
            stream=True
        )
        if detach:
            # Nothing to stream; consume the response and release it.
            try:
                return self._result(res)
            finally:
                res.close()
        if socket:
            return self._get_raw_response_socket(res)
        output = self._read_from_socket(res, stream, tty=tty, demux=demux)
        if stream:
            # Wrap so the caller can cancel the stream and close the response.
            return CancellableStream(output, res)
        else:
            return output
|
998,954 | 0cd24eb0955837430cb1138ff23902343ff1c59a | import site
from dev_appserver import EXTRA_PATHS
for pth in EXTRA_PATHS:
site.addsitedir(pth)
|
998,955 | 1ce8c3869268a21e613e9d8eaa3d38ddde1c3c98 | import argparse
import sys
import pandas as pd
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow_addons.metrics import FBetaScore
from tensorflow.keras import optimizers, callbacks, Model
from tensorflow.data.experimental import AUTOTUNE
from tqdm.auto import tqdm
from tqdm.keras import TqdmCallback
import wandb
from wandb.keras import WandbCallback
sys.path.append(
"fsdl_deforestation_detection/fsdl_deforestation_detection/data/"
)
sys.path.append(
"fsdl_deforestation_detection/fsdl_deforestation_detection/modeling/"
)
import data_utils
import model_utils
from models import ResNet, func_resnet
# Command-line interface. BUG FIX: argparse delivers every value as str unless
# told otherwise, so numeric options now carry explicit type= converters.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--pretrained",
    action="store_true",
    help="use a ResNet pretrained on BigEarthNet",
)
parser.add_argument(
    "--learning_rate",
    type=float,
    default=0.03,
    help="initial learning rate",
)
parser.add_argument(
    "--epochs",
    type=int,
    default=[50, 50],
    nargs="+",
    help="number of epochs to train on",
)
parser.add_argument(
    "--batch_size",
    type=int,
    default=32,
    help="number of samples per batch",
)
parser.add_argument(
    "--random_seed",
    type=int,
    default=42,
    help="random seed for reproducible results",
)
parser.add_argument(
    "--task",
    default=["orig_labels", "deforestation"],
    nargs="+",
    # BUG FIX: the help text was copy-pasted from --random_seed.
    help="task(s) to train on: orig_labels and/or deforestation",
)
args = parser.parse_args()
# Handle arguments
# BUG FIX: list("deforestation") splits the string into characters; a scalar
# must be wrapped in a one-element list instead.
if not isinstance(args.task, list):
    args.task = [args.task]
if not isinstance(args.epochs, list):
    args.epochs = [args.epochs]
args.epochs = [int(epochs) for epochs in args.epochs]
if args.pretrained:
    # BUG FIX: the original wrote `assert (expr, "msg",)` -- a non-empty
    # tuple, which is always truthy, so the check could never fire.
    assert len(args.epochs) == len(args.task) * 2, (
        "When training a pretrained model on a pipeline of tasks, "
        "the number of epochs should be specified twice for each task (for each task, "
        "initially we train just the final layer, then we fine-tune all the weights). "
        "As such, we expect len(args.epochs) == len(args.task) * 2. "
        f"Currently specified {len(args.epochs)} epoch sets and {len(args.task)} tasks."
    )
else:
    assert len(args.epochs) == (len(args.task) * 2) - 1, (
        "When training a model from scratch on a pipeline of tasks, "
        "the number of epochs should be specified once for an initial task and twice "
        "for the remaining ones (for each task except the first, initially we train just "
        "the final layer, then we fine-tune all the weights). "
        "As such, we expect len(args.epochs) == len(args.task) * 2 - 1. "
        f"Currently specified {len(args.epochs)} epoch sets and {len(args.task)} tasks."
    )
# Set the random seed for reproducibility.
# BUG FIX: --random_seed was parsed but the seeds were hard-coded to 0.
seed = int(args.random_seed)
tf.random.set_seed(seed)
random.seed(seed)
np.random.seed(seed)
opt = optimizers.Adam(learning_rate=float(args.learning_rate))
loss = "binary_crossentropy"
count = 0
# Sequentially train on each requested task, reusing the (possibly rebuilt)
# model between tasks so later tasks fine-tune earlier weights.
for task in tqdm(args.task, desc="Tasks"):
    # ---- Data loading -----------------------------------------------------
    labels_df = pd.read_csv(data_utils.DATA_PATH + data_utils.LABELS_PATH)
    labels_df = data_utils.encode_tags(labels_df, drop_tags_col=True)
    if task == "deforestation":
        labels_df = data_utils.add_deforestation_label(labels_df)
        labels_df = labels_df[["image_name", "deforestation"]]

    # Close over labels_df so the generator needs no arguments.
    def data_gen():
        for i in data_utils.get_amazon_sample(labels_df):
            yield i

    # ---- Dataset creation -------------------------------------------------
    if task == "deforestation":
        labels_shape = 1
    else:
        labels_shape = len(data_utils.TAGS)
    dataset = tf.data.Dataset.from_generator(
        data_gen,
        output_signature=(
            tf.TensorSpec(shape=([256, 256, 3]), dtype=tf.float16),
            tf.TensorSpec(shape=(labels_shape), dtype=tf.uint8),
        ),
    )
    # ---- Train / validation / test split ----------------------------------
    n_samples = len(labels_df)
    train_set = dataset.take(int(0.9 * n_samples))
    test_set = dataset.skip(int(0.9 * n_samples))
    # BUG FIX: carve the validation split out *before* skipping it from the
    # training set. The original skipped first and then took the validation
    # samples from what remained, so every validation sample was also seen
    # during training (data leakage).
    val_set = train_set.take(int(0.1 * n_samples))
    train_set = train_set.skip(int(0.1 * n_samples))
    train_set = (
        train_set
        # .cache()
        .shuffle(buffer_size=1000)
        .batch(args.batch_size)
        .prefetch(AUTOTUNE)
    )
    val_set = (
        val_set
        # .cache()
        .shuffle(buffer_size=1000)
        .batch(args.batch_size)
        .prefetch(AUTOTUNE)
    )
    test_set = (
        test_set
        # .cache()
        .shuffle(buffer_size=1000)
        .batch(args.batch_size)
        .prefetch(AUTOTUNE)
    )
    # ---- Model construction -----------------------------------------------
    if args.pretrained:
        pretrain_dataset = "bigearthnet"
    else:
        pretrain_dataset = None
    # model = ResNet(pretrain_dataset=pretrain_dataset, pooling="max", task=task)
    model = func_resnet(
        pretrain_dataset=pretrain_dataset, pooling="max", task=task
    )
    if task == "orig_labels":
        n_outputs = 17
    elif task == "deforestation":
        n_outputs = 1
    else:
        raise Exception(
            f'ERROR: Unrecognized task "{task}". Please select one of "orig_labels" or "deforestation".'
        )
    model_metrics = [
        "accuracy",
        FBetaScore(num_classes=n_outputs, average="macro", beta=2.0),
    ]
    wandb.init(
        project="fsdl_deforestation_detection",
        entity="fsdl-andre-karthik",
        tags="mvp",
        reinit=True,
        config={**vars(args), **dict(current_task=task)},
    )
    # The epoch on which to start the full model training.
    initial_epoch = 0
    if args.pretrained or count > 0:
        # Phase 1: freeze the backbone and train only the classification head.
        model.layers[-2].trainable = False
        if count > 0:
            # Replace the head to match the new task's output size.
            outputs = layers.Dense(n_outputs, activation="sigmoid")(
                model.layers[-2].output
            )
            # BUG FIX: keras.Model's constructor arguments are `inputs` and
            # `outputs`; the original `input=`/`output=` raises a TypeError.
            model = Model(inputs=model.input, outputs=[outputs])
        model.compile(optimizer=opt, loss=loss, metrics=model_metrics)
        model.fit(
            train_set,
            validation_data=val_set,
            epochs=args.epochs[count],
            verbose=0,
            callbacks=[
                callbacks.EarlyStopping(
                    monitor="val_loss", min_delta=1e-4, patience=9
                ),
                callbacks.ReduceLROnPlateau(
                    monitor="val_loss",
                    min_delta=1e-4,
                    patience=5,
                    factor=0.1,
                    min_lr=1e-7,
                ),
                TqdmCallback(),
                WandbCallback(data_type="image", predictions=5),
                model_utils.ClearMemory(),
            ],
        )
        initial_epoch = args.epochs[count]
        count += 1
    # Phase 2: unfreeze everything and fine-tune the whole network.
    model.layers[-2].trainable = True
    model.compile(optimizer=opt, loss=loss, metrics=model_metrics)
    model.fit(
        train_set,
        validation_data=val_set,
        epochs=args.epochs[count],
        verbose=0,
        callbacks=[
            callbacks.EarlyStopping(
                monitor="val_loss", min_delta=1e-4, patience=9
            ),
            callbacks.ReduceLROnPlateau(
                monitor="val_loss",
                min_delta=1e-4,
                patience=5,
                factor=0.1,
                min_lr=1e-7,
            ),
            TqdmCallback(),
            WandbCallback(data_type="image", predictions=5),
            model_utils.ClearMemory(),
        ],
        initial_epoch=initial_epoch,
    )
    # ---- Evaluation -------------------------------------------------------
    test_scores = model.evaluate(test_set)
    wandb.log(
        dict(
            test_loss=test_scores[0],
            test_accuracy=test_scores[1],
            test_f2=test_scores[2],
        )
    )
    count += 1
998,956 | b856b56c86de7365ef46fd386273c3b802547f02 | shopping_list = ['watch', 'bag', 'perfume', 'shoes', 'bikini']
while True:
steps = ['C','R','U','D']
for index1, value1 in enumerate(steps):
print(index1+1, value1)
value1 = input('What steps you want to do?')
if value1 == 'C':
name = 'heels'
shopping_list.append(name)
print(shopping_list)
if value1 == 'R':
print(shopping_list)
if value1 == 'U':
for index, value in enumerate(shopping_list):
print(index+1, value)
old_value = input('Enter the item you want to edit')
value = input('new value')
index = shopping_list.index(old_value)
shopping_list[index] = value
for index, value in enumerate(shopping_list):
print(index+1, value)
if value1 == 'D':
value2 = input('Enter the item you want to delete')
index = shopping_list.index(value2)
deleted_item = shopping_list.pop(index)
print(shopping_list) |
998,957 | bfa65c04c784da5ef607cb7671cbb400c6943164 | # scaling factors
scale_factors = {'no_spine_scale' : 1.0, 'basal_scale' : 2.7, 'med_spine_rad_scale' : 1.2,
'med_spine_LM_scale' : 1.1, 'max_spine_rad_scale' : 1.6,
'thin_rad_spine_scale' : 2.6, 'thin_LM_spine_scale' : 1.2}
# Primary apical list
prim_apical = [
'somaA',
'dendA5_0',
'dendA5_01',
'dendA5_011',
'dendA5_0111',
'dendA5_01111',
'dendA5_011111',
'dendA5_0111111',
'dendA5_01111111',
'dendA5_011111111',
'dendA5_0111111111',
'dendA5_01111111111',
'dendA5_011111111111',
'dendA5_0111111111111',
'dendA5_01111111111111',
'dendA5_011111111111111',
'dendA5_0111111111111111',
'dendA5_01111111111111111']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
prim_apical = ['_' + item for item in prim_apical]
prim_apical = [item + '_' for item in prim_apical]
# no spine scale list
no_spine_scale = [
'dendA1_0',
'dendA1_01',
'dendA1_010',
'dendA2_0',
'dendA2_01',
'dendA2_010',
'dendA3_0',
'dendA3_00',
'dendA3_01',
'dendA4_0',
'dendA5_0',
'dendA5_01',
'dendA5_011']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
no_spine_scale = ['_' + item for item in no_spine_scale]
no_spine_scale = [item + '_' for item in no_spine_scale]
# basal spine scale list
basal_scale = [
'dendA1_0',
'dendA1_01',
'dendA1_010',
'dendA2_0',
'dendA2_01',
'dendA2_010',
'dendA3_0',
'dendA3_00',
'dendA3_01',
'dendA4_0']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
basal_scale = ['_' + item for item in basal_scale]
basal_scale = [item + '_' for item in basal_scale]
# medium spiny radial scale list
med_spine_rad_scale = [
'dendA5_0111',
'dendA5_01111',
'dendA5_011111',
'dendA5_0111111',
'dendA5_01111111',
'dendA5_011111111',
'dendA5_0111111111',
'dendA5_01111111111',
'dendA5_011111111111']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
med_spine_rad_scale = ['_' + item for item in med_spine_rad_scale]
med_spine_rad_scale = [item + '_' for item in med_spine_rad_scale]
# medium spiny LM scale list
med_spine_LM_scale = [
'dendA5_0111111111111111111',
'dendA5_011111111111111110']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
med_spine_LM_scale = ['_' + item for item in med_spine_LM_scale]
med_spine_LM_scale = [item + '_' for item in med_spine_LM_scale]
# max spiny rad scale list
max_spine_rad_scale = [
'dendA5_0111111111111',
'dendA5_01111111111111',
'dendA5_011111111111111',
'dendA5_0111111111111111',
'dendA5_01111111111111111',
'dendA5_011111111111111111']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
max_spine_rad_scale = ['_' + item for item in max_spine_rad_scale]
max_spine_rad_scale = [item + '_' for item in max_spine_rad_scale]
# thin rad scale list
thin_rad_spine_scale = [
'dendA5_0',
'dendA5_01',
'dendA5_011',
'dendA5_0111',
'dendA5_01111',
'dendA5_011111',
'dendA5_0111111',
'dendA5_01111111',
'dendA5_011111111',
'dendA5_0111111111',
'dendA5_01111111111',
'dendA5_011111111111',
'dendA5_0111111111111',
'dendA5_01111111111111',
'dendA5_011111111111111',
'dendA5_0111111111111111',
'dendA5_01111111111111111',
'dendA5_011111111111111111',
'dendA5_0111111111111111111',
'dendA5_011111111111111110',
'dendA5_01111111111111110',
'dendA5_0111111111111111110',
'dendA5_01111111111111111110',
'dendA5_011111111111111111100',
'dendA5_011111111111111111101',
'dendA5_0111111111111111111010',
'dendA5_0111111111111111111011',
'dendA5_01111111111111111111',
'dendA5_011111111111111111110',
'dendA5_011111111111111111111',
'dendA5_0111111111111111111110',
'dendA5_0111111111111111111111',
'dendA5_0111111111111111100',
'dendA5_0111111111111111101',
'dendA5_01111111111111111010',
'dendA5_01111111111111111011',
'dendA5_011111111111111110110',
'dendA5_011111111111111110111']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
thin_rad_spine_scale = ['_' + item for item in thin_rad_spine_scale]
thin_rad_spine_scale = [item + '_' for item in thin_rad_spine_scale]
# thin LM spine scale list
thin_LM_spine_scale = [
'dendA5_01111111111111110',
'dendA5_0111111111111111110',
'dendA5_01111111111111111110',
'dendA5_011111111111111111100',
'dendA5_011111111111111111101',
'dendA5_0111111111111111111010',
'dendA5_0111111111111111111011',
'dendA5_01111111111111111111',
'dendA5_011111111111111111110',
'dendA5_011111111111111111111',
'dendA5_0111111111111111111110',
'dendA5_0111111111111111111111',
'dendA5_0111111111111111100',
'dendA5_0111111111111111101',
'dendA5_01111111111111111010',
'dendA5_01111111111111111011',
'dendA5_011111111111111110110',
'dendA5_011111111111111110111']
# Add underscores to the beginning and end of each string to aid in proper compartment assignment in main
# program
thin_LM_spine_scale = ['_' + item for item in thin_LM_spine_scale]
thin_LM_spine_scale = [item + '_' for item in thin_LM_spine_scale]
|
998,958 | d560f08941eaa5babdb34b43aea2872e8b74b1f9 | from .stanza import Stanza
class FrontendStanza(Stanza):
    """
    Stanza subclass representing a "frontend" stanza.

    A frontend stanza binds an address/port for a cluster and routes its
    traffic to the cluster's default backend. Clusters may inject extra
    lines via a "frontend" entry in their haproxy config dictionary.
    """

    def __init__(self, cluster, bind_address=None):
        super(FrontendStanza, self).__init__("frontend")
        self.header = "frontend %s" % cluster.name
        # An empty bind address makes haproxy listen on all interfaces.
        bind_address = bind_address or ""
        self.add_lines(cluster.haproxy.get("frontend", []))
        self.add_line("bind %s:%s" % (bind_address, cluster.haproxy["port"]))
        self.add_line("default_backend %s" % cluster.name)
998,959 | 251a763a33bd859b4f4192a6e82f59acd2ecbc42 | import cv2 as cv
import numpy as np
# "C:\school\fydp\opencvTut\opencv-course\Resources\Photos\cat.jpg"
resourcesFolder = "Resources\\"
img = cv.imread(resourcesFolder+'\\Photos\\park.jpg')
cv.imshow('park',img) # display image
# about color channels : b,r,g can split into individual
blank = np.zeros(img.shape[:2], dtype ='uint8')
b,g,r = cv.split(img)
cv.imshow('b', b) # show kinda gray scale w pixel density, more of a color means more of a color, less darker
cv.imshow('g', g)
cv.imshow('r', r)
print(img.shape) # img size and color channel
print(b.shape)# just size
print(g.shape)
print(r.shape)
m = cv.merge([b,r,g])
cv.imshow('m', m)
blue = cv.merge([b,blank,blank])
green = cv.merge([blank,g,blank])
red = cv.merge([blank,blank,r])
#better dist of images by showing darker lighter were less, can still merge
cv.imshow('blue', blue) # show kinda gray scale w pixel density, more of a color means more of a color, less darker
cv.imshow('green', green)
cv.imshow('red', red)
cv.waitKey(0) # wait for delay for key
|
998,960 | c96d215533e7e6defd7e1bfee319faf4fa02a83e | import wx
import time
class MyFrame(wx.Frame):
    """Demo window exercising wx list, choice, gauge and slider widgets."""

    def __init__(self):
        wx.Frame.__init__(self,None,-1,"多模测试热补丁工具",size = (800,600))
        panel = wx.Panel(self)
        list1 = ["BPN2","BPL1" ,"BPC"]
        list2 = ["RRU1", "RRU2", "RRU3"]
        # ListBox instances
        self.listbox1 = wx.ListBox(panel,-1,(50,80),(200, 60),list1,wx.LB_SINGLE) # wx.LB_SINGLE allows selecting only one item
        self.listbox2 = wx.ListBox(panel, -1,(50, 150), (200, 60), list2, wx.LB_MULTIPLE)# multi-select
        # CheckListBox instance
        self.listbox3 = wx.CheckListBox(panel,-1,(300,80),(200, 60),list1)
        # Choice instance
        self.listbox4 = wx.Choice(panel,-1,(300,200),(200, 40),list2)
        self.listbox4.Bind(wx.EVT_CHOICE,self.One_Play)
        # Progress bar demo
        self.gauge1 = wx.Gauge(panel,-1,100,(50, 250), (200, 60))
        self.value = 1
        self.gauge1.SetValue(self.value)
        # Advance the gauge whenever wx is idle
        self.Bind(wx.EVT_IDLE,self.Gauge_Test)
        # Slider
        self.slider = wx.Slider(panel,-1,10,10,100,(300, 350), (200, 60))
        self.slider.Bind(wx.EVT_SCROLL,self.Slider_Test)

    def Gauge_Test(self,event):
        # Idle handler: bump the gauge by one step up to 100.
        # NOTE(review): time.sleep() here blocks the GUI thread on every idle
        # event -- a wx.Timer would be the conventional approach.
        if self.value < 100:
            self.value += 1
            time.sleep(0.3)
            self.gauge1.SetValue(self.value)

    def Slider_Test(self,event):
        # Scroll handler: print the slider's current position.
        value = self.slider.GetValue()
        print("now value is:",value )

    def One_Play(self,event):
        # Choice handler: print the selected label.
        print("本次选择了吗:",self.listbox4.GetStringSelection())

    def Two_Play(self,event):
        # Handler for the multi-select listbox.
        # NOTE(review): never bound to any event in __init__ -- dead code?
        print("本次选择了吗:", self.listbox2.GetSelections())
if __name__ == "__main__":
    # Standard wx bootstrap: create the app, show the frame, run the loop.
    app = wx.App()
    main_window = MyFrame()
    main_window.Show()
    app.MainLoop()
|
998,961 | 062ee7adbb07c9483c5f269c8667f09ed24f60b5 | import dominions
import util
import urllib2
import json
link, install, uninstall = util.LinkSet().triple()
CONF_PATH = 'conf/dominions_discord.py'
USER_AGENT = 'PageBot (https://github.com/joodicator/pagebot, 1.0)'
def required_class_attr(attr_name):
    # Build a class-level property that raises until a config subclass
    # overrides it -- i.e. a "required abstract attribute" for conf entries.
    def get_required_class_attr(self):
        raise util.UserError("Error: the '%s' attribute of %s subclasses in %s"
            " must be present." % (attr_name, type(self).__name__, CONF_PATH))
    return util.classproperty(get_required_class_attr)
class ConfEntry(object):
    """Marker base class for entries declared in the user conf file."""
    pass
class DiscordChannel(ConfEntry):
    # A Discord webhook target; conf subclasses must set `webhook`
    # (h_new_turn_report also reads a `games` collection -- TODO confirm
    # it is defined on the conf subclasses).
    webhook = required_class_attr('webhook')
class DominionsGame(ConfEntry):
    # A watched Dominions game. `name` and `channel` are required; the
    # optional prefix/suffix wrap the message posted to Discord.
    name = required_class_attr('name')
    channel = required_class_attr('channel')
    prefix = ''
    suffix = ''
class DiscordFormatter(dominions.TextFormatter):
    """Text formatter that emits Discord-flavoured markdown."""
    @staticmethod
    def bold(text):
        # Discord renders **...** as bold.
        return '**' + str(text) + '**'
# Load the user conf file; entries there subclass the _DiscordChannel /
# _DominionsGame templates injected into its globals below.
conf = util.fdict(CONF_PATH, globals={
    '_DiscordChannel': DiscordChannel,
    '_DominionsGame': DominionsGame,
}, locals=None, class_dict=False)
@link('dominions.new_turn_report')
def h_new_turn_report(bot, report, prev_report):
    # Fan a new-turn report out to every Discord webhook whose channel lists
    # this game. (Python 2 module: itervalues / urllib2.)
    for game in conf.itervalues():
        if not issubclass(game, DominionsGame): continue
        if report.name != game.name: continue
        for channel in conf.itervalues():
            if not issubclass(channel, DiscordChannel): continue
            # `games` is expected to be defined by channel conf subclasses
            # -- TODO confirm against the conf file.
            if game not in channel.games: continue
            message = report.show_diff_text(prev_report, DiscordFormatter)
            message = game.prefix + message + game.suffix
            # POST the message to the channel's webhook as JSON.
            urllib2.urlopen(urllib2.Request(
                url=channel.webhook,
                data=json.dumps({'content': message}),
                headers={
                    'User-Agent': USER_AGENT,
                    'Content-Type': 'application/json'
                },
            ))
|
998,962 | f73fdca180fdb28f889cbb7deedf8458df8b461a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
task.py
调整代码可以把Paul和Bart都正确删除掉
'''
L = ['Adam', 'Lisa', 'Paul', 'Bart']
# Removing index 2 ('Paul') shifts 'Bart' down to index 2 ...
print '删除元素Paul:', L.pop(2)
print '删除Paul元素,后列表为:', L
# ... so the same index removes 'Bart' as well.
print '删除元素Bart', L.pop(2)
print '删除Bart元素,后列表为:', L
|
998,963 | 2af627123cf2c2a81c50c2d11022517924eb27f6 | import mnist_softmax # NOQA
import numerai_softmax # NOQA
|
998,964 | 0a5dcca67afe2356bc33309eaaaada89be7ea8dc | '''
while lykkja
input frá notenda sett inn í lista
endar while lykkju ef tala er neikvæð
Kíkja á lista eftir for lykkju og finna stærstu töluna með max fallinu
'''
number_list = []   # all numbers entered by the user
num_int = 0        # sentinel start value so the loop runs at least once
while num_int >= 0:
    num_int = int(input("Input a number: ")) # Do not change this line
    # Fill in the missing code
    # NOTE(review): the terminating negative number is appended too; max()
    # is unaffected as long as at least one non-negative value was entered.
    number_list.append(num_int)
print("The maximum is", max(number_list)) # Do not change this line
|
998,965 | f813d755670c9e777f6b406875be362b38d986d1 | # Copyright 2019 DTAI Research Group - KU Leuven.
# License: Apache License 2.0
# Author: Laurens Devos
# Package metadata for the bitboost distribution.
name = "bitboost"
from .sklearn import BitBoost, BitBoostRegressor, BitBoostClassifier
__author__ = "Laurens Devos"
__copyright__ = "Copyright 2019 DTAI Research Group - KU Leuven"
__license__ = "Apache License 2.0"
# Public API of the package.
__all__ = [
    "BitBoost",
    "BitBoostRegressor",
    "BitBoostClassifier"
]
|
998,966 | fffc8362b214f3aba16e5f044a83a192f19d3e43 | from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render, redirect
from .models import Following, Image, Profile
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.urls import reverse
# Create your views here.
@login_required(login_url='/accounts/login/')
def index(request):
    """Render the home feed with every uploaded image."""
    all_images = Image.objects.all()
    return render(request, "index.html", {"images": all_images})
def like(request, image_id):
    """Increment the like counter of an image and go back home.

    Raises Http404 when the requesting user has no Profile.
    """
    image = Image.objects.get(pk=image_id)
    current_user = request.user
    try:
        # Existence check: only users with a profile may like.
        profile = Profile.objects.get(user=current_user)
    except Profile.DoesNotExist:
        raise Http404
    image.likes = image.likes + 1
    # BUG FIX: the increment was never persisted -- without save() the
    # like count silently reset on every request.
    image.save()
    return HttpResponseRedirect(reverse('home'))
@login_required(login_url='/accounts/login/')
def profile(request):
    """Render the profile page: all profiles plus the follower/following
    rows of the signed-in user."""
    profiles=Profile.objects.all()
    followers=Following.objects.filter(username=request.user.username)
    followings=Following.objects.filter(followed=request.user.username)
    return render(request,"profile.html", {'profiles':profiles,'followers':followers, 'following':followings})
def following(request,id):
    """Create a Following row from the current user to user `id`,
    then redirect back to the referring page. Self-follows are rejected."""
    userTobefollowed=User.objects.get(id=id)
    currentUser=User.objects.get(id=request.user.id)
    print(currentUser.id)
    print(userTobefollowed.id)
    if userTobefollowed.id ==currentUser.id:
        # Self-follow: only logged, no row is created.
        print('you can\'t follow your self ')
    else:
        print('no')
        # NOTE(review): no duplicate check -- following twice creates two rows.
        folowerToadd=Following(username=currentUser.username,followed=userTobefollowed.username)
        print(folowerToadd)
        folowerToadd.save()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
@login_required(login_url='/accounts/login/')
def search_results(request):
    """Search users by (case-sensitive) username substring.

    Renders 'search.html' with the search term as `message` and the
    matching users as `user`; shows a notice when no term was given.
    """
    if 'people' in request.GET and request.GET["people"]:
        search_term = request.GET.get("people")
        # FIX: the old loop shadowed its own iterable ("for searched_articles
        # in searched_articles") and re-fetched every row with an extra
        # User.objects.get(); a single filtered queryset suffices.
        matched_users = User.objects.filter(username__contains=search_term)
        message = f"{search_term}"
        return render(request, 'search.html', {"message": message, "user": list(matched_users)})
    else:
        message = "You haven't searched for any term"
        return render(request, 'search.html', {"message": message})
998,967 | 2a11ec20d9155c5c2dbe1818befb6f67359557e7 | from django.shortcuts import render
from django.http import HttpResponse
from .forms import UserForm
import re
import numpy as np
import pickle
from sklearn.linear_model import LogisticRegression
def index(request):
    """Review-scoring view: on POST, rate the submitted review text with the
    pickled model and render the verdict; on GET, show the empty form."""
    if request.method == "POST":
        review = request.POST.get("review")
        # Map each word to its expected rating, then pad/truncate to 375 features.
        rev_s = text_transformer(review, load_vocabulary_rate('imdb.vocab', 'imdbEr.txt'))
        rev_s = text_fitter(375, rev_s)
        model_s = load_model("finalized_model.sav")
        # First decimal digit of P(positive) rounded to 1 dp, mapped to 1..10.
        rate = 1+int(str(round(model_s.predict_proba(rev_s.reshape(1, -1))[0][1], 1))[2])
        mood = 'Положительный' if rate > 5 else 'Отрицательный'
        return render(request, "evaluation.html", {"rate": rate,
                                                   "mood": mood})
    else:
        userform = UserForm()
        return render(request, "index.html", {"form": userform})
def text_cleaner(text):
    """Lower-case *text* and normalise punctuation for tokenisation."""
    cleaned = text.lower()
    cleaned = re.sub(r"[.,\")(>+<*/]", " ", cleaned)  # punctuation -> spaces
    cleaned = cleaned.replace("'", "")                # drop apostrophes
    cleaned = cleaned.replace("!", " !")              # detach '!' as its own token
    return cleaned.replace("?", " ?")                 # detach '?' as its own token
def text_transformer(text, vocabulary):
    """Clean *text* and map each token to its vocabulary rating (0 if unknown)."""
    # dict.get with a default replaces the old `vocabulary.get(w) != None`
    # comparison (PEP 8: compare to None with `is`) and its double lookup.
    return [vocabulary.get(word, 0) for word in text_cleaner(text).split()]
def load_model(filename):
    """Unpickle and return the object stored in *filename*.

    FIX: the file handle is now closed deterministically; the old one-liner
    left it to the garbage collector.
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def load_vocabulary_rate(vocab_words, vocab_rates):
    """Return {word: expected_rating} from the two parallel line-based files."""
    with open(vocab_words, 'r') as handle:
        words = handle.read().splitlines()
    with open(vocab_rates, 'r') as handle:
        ratings = [float(value) for value in handle.read().splitlines()]
    return dict(zip(words, ratings))
def load_vocabulary_numbs(vocab_words):
    """Return {word: 1-based line number} for the vocabulary file."""
    with open(vocab_words, 'r') as handle:
        return {line.strip(): index for index, line in enumerate(handle, start=1)}
def text_fitter(length, text):
    """Pad *text* with zeros (or truncate it) to exactly *length* entries.

    BUG FIX: this first definition was broken -- after the padding branch it
    contained copy-pasted lines from load_vocabulary_rate referencing the
    undefined name `vocab_words`, so any input with len(text) >= length
    raised NameError. Restored to match the correct definition below.
    """
    if len(text) < length:
        return np.array(list(text) + [0 for x in range(length - len(text))])
    else:
        return np.array(list(text)[:length])
# NOTE(review): duplicate definition -- load_vocabulary_numbs is already
# defined above with an identical body; this one silently rebinds the name.
def load_vocabulary_numbs(vocab_words):
    """Return {word: 1-based line number} for the vocabulary file."""
    vocabulary_numbs = {}
    number = 1
    with open(vocab_words, 'r') as file_handler:
        for word in file_handler:
            vocabulary_numbs[word.strip()] = number
            number += 1
    return vocabulary_numbs
def text_fitter(length, text):
    """Pad with zeros or truncate *text* to exactly *length* entries."""
    items = list(text)
    if len(items) >= length:
        return np.array(items[:length])
    return np.array(items + [0] * (length - len(items)))
|
998,968 | de7154133df5f458975b02a242c2aa1c4f75d5cc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import glob
import time
import json
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter
import tensorflow as tf
import tensorboard as tb
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
import backbones
import losses
from modules.utils import AverageMeter, validate, save_checkpoint,accuracy, validate_one_class,visualize,to_1vRest, accuracy_l2, get_l2_pred
from modules.dataset import dataset_loader
from options import get_options
args = get_options()
# When resuming, reload the exact CLI arguments of the original run.
if(os.path.isdir(args.resume)):
    with open(os.path.join(args.resume,'commandline_args.txt'), 'r') as f:
        args.__dict__ = json.load(f)
# sync gpu order and bus order and select which to be used
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
# create dataset and model
classes, num_classes, input_size, train_loader, val_loader = dataset_loader(args.batch_size,args.workers, args.dataset)
backbone = backbones.__dict__[args.backbone](pretrained=True,input_size=input_size,vis2d=args.vis2d)
centers_reg = losses.__dict__['center_regressor'](backbone.embed_size,2,mutex_label=False)
backbone.cuda(), centers_reg.cuda()
cudnn.benchmark = True
# define optimizer
params2optimize = list(backbone.parameters())
if(args.optimizer=='SGD'):
    optimizer_model = torch.optim.SGD(params2optimize,
                                      args.lr,
                                      momentum=args.momentum,
                                      weight_decay=args.weight_decay)
elif(args.optimizer=='Adam'):
    optimizer_model = torch.optim.Adam(params2optimize,
                                       args.lr)
else:
    # BUG FIX: `raise('...')` raises a TypeError ("exceptions must derive
    # from BaseException"); raise a proper exception instead.
    raise ValueError('optimizer is not defined')
optimizer_centers_reg = torch.optim.SGD(centers_reg.parameters(),
                                        lr=0.5)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer_model,
                                                    milestones=[int(args.epochs*0.6),int(args.epochs*0.8)])
# optionally resume from a checkpoint
if(os.path.isdir(args.resume)):
    checkpoint_path = os.path.join(args.resume,'best_checkpoint.pth')
    print("=> loading checkpoint '{}'".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path)
    backbone.load_state_dict(checkpoint['backbone_state_dict'])
    centers_reg.load_state_dict(checkpoint['centers_reg_state_dict'])
    optimizer_model.load_state_dict(checkpoint['optimizer_model_state_dict'])
    optimizer_centers_reg.load_state_dict(checkpoint['optimizer_centers_reg_state_dict'])
    best_val_top1 = checkpoint['best_val_top1']
    start_epoch = checkpoint['start_epoch']
    global_step = checkpoint['global_step']
else:
    print('start from scratch')
    best_val_top1, start_epoch, global_step = 0, 0, 0
# check the save_dir exists or not; runs are numbered expN under a path
# derived from optimizer/dataset/backbone/loss.
save_dir = os.path.join('logs',args.optimizer,args.dataset,'%s_%s'%(args.backbone,args.head),'%s%s'%(args.loss,'_center_loss'if args.centerloss else ''))
exp_id = max([int(f.split('exp')[-1]) for f in glob.glob(save_dir + "/*")]+[0])+1
save_dir = os.path.join(save_dir,'exp%d'%(exp_id))
os.makedirs(save_dir,exist_ok=True)
#create summarywriter
writer = SummaryWriter(log_dir=save_dir)
# save args
with open(os.path.join(save_dir, 'commandline_args.txt'), 'w') as f:
    json.dump(args.__dict__, f, indent=2)
for epoch in range(start_epoch, args.epochs):
    # train for one epoch
    print('current lr {:.5e}'.format(optimizer_model.param_groups[0]['lr']))
    batch_time = AverageMeter()
    gama_reg_losses,train_top1,center_reg_losses = AverageMeter(),AverageMeter(),AverageMeter()
    if(args.vis2d):
        all_features, all_labels = [], []
    # switch to train mode
    backbone.train(), centers_reg.train()
    end = time.time()
    for batch_id, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.cuda()
        labels = labels.cuda()
        # compute output
        features = backbone(inputs)
        centers = centers_reg.centers
        # center regularization is the only training loss here
        center_reg_loss = centers_reg(features,labels)
        center_reg_losses.update(center_reg_loss.item(),inputs.size(0))
        loss = center_reg_loss
        optimizer_centers_reg.zero_grad()
        optimizer_model.zero_grad()
        loss.backward()
        optimizer_model.step()
        optimizer_centers_reg.step()
        # measure accuracy and record loss
        # NOTE(review): (translated) "errors may surface here; this was
        # edited without being run -- review together if it breaks."
        train_prec1 = accuracy_l2(features,centers,labels)
        train_top1.update(train_prec1, inputs.size(0))
        if(args.vis2d):
            all_features.append(features.data.cpu().numpy())
            all_labels.append(labels.data.cpu().numpy())
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_id % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} '
                  'Center-Reg {center_reg.val:.4f} ({center_reg.avg:.4f}) '
                  'Gama-Reg {gama_reg.val:.4f} ({gama_reg.avg:.4f}) '
                  'Train-Prec@1 {train_top1.val:.3f} ({train_top1.avg:.3f})'.format(
                      epoch, batch_id, len(train_loader), batch_time=batch_time,
                      center_reg=center_reg_losses,
                      gama_reg=gama_reg_losses, train_top1=train_top1))
            writer.add_scalar('train/accuracy', train_top1.val,global_step)
            writer.add_scalar('train/center_reg', center_reg_losses.val,global_step)
            writer.add_scalar('train/gama_reg', gama_reg_losses.val,global_step)
        global_step+=1
    lr_scheduler.step()
    if(args.vis2d):
        all_features = np.concatenate(all_features, 0)
        all_labels = np.concatenate(all_labels, 0)
        visualize(all_features, all_labels, num_classes,epoch, writer, args.head)
    # evaluate on validation set
    val_top1, val_losses = validate_one_class(val_loader, backbone, centers_reg, args)
    writer.add_scalar('test/model_loss',val_losses.avg,global_step)
    writer.add_scalar('test/accuracy',val_top1.avg,global_step)
    # remember best prec@1 and save checkpoint only when it improves
    is_best = val_top1.avg > best_val_top1
    best_val_top1 = max(val_top1.avg, best_val_top1)
    print('Epoch:%d train_top1:%.3f val_top1:%.3f best_val_top1:%.3f'%(epoch,train_top1.avg,val_top1.avg,best_val_top1))
    save_state = { 'backbone_state_dict': backbone.state_dict(),
                   'centers_reg_state_dict': centers_reg.state_dict(),
                   'optimizer_model_state_dict': optimizer_model.state_dict(),
                   'optimizer_centers_reg_state_dict': optimizer_centers_reg.state_dict(),
                   'best_val_top1': best_val_top1,
                   'val_top1': val_top1.avg,
                   'train_top1': train_top1.avg,
                   'start_epoch': epoch + 1,
                   'global_step':global_step}
    if is_best:
        writer.add_scalar('test/best_accuracy',best_val_top1,global_step)
        save_checkpoint(save_state,is_best,save_dir)
writer.close()
|
998,969 | 9f17b9c78696184bdbce94be761fffe120693d15 | import os
def normal_users(user, ord):
    """Execute a mailbox command for a regular user.

    `ord` is either the string 'inbox' or a list:
    ['send mail', recipient, message] or ['delete account', username].
    Returns a human-readable status string (messages kept verbatim).
    """
    import shutil  # BUG FIX: 'delete account' called shutil.rmtree without
    # importing shutil; the NameError was swallowed by the bare except, so
    # account deletion silently never worked.
    if ord[0] == 'send mail':
        to = ord[1]
        if type(ord[2]) == str:
            msg = ord[2]
        else:
            msg = str(ord[2])
        try:
            path = os.getcwd() + '/' + to
            if not os.path.exists(path):
                msg = ("Could'n send an Email to the user please make sure to write an exists user !")
                return msg
            elif os.path.exists(path):
                try:
                    mail_path = (path + '/inbox.txt')
                    # Append the mail to the recipient's inbox file.
                    with open(mail_path, 'a') as mail:
                        mail.write('\t[!]====================[EMAIL]==================[!]\n{}\nBy : {}\nDATE : 1900/08/16'.format(msg, user))  ## 'TODO': SET THE DATE
                    mesg = 'Message has been sent'
                    return mesg
                except:
                    mesg = 'Message has [NOT] been sent'
                    return mesg
            else:
                mesg = 'There was a problim during sending a message'
                return mesg
        except:
            mesg = 'There was a problim during finding the user please report the issue to the AnonyBox TM'
            return mesg
    elif ord[0] == 'delete account':
        # A user may only delete their own account.
        if user == ord[1]:
            try:
                file = user
                path = os.getcwd() + '/' + file
                if os.path.exists(path):
                    shutil.rmtree(path, ignore_errors=True)
                    with open('admins_report.txt', 'a') as report:
                        report.write("A user {} was deleted at [to use time here]\n".format(user))
                    msg = ("A user {} was deleted at [to use time here]".format(user))  # use time
                    return msg
                else:
                    msg = ("User may not be exists !")
                    return msg
            except:
                msg = ("Couldn't Delete User please report the issue to the AnonyBox TM")
                return msg
        else:
            msg = ("You only can delete your account !")
            return msg
    elif ord == 'inbox':
        # Read back the caller's own inbox file, if it exists.
        path = os.getcwd() + '/' + user + "/inbox.txt"
        if os.path.exists(path):
            with open(path, 'r+') as mails:
                data = mails.read()
            return data
        else:
            msg = "NO MAILS"
            return msg
|
998,970 | b5bd5dc10f6145fc5242eaff72f3d23271a7feb3 | __doc__ = """
DaetTools model that describes the behavior of a water flowing in a pipe with the effect of biofim formation.
"""
from daetools.pyDAE import *
from pyUnits import m, kg, s, K, Pa, J, W, rad
import pandas as pd
try:
from models.external_film_condensation_pipe import ExternalFilmCondensationPipe
from models.biofilm import Biofilm
except:
from .external_film_condensation_pipe import ExternalFilmCondensationPipe
from .biofilm import Biofilm
from daetools_extended.tools import get_node_tree, execute_recursive_method, get_initialdata_from_reporter, update_initialdata
class BiofilmedExternalFilmCondPipe(Biofilm, ExternalFilmCondensationPipe):
    """Pipe with external film condensation and internal biofilm fouling.

    Combines the two parent models and couples them via the flow diameter
    (reduced by biofilm thickness), the thermal resistance (biofilm layer
    added), and the biofilm temperature/velocity equations.
    """
    def __init__(self, Name, Parent=None, Description="", data={}, node_tree={}):
        ExternalFilmCondensationPipe.__init__(self, Name, Parent=Parent, Description=Description, data=data,
                                              node_tree=node_tree)
    # FIX: define_parameters was defined twice with an identical body; the
    # second definition silently shadowed the first. Kept a single copy.
    def define_parameters(self):
        ExternalFilmCondensationPipe.define_parameters(self)
        Biofilm.define_parameters(self)
    def define_variables(self):
        ExternalFilmCondensationPipe.define_variables(self)
        Biofilm.define_variables(self)
    def eq_internal_diameter(self):
        # Effective flow diameter shrinks as biofilm mass mf accumulates.
        eq = self.CreateEquation("D", "D_internal_flow_diameter")
        x = eq.DistributeOnDomain(self.x, eClosedClosed)
        eq.Residual = self.D(x) - (self.Di() ** 2 - 4 * self.mf(x) * self.Di() / self.rhomf()) ** 0.5
    def eq_calculate_resistance(self):
        # Series thermal resistances: internal film, wall, biofilm, external film.
        eq = self.CreateEquation("TotalHeat", "Heat balance - Qout")
        x = eq.DistributeOnDomain(self.x, eClosedClosed)
        Resext = 1 / (self.pi * self.Do() * self.hext(x))
        Resint = 1 / (self.pi * self.D(x) * self.hint(x))
        Reswall = Log(self.Do() / self.Di()) / (2 * self.pi * self.kwall())
        Resfilm = Log(self.Di() / self.D(x)) / (2 * self.pi * self.kappa(x))  # Melhorar ajustando T para kappa
        eq.Residual = self.Resistance(x) - (Resint + Reswall + Resext + Resfilm)
    def eq_biofilm_temperature(self):
        # Biofilm assumed at the bulk fluid temperature.
        eq = self.CreateEquation("Tbf", "Biofilm Temperature")
        x = eq.DistributeOnDomain(self.x, eClosedClosed)
        eq.Residual = self.T(x) - self.Tbf(x)
    def eq_biofilm_velocity(self):
        # Biofilm sees the local flow velocity.
        eq = self.CreateEquation("vbf", "Biofilm Velocity")
        x = eq.DistributeOnDomain(self.x, eClosedClosed)
        eq.Residual = self.v(x) - self.vbf(x)
    def DeclareEquations(self):
        ExternalFilmCondensationPipe.DeclareEquations(self)
        Biofilm.DeclareEquations(self)
        self.eq_biofilm_temperature()
        self.eq_biofilm_velocity()
998,971 | 06595596ce534399e6293bc766bdd66e7b75b4ff | #!/usr/bin/python
# -*- coding: utf-8 -*-
# 什么是面向对象
#需求
# - 老妈的交通工具有两个,电动车和自行车
# - 家里离菜场共 20 公里
# - 周一的时候骑电动车去买菜,骑了 0.5 小时
# - 周二的时候骑自行车去卖菜,骑了 2 小时
# - 周三的时候骑电动车去卖菜,骑了 0.6 小时
# - 分别输出三天骑行的平均速度
# def main():
# distance = 20
# e_bicycle = '电动车'
# bicycle = '自行车'
# day1 = '周一'
# hour1 = 0.5
# speed1 = 20/hour1
# print '%s 骑 %s 平均时速 %0.2f km/h' %(day1,e_bicycle, speed1)
# day2 = '周二'
# hour2 = 2
# speed2 = 20/hour2
# print '%s 骑 %s 平均时速 %0.2f km/h' %(day2,bicycle,speed2)
# day3 = '周三'
# hour3 = 0.6
# speed3 = 20/hour3
# print '%s 骑 %s 平均时速 %0.2f km/h' %(day3, e_bicycle, speed3)
# if __name__ == '__main__':
# main()
class transportation():
def __init__(self, type,day,hour):
self.type = type
self.day = day
self.hour = hour
def drive(self):
distance = 20
speed = float(distance)/self.hour
print '%s 老妈骑 %s 去买菜,平均时速 %0.2f km/h' % (self.day,self.type,self.hour)
def main():
    # Three sample trips; drive() prints each day's average speed.
    print '家里距菜场20km'
    transportation1 = transportation('e_bicycle','周一',0.5)
    transportation1.drive()
    transportation2 = transportation('bicycle','周二',2)
    transportation2.drive()
    transportation3 = transportation('e_bicycle','周三',0.6)
    transportation3.drive()
if __name__ == '__main__':
    main()
998,972 | 0fc31f8a113790c331c24eb97077f6701610d398 | """Handles the data."""
import os
TRANSCRIPTION_PATH = './ground-truth/transcription.txt'
KEYWORDS = "task/keywords_train.txt"
KEYWORDS_TEST = "task/keywords.txt"
IMG_PATH = "Cropped-images/"
def get_train_sample(keyword):
    """Return the word id of a training-set word matching *keyword*, or None."""
    with open(TRANSCRIPTION_PATH, 'r') as handle:
        lines = handle.readlines()
    for line in lines:
        entry = line.rstrip('\n')
        # Layout: chars 0-8 word id, chars 0-2 page number, transcript from 10.
        if entry[10:].lower() == keyword.lower() and int(entry[0:3]) < 300:
            return entry[0:9]
def get_train_sample_for_test(keyword):
    """Return the word id listed for *keyword* in the test keyword file.

    Keeps the LAST match (as the original did); '' when nothing matches.
    """
    with open(KEYWORDS_TEST, 'r') as handle:
        entries = handle.readlines()
    sample = ''
    for entry in entries:
        transcript, word_id = entry.split(",")
        if transcript == keyword:
            sample = word_id.rstrip('\n')
    return sample
def load_valid_words():
    """Return the word ids of the validation split (pages >= 300)."""
    with open(TRANSCRIPTION_PATH, 'r') as handle:
        lines = handle.readlines()
    return [line[0:9] for line in lines if int(line[0:3]) >= 300]
def load_test_words():
    """Return the test image ids (pages above 304) found in IMG_PATH.

    Fixes two latent defects of the old one-liner:
    - `f.rstrip('.png\\n')` strips a *character set*, so an id ending in
      'p', 'n' or 'g' would be over-stripped; strip the suffix instead.
    - `&` does not short-circuit, so `int(f[0:3])` ran on every filename,
      crashing on any non-numeric name; `and` only evaluates it for .png.
    """
    return [f[:-len('.png')] for f in os.listdir(IMG_PATH)
            if os.path.splitext(f)[-1] == '.png' and int(f[0:3]) > 304]
def load_train_word_dict():
    """Return {word_id: transcript} for the training split (pages < 300)."""
    mapping = {}
    with open(TRANSCRIPTION_PATH) as handle:
        for entry in handle:
            if int(entry[0:3]) >= 300:
                continue
            word_id, transcript = entry.split(" ")
            mapping[word_id] = transcript.rstrip('\n')
    return mapping
def load_valid_word_dict():
    """Return {word_id: transcript} for the validation split (pages >= 300)."""
    mapping = {}
    with open(TRANSCRIPTION_PATH) as handle:
        for entry in handle:
            if int(entry[0:3]) < 300:
                continue
            word_id, transcript = entry.split(" ")
            mapping[word_id] = transcript.rstrip('\n')
    return mapping
def load_keywords():
    """Return the training keywords, one per line of KEYWORDS."""
    with open(KEYWORDS, 'r') as handle:
        return [entry.rstrip('\n') for entry in handle.readlines()]
def load_keywords_for_test():
    """Return the transcripts from KEYWORDS_TEST ('transcript,word_id' rows)."""
    keywords = []
    with open(KEYWORDS_TEST, 'r') as handle:
        for entry in handle.readlines():
            transcript, word_id = entry.split(",")
            keywords.append(transcript.rstrip('\n'))
    return keywords
|
998,973 | fd4466d675b2f205a5ddc7c7a269298d119264b0 | import csv
import matplotlib.pyplot as plt
class Plotter(object):
    """Thin convenience wrapper around matplotlib.pyplot plus CSV export."""

    def add_figure(self, title, x_label, y_label):
        """Create a new gridded, labelled figure and return its number."""
        figure = plt.figure()
        plt.grid()
        plt.title(title)
        plt.xlabel(x_label)
        plt.ylabel(y_label)
        return figure.number

    def add_line(self, data, label, figure_number):
        """ Add a list of tuples (x, y) as a line plot, see https://stackoverflow.com/a/18458953/5730444"""
        plt.figure(figure_number)
        columns = zip(*data)
        plt.plot(*columns, label=label, marker='.', linestyle="")
        plt.legend(loc="upper left")

    def to_csv(self, data, label):
        """Write the (x, y) pairs to '<label>.csv', one pair per row."""
        with open(f"{label}.csv", 'w', newline="") as out_file:
            writer = csv.writer(out_file)
            for item in data:
                writer.writerow([item[0], item[1]])

    def show(self):
        """Block and display all pending figures."""
        plt.show()
|
998,974 | 9066016aa8973ea3cbb063309fafa2d9eee91886 | from pathlib import Path
from kaybee.app import kb
from kaybee.plugins.articles.base_article_reference import BaseArticleReference
@kb.resource('author')
class Author(BaseArticleReference):
    """Article author resource with optional headshot images."""

    def headshot_thumbnail(self, usage):
        """Return the headshot path (str) registered for *usage*, or None."""
        parent = Path(self.docname).parent
        prop = self.find_prop_item('images', 'usage', usage)
        if prop:
            return str(parent / prop.filename)
        return None
998,975 | 95612b9b755bc53de4d191717ae9011cf8404ca6 | from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
# URL namespace for reversing, e.g. reverse('cuentas:detail').
app_name = 'cuentas'
urlpatterns = [
    path('user/current/', views.CurrentUser.as_view(), name='current'),
    path('users/', views.UserList.as_view(), name='list'),
    # path('user/create/', views.UserCreate.as_view(), name='create'),
    path('user/<int:pk>/', views.UserDetail.as_view(), name='detail'),
]
# Allow explicit format suffixes (e.g. .json) on the routes above.
urlpatterns = format_suffix_patterns(urlpatterns)
998,976 | 0e4fb47ab5738494e6839c36ce05af1d2586adfe | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created by Cphayim at 2018/06/18
"""
# def fact(n):
# if n <= 1:
# return n
# return n * fact(n - 1)
# print(fact(5))
# for x in ran5
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Print every other element, ' | '-separated.
for position in range(0, len(a), 2):
    print(a[position], end=' | ')
# The same selection expressed as a slice with step 2.
b = a[::2]
print(b)
|
998,977 | 94b1acc93c757cd4cd1ccf88e8c142eebc578239 | from zen.ui import _readchar, _read_action, read_until_action, Action
def test_getting_single_char() -> None:
assert _readchar(_testing="a")=="a"
def test_control_c_raises_keyboard_interrupt() -> None:
try:
_readchar(_testing="\x03")
except KeyboardInterrupt:
return None
assert False
def test_w_is_action_up() -> None:
assert _read_action(_testing="w")==Action.UP
def test_s_is_action_down() -> None:
assert _read_action(_testing="s")==Action.DOWN
def test_a_is_action_left() -> None:
assert _read_action(_testing="a")==Action.LEFT
def test_d_is_action_right() -> None:
assert _read_action(_testing="d")==Action.RIGHT
def test_f_is_action_select() -> None:
assert _read_action(_testing="f")==Action.SELECT
def test_unrecognized_key_is_action_other() -> None:
assert _read_action(_testing="invalid character")==Action.OTHER
def test_read_until_action_returns_action_if_valid() -> None:
assert read_until_action(_testing="w")==Action.UP
def test_read_until_action_looping_with_invalid_action() -> None:
try:
read_until_action(_testing="invalid character")
except RecursionError:
return None
assert False
|
998,978 | 4c199f254491e8d46609c696d67809cddccd2a30 | import yaml
import constants
class YamlConfigError(KeyError):
    """Raised when the queue YAML config is missing a required entry."""
    pass
def validate_queue_config(directive, queue_config):
    """Ensure an SQS queue config carries url, credentials and region.

    Raises YamlConfigError naming the missing field; returns None when valid.
    """
    try:
        queue_url = queue_config['url']
    except KeyError:
        raise YamlConfigError(
            f"{directive} queue is type 'sqs', but queue 'url' not provided"
        )
    # The remaining required fields all produce the same message shape.
    for required in ('access_key_id', 'access_key', 'region'):
        if required not in queue_config:
            raise YamlConfigError(
                f"{directive} with queue_url {queue_url} missing {required}"
            )
# NOTE(review): yaml.load without an explicit Loader is deprecated and can
# construct arbitrary Python objects; prefer yaml.safe_load for config files.
with open(constants.QUEUE_CONFIG_FILE, 'r') as config_file:
    queue_config = yaml.load(config_file)
# Source receiver sqs queue config
try:
    receiver = queue_config['receiver']
except KeyError:
    raise YamlConfigError('no source receiver provided')
else:
    validate_queue_config('receiver', receiver)
# Destination route sqs queue config
try:
    routes = queue_config['routes']
except KeyError:
    raise YamlConfigError('no destination routes provided')
else:
    for route in routes:
        try:
            route_type = route['type']
        except KeyError:
            raise YamlConfigError('route type not specified')
        # Only SQS routes need the full credential/url validation.
        if route['type'] == 'sqs':
            validate_queue_config('route', route)
|
998,979 | 11c93d8d072b1298b3c1c90cd7f444d010f5c9dd | def key(ll):
return "%d,%d,%d,%d,%d,%d,%d,%d,%d,%d" % (ll[0],ll[1],ll[2],ll[3],ll[4],ll[5],ll[6],ll[7],ll[8],ll[9])
# Project Euler 62: smallest cube whose digits are a permutation of
# exactly five cubes. (Python 2: integer `/=` is floor division.)
max = 10000   # NOTE(review): shadows the builtin max
lst = {}
for i in range(max):
    j = i*i*i
    l = [0]*10
    # Count decimal digit occurrences of the cube i**3.
    while(j > 0):
        l[j%10] += 1
        j /= 10
    k = key(l)
    if(not(k in lst)):
        lst[k] = []
    lst[k].append(i)
# Among signatures shared by exactly five cubes, take the smallest base.
min = max
for i in lst:
    if(len(lst[i]) == 5):
        if(lst[i][0] < min):
            min = lst[i][0]
print min*min*min
998,980 | 7ab69e8233ebf0946a62b66a1d1ccfc3a891599e | #! python
# Problem # : 158B
# Created on : 2019-01-14 22:25:35
def Main():
    """Codeforces 158B-style grouping: greedily merge the smallest groups
    into the largest until each taxi (capacity m=4) is full; print the count."""
    n = int(input())  # group count; consumed but the list length is used instead
    arr = [int(x) for x in input().split(' ')]
    arr.sort()
    i = 0
    j = len(arr) - 1
    k = 0
    m = 4
    while j >= i:
        # BUG FIX: was `if i is j` -- identity comparison on ints only works
        # by accident for small interned values; use equality.
        if i == j:
            k += 1
            break
        # Pack as many small groups as fit alongside the current largest.
        while arr[i] + arr[j] <= m and j > i:
            arr[j] += arr[i]
            i += 1
        k += 1
        j -= 1
    print(k)

if __name__ == '__main__':
    Main()
|
998,981 | ef27768b66fab231818287d7b231fbbe98ab1ea4 | #!/usr/bin/env python3
#coding = utf-8
import bisect
# the module for inserting and sorting operations on ordered arrays.
# bisect.bisect_left(list, value)
# find the location where the value will be inserted but not inserted.
# returns the position to the left of x if x exists in list.
def money(n, m, limit, maxm):
    """Greedy assignment: consider offers (limit, value) by decreasing value;
    each taken offer consumes the smallest slot >= its limit, up to n picks.
    Prints the total value collected."""
    limit = sorted(limit)
    maxm = sorted(maxm, key = lambda x : x[1], reverse = True)
    # key selects x[1] (the value) of each [limit, value] pair
    res = 0
    while n > 0 and len(maxm) > 0:
        # bisect_left: index of the smallest slot that satisfies the limit
        l = bisect.bisect_left(limit, maxm[0][0])
        if l < len(limit):
            res += maxm[0][1]
            n -= 1
            limit.pop(l)
        maxm.pop(0)
    print(res)
    # list.pop(index) removes the element at index and returns it
    # list.remove(value) removes the first element equal to value
if __name__ == "__main__":
    n, m = map(int, input().split())
    limit = list(map(int, input().split()))
    maxm = []
    for i in range(m):
        maxm.append(list(map(int, input().split())))
    money(n, m, limit, maxm)
|
998,982 | 10dcf7f7ddab150676ceb860494abd4b6d0838a3 | import pathlib
import sys
from collections import defaultdict
import argparse
import numpy as np
import torch
from torch.utils.data import DataLoader
from dataset_1 import SliceDataDev
from torch.nn import functional as F
from models import DnCn
import h5py
from tqdm import tqdm
from collections import OrderedDict
import os
def save_reconstructions(reconstructions, out_dir):
    """
    Saves the reconstructions from a model into h5 files that is appropriate for submission
    to the leaderboard.
    Args:
        reconstructions (dict[str, np.array]): A dictionary mapping input filenames to
            corresponding reconstructions (of shape num_slices x height x width).
        out_dir (pathlib.Path): Path to the output directory where the reconstructions
            should be saved.
    """
    # FIX: the old exists()/os.mkdir() pair raced, could not create missing
    # parent directories, and duplicated the pathlib mkdir that followed it.
    out_dir.mkdir(parents=True, exist_ok=True)
    print('Saved directory is', out_dir)
    for fname, recons in reconstructions.items():
        with h5py.File(out_dir / fname, 'w') as f:
            f.create_dataset('reconstruction', data=recons)
def create_data_loaders(args):
    """Build a DataLoader over the dev slice dataset described by *args*."""
    dataset = SliceDataDev(args.data_path, args.acceleration_factor, args.dataset_type)
    return DataLoader(
        dataset=dataset,
        batch_size=args.batch_size,
        num_workers=4,
        pin_memory=True,
    )
def load_model(args,checkpoint_dccnn,checkpoint_percept,alpha):
    """Load both checkpoints and return a DnCn whose weights are the
    parameter-wise interpolation (1-alpha)*dccnn + alpha*percept."""
    checkpoint_dccnn = torch.load(checkpoint_dccnn)
    args_dccnn = checkpoint_dccnn['args']
    checkpoint_percept = torch.load(checkpoint_percept)
    args_percept = checkpoint_percept['args']
    model_dccnn = DnCn(args,n_channels=1).to(args.device)
    model_percept = DnCn(args_percept,n_channels=1).to(args.device)
    model_combined = DnCn(args,n_channels=1).to(args.device)
    if args_dccnn.data_parallel:
        model_dccnn = torch.nn.DataParallel(model_dccnn)
    if args_percept.data_parallel:
        model_percept = torch.nn.DataParallel(model_percept)
    model_dccnn.load_state_dict(checkpoint_dccnn['model'])
    model_percept.load_state_dict(checkpoint_percept['modelG_dc'])
    param_combined = OrderedDict()
    # Interpolate tensor-by-tensor between the two state dicts.
    for param_name in checkpoint_dccnn['model']:
        param_dccnn = checkpoint_dccnn['model'][param_name]
        param_percept = checkpoint_percept['modelG_dc'][param_name]
        param_combined[param_name] = (1 - alpha) * param_dccnn + alpha * param_percept
    model_combined.load_state_dict(param_combined)
    return model_combined
def run_gan(args, models_combined, data_loader):
    """
    Run the interpolated model over the validation loader and collect
    reconstructions grouped by source file.

    Args:
        args: Namespace with at least a `device` attribute.
        models_combined: The weight-interpolated DnCn model (already loaded).
        data_loader: Yields (input, input_kspace, target, fnames, slices) batches.

    Returns:
        dict[str, np.ndarray]: filename -> volume stacked in slice-index order.
    """
    models_combined.eval()
    reconstructions = defaultdict(list)
    with torch.no_grad():
        # The original loop index shadowed the builtin `iter`; it was unused.
        for data in tqdm(data_loader):
            img, img_kspace, target, fnames, slices = data
            # k-space: add a channel dim, move to device, model expects double.
            img_kspace = img_kspace.float()
            img_kspace = img_kspace.unsqueeze(1).to(args.device).double()
            img = img.unsqueeze(1).to(args.device)
            img = img.float()
            recons = models_combined(img, img_kspace)
            recons = recons.to('cpu').squeeze(1)
            for i in range(recons.shape[0]):
                # (removed a no-op self-assignment `recons[i] = recons[i]`)
                reconstructions[fnames[i]].append((slices[i].numpy(), recons[i].numpy()))
    # Sort each file's slices by slice index and stack into one volume.
    reconstructions = {
        fname: np.stack([pred for _, pred in sorted(slice_preds)])
        for fname, slice_preds in reconstructions.items()
    }
    return reconstructions
def main(args):
    """
    Sweep the interpolation coefficient alpha over [0, 1] in steps of 0.1,
    reconstructing the validation set for each blended model and saving the
    results under out_dir/<alpha>/.
    """
    print(args.out_dir)
    # args.out_dir is a pathlib.Path; the original os.path.exists/os.mkdir
    # combination fails when parent directories are missing.
    args.out_dir.mkdir(parents=True, exist_ok=True)
    data_loader = create_data_loaders(args)
    for alpha in np.linspace(0, 1, 11):
        # round(float(...), 2) keeps directory names like "0.1" stable.
        alpha = round(float(alpha), 2)
        model_dc = load_model(args, args.checkpoint_dccnn, args.checkpoint_percept, alpha)
        reconstructions = run_gan(args, model_dc, data_loader)
        out_dir = args.out_dir / str(alpha)
        save_reconstructions(reconstructions, out_dir)
def create_arg_parser():
    """Build the CLI parser for the alpha-interpolation validation run.

    Returns:
        argparse.ArgumentParser with all reconstruction options registered.
    """
    parser = argparse.ArgumentParser(description="Valid setup for MR recon U-Net")
    parser.add_argument('--checkpoint_dccnn', type=pathlib.Path, required=True,
                        help='Path to the dc_cnn model')
    parser.add_argument('--num-pools', type=int, default=4, help='Number of U-Net pooling layers')
    parser.add_argument('--drop-prob', type=float, default=0.0, help='Dropout probability')
    parser.add_argument('--num-chans', type=int, default=32, help='Number of U-Net channels')
    # Help text was a copy-paste of the dc_cnn option; fixed to describe this model.
    parser.add_argument('--checkpoint_percept', type=pathlib.Path, required=True,
                        help='Path to the perceptual model')
    parser.add_argument('--out-dir', type=pathlib.Path, required=True,
                        help='Path to save the reconstructions to')
    parser.add_argument('--batch-size', default=16, type=int, help='Mini-batch size')
    parser.add_argument('--device', type=str, default='cuda', help='Which device to run on')
    parser.add_argument('--data-path', type=str, help='path to validation dataset')
    parser.add_argument('--acceleration_factor', type=str, help='acceleration factors')
    parser.add_argument('--usmask_path', type=str, help='Path to USMASK')
    parser.add_argument('--dataset_type', type=str, help='cardiac,kirby')
    return parser
if __name__ == '__main__':
    # Entry point: parse CLI arguments and run the alpha-interpolation sweep.
    args = create_arg_parser().parse_args(sys.argv[1:])
    main(args)
|
998,983 | f6be74af798951ffedc5813b26c0d7ef159483c9 | import numpy as np
import gridWorld
import helper
class MonteCarlo():
    """On-policy first-visit Monte Carlo control with an epsilon-soft policy
    over a small grid world (one `learn()` call = one episode)."""

    def __init__(self, dimensions=(4, 5)):
        self.gw = gridWorld.Grid()
        self.dimensions = dimensions
        self.gw.init_sample_grid_world(dimensions)
        # q[row, col, action]: action-value estimates.
        self.q = np.zeros(shape=(dimensions[0], dimensions[1], 4))
        # returns[row, col, action, 0] = summed returns, [..., 1] = visit count.
        self.returns = np.zeros(shape=(dimensions[0], dimensions[1], 4, 2))
        # Start with a uniform random policy over the 4 actions.
        self.policy = np.zeros(shape=(dimensions[0], dimensions[1], 4))
        self.policy[:, :, :] = 0.25
        self.eps = 0.4  # exploration rate of the epsilon-soft policy

    def learn(self):
        """Run one episode from (0, 0), then update Q and the policy."""
        s_k = (0, 0)
        # (a) generate an episode following the behaviour policy
        occurrences = []  # entries: [state, action_index, reward]
        while True:
            policy = self.get_behaviour_policy(s_k)
            a_k = self.gw.actions[policy]
            s_kk = self.gw.get_next_state(s_k, a_k)
            occurrences.append([s_k, policy, self.gw.get_reward(s_k, a_k, s_kk)])
            if self.gw.is_terminal_state(s_k):
                break
            s_k = s_kk
        # (b) first-visit return averaging for each (state, action) pair
        computed_state_actions = []
        for i in range(len(occurrences)):
            if (occurrences[i][0], occurrences[i][1]) in computed_state_actions:
                continue
            # Undiscounted return: sum of all rewards from step i onward.
            for j in np.arange(i, len(occurrences)):
                self.returns[occurrences[i][0][0], occurrences[i][0][1], occurrences[i][1], 0] += occurrences[j][2]
            self.returns[occurrences[i][0][0], occurrences[i][0][1], occurrences[i][1], 1] += 1
            total_R = self.returns[occurrences[i][0][0], occurrences[i][0][1], occurrences[i][1], 0]
            number_of_Rs = self.returns[occurrences[i][0][0], occurrences[i][0][1], occurrences[i][1], 1]
            self.q[occurrences[i][0][0], occurrences[i][0][1], occurrences[i][1]] = total_R / float(number_of_Rs)
            computed_state_actions.append((occurrences[i][0], occurrences[i][1]))
        # (c) epsilon-greedy policy improvement for every visited state
        computed_states = []
        for occurrence in occurrences:
            state = occurrence[0]
            if state in computed_states:
                continue
            opt_action = np.argmax(self.q[state[0], state[1], :])
            for action_number in range(4):
                if action_number == opt_action:
                    self.policy[state[0], state[1], action_number] = 1 - self.eps + self.eps/4
                else:
                    self.policy[state[0], state[1], action_number] = self.eps/4
            computed_states.append(state)

    def get_policy(self, state):
        """Return the greedy action for `state` according to current Q."""
        argmax_a = np.argmax(self.q[state[0], state[1], :])
        return self.gw.actions[argmax_a]

    def get_behaviour_policy(self, state):
        """Sample an action index for `state` from the epsilon-soft policy."""
        weights = []
        for action_number in range(4):
            weights.append(self.policy[state[0], state[1], action_number])
        return helper.weighted_choice(weights)

    @staticmethod
    def is_online():
        return True
if __name__ == "__main__":
    # Train for 5000 episodes, then print the greedy action per grid cell.
    mc = MonteCarlo()
    for it in range(5000):
        mc.learn()
    for row in range(4):
        for col in range(5):
            print(row, col, mc.gw.actions[np.argmax(mc.q[row, col, :])])
998,984 | d8ca98998ac7abb79d26f2778ac22f547ec4fe6c | # 20P
# Create a dictionary that contains the result functions as keys and as values the list of results from calling that
# function with x in range -10, 10 as value
def build(a, b, c):
    """Return a closure evaluating the quadratic a*x**2 + b*x + c at x."""
    def response(x):
        # (the original initialized `result = 0` and immediately overwrote it)
        return a * x ** 2 + b * x + c
    return response
# Build the three quadratics, then map each function object to its values
# over x in range(-10, 10).
list_of_functions = [build(a, b, c) for a, b, c in ((1, -2, 2), (2, -4, 4), (3, -6, 6))]
dict_of_results = {fn: [fn(y) for y in range(-10, 10)] for fn in list_of_functions}
print(dict_of_results)
|
998,985 | 4849e761943dbbd5f94f192494c52d26643221bf | #! -*- coding=utf-8 -*-
import inspect
class Friends:
    """A friend record with optional name, grade and age."""

    def __init__(self, name=None, grade=None, age=None):
        self.name = name
        self.grade = grade
        self.age = age

    def __str__(self):
        # The original format string mislabelled the last field as a second
        # "grade"; it is the age.
        return 'Friends: name=%s, grade=%s, age=%s' % (self.name, self.grade, self.age)

    def test(self):
        # print() with a single argument behaves identically on Python 2 and 3.
        print('test')
class Friends2:
    """Like Friends, but attributes always start as None (no constructor args)."""

    def __init__(self):
        self.name = None
        self.grade = None
        self.age = None

    def __str__(self):
        # Fixed label: last field is the age, not a second "grade".
        return 'Friends: name=%s, grade=%s, age=%s' % (self.name, self.grade, self.age)
class Friends3:
    """Like Friends, but all three attributes are required constructor args."""

    def __init__(self, name, grade, age):
        self.name = name
        self.grade = grade
        self.age = age

    def __str__(self):
        # Fixed label: last field is the age, not a second "grade".
        return 'Friends: name=%s, grade=%s, age=%s' % (self.name, self.grade, self.age)
def dict_to_obj(classType, obj, adict):
    """
    Copy every key/value pair of adict onto obj as attributes.

    Only class objects and instances of classType are accepted.

    :param classType: the class used for the isinstance check
    :param obj: a class object or an instance of classType
    :param adict: mapping of attribute name -> value to set
    :return: obj with attributes set, or None (after printing 'error')
             when obj is neither a class nor a classType instance
    """
    # print inspect.isclass(Friends)
    # print isinstance(Friends(), Friends)
    if inspect.isclass(obj) or isinstance(obj, classType):
        for k, v in adict.items():
            setattr(obj, k, v)
        return obj
    else:
        # single-argument print() is valid on both Python 2 and 3
        print('error')
# Sample data used by the __main__ demo below.
friends = [
    Friends('zhouze', 'boy', 20),
    Friends('liuting', 'girl', 18)
]
if __name__ == '__main__':
    print "*" * 60
    # Grab the attribute dict of the first friend and add an extra key;
    # dict_to_obj will then set that extra attribute too.
    f1_dict = friends[0].__dict__
    f1_dict['zz']="zz"
    print f1_dict
    print "*" * 60
    # Passing the class object itself sets attributes on the class.
    f2 = dict_to_obj(Friends, Friends, f1_dict)
    print f2,f2.grade,f2.name,f2.age, type(f2)
    print isinstance(f2, Friends)
    # print f2.test()
    # Passing an instance sets attributes on that instance only.
    f3 = dict_to_obj(Friends, Friends(), f1_dict)
    print f3, f3.grade, f3.name, f3.age, type(f3)
    print isinstance(f3, Friends)
    print "*"*60
    f2 = dict_to_obj(Friends2, Friends2, f1_dict)
    print f2, f2.grade, f2.name, f2.age, type(f2)
    print isinstance(f2, Friends)
    f3 = dict_to_obj(Friends2, Friends2(), f1_dict)
    print f3, f3.grade, f3.name, f3.age, type(f3)
    print isinstance(f3, Friends)
    print "*" * 60
    f3 = dict_to_obj(Friends3, Friends3, f1_dict)
    print f3, f3.grade, f3.name, f3.age, type(f3)
    print isinstance(f3, Friends)
    # NOTE(review): the commented call below would raise TypeError because
    # Friends3() requires name/grade/age -- presumably why it is disabled.
    # f3 = dict_to_obj(Friends3, Friends3(), f1_dict)
    # print f3, f3.grade, f3.name, f3.age, type(f3)
    # print isinstance(f3, Friends)
    f3 = Friends(name='zzlt')
    print f3
|
998,986 | 4c22b412cf420665c631832834a9d29bba621c08 | import re
# Defines the constant names used when resolving the commands type.
# The nine arithmetic/logical ops all map to C_ARITHMETIC (see
# Parser.command_type); every other keyword maps directly to its constant.
commands = {
    'arithmetic': ['add', 'sub', 'neg', 'eq', 'gt', 'lt', 'and', 'or', 'not'],
    'push': 'C_PUSH',
    'pop': 'C_POP',
    'label': 'C_LABEL',
    'goto': 'C_GOTO',
    'if-goto': 'C_IF',
    'function': 'C_FUNCTION',
    'call': 'C_CALL',
    'return': 'C_RETURN'
}
class Parser:
    """
    This module handles the parsing of a single .vm file.
    The parser provides services for reading a VM command, unpacking the
    command into its various components, and providing convenient access
    to these components. In addition, the parser ignores all white
    space and comments.
    """

    def __init__(self, filename):
        self.stream = open(filename, 'r')
        self.current_command = None

    def has_more_lines(self):
        """
        Return true if there are more lines in the input file.
        """
        pos = self.stream.tell()
        res = self.stream.readline() != ''
        self.stream.seek(pos)
        return res

    def advance(self):
        """
        Reads the next command from the input and makes it the current
        command. At end of file, prints a notice and leaves the current
        command unchanged.
        """
        line = self.stream.readline()
        # readline() returns '' at EOF -- never None. The original tested
        # `line is not None`, which made this loop spin forever once the
        # file was exhausted.
        while line != '':
            # Strip comments and surrounding whitespace
            line = re.sub('//.*', '', line).strip()
            # Stop at the first real (non-empty) command line
            if line != '':
                break
            line = self.stream.readline()
        if line == '':
            print("No more commands.")
            return
        self.current_command = line

    def command_type(self):
        """
        Returns a constant representing the type of the current command.
        """
        t = self.current_command.split(' ')[0]
        if t in commands.get('arithmetic'):
            return 'C_ARITHMETIC'
        if t not in commands:
            raise ValueError('{} is an invalid command type.'.format(t))
        return commands.get(t)

    def arg1(self):
        """
        Returns the first argument of the current command
        (or the command itself for arithmetic; None for C_RETURN).
        """
        t = self.command_type()
        if t == 'C_RETURN':
            return
        args = self.current_command.split(' ')
        if t == 'C_ARITHMETIC':
            # Return the command itself.
            return args[0]
        return args[1]

    def arg2(self):
        """
        Returns the second argument (an int) of the current command, for the
        command types that carry one; None otherwise.
        """
        t = self.command_type()
        if t not in ['C_PUSH', 'C_POP', 'C_FUNCTION', 'C_CALL']:
            return
        return int(self.current_command.split(' ')[2])

    def rollback(self):
        """
        Rolls back the file pointer to the start of the file.
        """
        self.stream.seek(0)
|
998,987 | 8a950f5ff6bd1d92cfd9da0103086f0f16b58526 |
def report(xs):
    """Build a summary string from a flat list mixing name strings and int
    scores, ending with the average of all scores.

    NOTE(review): for each run of names, only the first count-1 names are
    emitted (range(0, count-1) never reaches the last one) -- presumably the
    surname is dropped on purpose; confirm against expected output.
    """
    result = ""
    total = 0       # running sum of all integer scores
    countscore = 0  # number of scores seen
    i = 0
    while i < len(xs):
        count = 0
        names = []
        if isinstance(xs[i], int):
            total = total + xs[i]
            countscore = countscore + 1
            i = i + 1
            if i == len(xs):
                break
        # collect the consecutive run of name strings
        while isinstance(xs[i], str):
            names.append(xs[i])
            count = count + 1
            i = i + 1
            if i == len(xs):
                break
        if count > 0:
            size = count - 1
            for j in range(0, count - 1):
                if size > 1:
                    result = result + names[j] + " "
                    size = size - 1
                elif size == 1:
                    result = result + names[j] + ", "
    # append the average of every score once, after the whole list is consumed
    avg = total / countscore
    result = result + "averaged " + str(avg) + "."
    return result
# Demo: interleaved name strings and integer scores.
print(report(["Jill","Johnson",87,"Billy","Ray","Cyrus",78,"Rita","Yeats",94,"Bobbie","Sue","Palmer",72]))
print(report(["Fay","Hannah", "Rose",58,"Jillian","Murray",45,"Samrita","Kapoor",99]))
|
998,988 | f385f9d901ae3a757d352cddc14da6522f522233 | import discord
import config
import asyncio
import time
from datetime import datetime, timedelta
import pytz
import logging
import random
from managers.CommandsManager import CommandsManager
from managers.DatabaseManager import DatabaseManager
from managers.StatisticsManager import StatisticsManager
from managers.TeamupManager import TeamupManager
from managers.CacheManager import CacheManager
from managers.CalendarsManager import CalendarsManager
from managers.helpers.embeds import Embeds
from discord.ext import tasks
# logging stuff
# Module-wide logger shared by the Bot class below.
logger = logging.getLogger('calendar_bot')
logger.setLevel(logging.INFO)
class Bot:
    """Discord calendar bot: wires up the manager objects and runs the
    periodic tasks that refresh calendar embeds and DM reminders."""

    # Class-level defaults; the real objects are assigned in __init__.
    _client = None
    _commandsManager = None
    _databaseManager = None
    _cacheManager = None
    _statisticsManager = None

    def __init__(self):
        self._client = discord.Client(heartbeat_timeout=60, guild_subscriptions=False, fetch_offline_members=False)
        self._databaseManager = DatabaseManager(self)
        self._cacheManager = CacheManager(self)
        self._commandsManager = CommandsManager(self)
        self._statisticsManager = StatisticsManager(self)
        self._teamupManager = TeamupManager(self, config.bot["teamup_dev_token"])
        self._calendarsManager = CalendarsManager(self)
        print("✓ Bot initialized")

    def run_client(self, token):
        """Start the discord client (blocking) if it was constructed."""
        if self._client != None:
            self._client.run(token)

    def backend_log(self, source, msg):
        """Debug-log a message tagged with its source and a timestamp."""
        err_str = "[{0} - {1}] {2}".format(source, datetime.now(), msg)
        logger.debug(err_str)

    # =======================
    # PERIODIC CHECKING
    # =======================
    @tasks.loop(seconds=1314900)
    async def periodic_clean_db(self):
        # roughly every ~15 days: purge old reminded-event rows
        self._databaseManager.clean_reminded_events()

    @tasks.loop(seconds=100)
    async def periodic_update_calendars(self):
        """Refresh stale calendar embeds and send event reminders via DM."""
        # let's skip DatabaseManager and create custom query
        cursor = self._databaseManager.get_cursor()
        start_time = time.time()
        # cache of channel_id -> fetched channel for this pass
        channel_cache = dict()
        # Filtering servers/calendars goes like this, after successful update it saves timestamp with variation
        # if bot can't reach server/channel/message/teamup and timestamp hasn't been updated for 2 days
        # - if server unreachable -> delete server + all calendars
        # - if message/teamup unreachable -> delete given calendar
        # - if channel unreachable -> delete all calendars in this channel
        try:
            variation_min = random.randint(8, 10)
            time_min_back = (datetime.now() - timedelta(minutes=variation_min))
            calendars = cursor.execute("SELECT * FROM calendar WHERE last_update <= ? AND is_alive='TRUE';", (time_min_back, )).fetchall()
        except Exception as e:
            # NOTE(review): on this path `calendars` is never assigned, so the
            # len(calendars) below raises NameError, and cursor.close() runs
            # twice (here and just below) -- confirm/early-return instead?
            cursor.close()
            self.backend_log("periodic_update_calendars", str(e))
        cursor.close()
        start_time = time.time()
        date_fmt = "%Y-%m-%d"
        logger.info("[{0}] updating {1} calendars.".format(datetime.now(), len(calendars)))
        i = 0
        for calendar in calendars:
            # lets wait 15 seconds after every 10 calendars because of the f*cking rate limit
            # losing my mind pt. 4
            if i > 0 and i % 15 == 0:
                logger.debug('[{0}] ===== WAITING FOR 35s ====='.format(datetime.now()))
                await asyncio.sleep(35)
            logger.debug("[{0}] [{1}] CALENDAR:SERVERID: {2}:{3}".format(datetime.now(), i, calendar["ID"], calendar["server_id"]))
            # Let's check if this calendar is a boomer (untouched for >= 48h)
            try:
                lup_dt = datetime.strptime(calendar["last_update"], '%Y-%m-%d %H:%M:%S.%f')
                last_update_hours = (datetime.now() - lup_dt).total_seconds() / 60.0 / 60.0
                calendar_old = False
                if last_update_hours >= 48:
                    calendar_old = True
            except Exception as e: # mark it false if you can't parse date
                calendar_old = False
            message = None
            try:
                if self._client == None:
                    continue
                if calendar["channel_id"] not in channel_cache:
                    try:
                        channel_cache[calendar["channel_id"]] = await self._client.fetch_channel(calendar["channel_id"])
                        logger.debug('\t ADDED CACHED CHANNEL')
                    except Exception as e:
                        # admin deleted this channel, let's delete all calendars with it
                        if calendar_old:
                            self._databaseManager.delete_calendars_by_channel(calendar["channel_id"])
                            logger.debug("\t DELETE BY CHANNEL")
                        continue # obv skip
                else:
                    logger.debug('\t USED CACHED CHANNEL')
                channel = channel_cache[calendar["channel_id"]]
                if channel == None:
                    if calendar_old:
                        self._databaseManager.delete_calendars_by_channel(calendar["channel_id"])
                        logger.debug("\t DELETE BY CHANNEL")
                    continue
                try:
                    message = await channel.fetch_message(calendar["message_id"])
                except Exception as e:
                    # can't find message, delete calendar
                    if calendar_old:
                        self._databaseManager.delete_calendars_by_message(calendar["message_id"])
                        logger.debug("\t DELETE BY MESSAGE")
                    continue # obv skip
                if message == None:
                    if calendar_old:
                        self._databaseManager.delete_calendars_by_message(calendar["message_id"])
                        logger.debug("\t DELETE BY MESSAGE")
                    continue
                logger.debug("\t MESSAGE FOUND")
                i = i + 1
                # save people to remind: everyone who reacted with the hand emoji
                users_to_dm = []
                for reaction in message.reactions:
                    if str(reaction) == "🖐️":
                        async for user in reaction.users():
                            if user != self._client.user:
                                users_to_dm.append(user)
                logger.debug("\t {0} USERS FOUND".format(len(users_to_dm)))
                # fetch the next 7 days of events from teamup in the calendar's TZ
                calendar_tz = pytz.timezone(calendar["timezone"])
                calendar_now = datetime.now().astimezone(calendar_tz)
                start_date = calendar_now
                end_date = start_date + timedelta(days=7)
                teamup_events = await self._teamupManager.get_calendar_events(calendar["teamup_calendar_key"], start_date.strftime(date_fmt), end_date.strftime(date_fmt), calendar["timezone"], None)
                if teamup_events != None:
                    calendar_events = self._calendarsManager.prepare_calendar_data(teamup_events, start_date, end_date, calendar["timezone"])
                else:
                    # Can't fetch events from teamup, skip this calendar (maybe they deleted key)
                    logger.info("[{0}] periodic_update_calendars(reminding users) - can't fetch teamup data".format(datetime.now()))
                    continue
                # update timestamp for calendar
                # Let's add some random variation to spread number of calendars updating every loop
                # time will tell if it helped
                variation = random.randint(0, 360)
                self._databaseManager.update_calendar_timestamp(calendar["ID"], variation)
                #
                # HANDLING REMINDERS
                # - if it takes too long, we can optimize by putting it into `self._calendarsManager.prepare_calendar_data()`
                logger.debug("\t CHECKING EVENTS FOR REMINDERS")
                for day in calendar_events:
                    for event in day:
                        # don't remind all_day events
                        if event["all_day"]:
                            continue
                        event_delta_minutes = (event["start_dt"] - calendar_now).total_seconds() / 60.0
                        if event_delta_minutes <= calendar["reminder_time"]:
                            # check if this event has already been reminded
                            reminded_event = self._databaseManager.get_reminded_event(event["id"], event["version"])
                            # skip reminded
                            if reminded_event != None:
                                continue
                            for user in users_to_dm:
                                logger.debug("\t\t SENDING DM - {0}".format(user.id))
                                await asyncio.sleep(0.3)
                                try:
                                    dm_channel = user.dm_channel
                                    if dm_channel == None:
                                        dm_channel = await user.create_dm()
                                        await asyncio.sleep(0.3)
                                    event["user"] = user
                                    event["calendar_data"] = calendar
                                    reminder_embed = Embeds.create_reminder_embed(event)
                                    await dm_channel.send(content="", embed=reminder_embed)
                                except Exception as e:
                                    logger.info("[{0}] periodic_update_calendars(reminding users) - {1}".format(datetime.now(), str(e)))
                            # save that we reminded this one
                            self._databaseManager.add_reminded_event(event["id"], event["version"])
                events_data = {
                    "week": calendar_events,
                    "start_date": start_date,
                    "end_date": end_date
                }
                logger.debug("\t CREATING EMBED")
                calendar_embed = self._calendarsManager.create_calendar_embed(calendar, events_data)
                Embeds.add_footer(calendar_embed, None)
                if message != None:
                    await asyncio.sleep(2)
                    logger.debug("\t UPDATING MESSAGE")
                    await message.edit(content="", embed=calendar_embed)
                    await message.add_reaction("🖐️") # in case admin removed reactions, add it back
            except Exception as e:
                logger.info("[{0}] periodic_update_calendars(for calendar) - {1}".format(datetime.now(), str(e)))
        # log every loop time
        loop_time = (time.time() - start_time)
        logger.info("[{0}] update took {1}s".format(datetime.now(), round(loop_time, 4)))

    # ==============
    # Messages
    # ==============
    async def send_embed(self, channel, embed_data):
        """Send embed_data (dict) to channel, defaulting its type to INFO."""
        if not "type" in embed_data:
            embed_data["type"] = "INFO"
        embed = Embeds.generate_embed(embed_data)
        await channel.send(embed=embed)

    async def send_message(self, channel, text):
        """Send a plain-text message; returns the created message object."""
        return await channel.send(text)

    def exception_msg(self, error):
        """Wrap an error string in the standard ERROR-embed payload shape."""
        return {
            "embed": {
                "type": "ERROR",
                "title": "An error has occured",
                "fields": [
                    {
                        "name": "Exception",
                        "value": error
                    }
                ]
            }
        }
998,989 | c0afff62c00035a2cc7e76f9639526136de9a4c3 | import tables
import scipy
import sys
sys.path.append("../system/")
sys.path.append("../lifting/")
sys.path.append("../restriction/")
sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
import Point.Point as Point
import Solver.NewtonSolver as NewtonSolver
import Solver.GMRESLinearSolver as GMRES
import Solver.ImplicitEulerDirectLinearSolver as ImDirect
import Utils.conditions.probability as probability
#import inv_transform_sampling as inv_transform
#import inv_transform_determ as inv_transform
import inv_transform_sampling as inv_transform
import histogram
import particles
import pde
import precond
if __name__=="__main__":
    # Run parameters; only -Dt and -N have CLI flags (D has none).
    D = None
    Dt = None
    N = None
    # this way of handling input parameters is described in Langtangen
    while len(sys.argv) > 1 :
        option = sys.argv[1]; del sys.argv[1]
        if option == "-Dt" :
            Dt = float(sys.argv[1]); del sys.argv[1]
            print "#Dt: ", Dt, " (from command line)"
        elif option == "-N" :
            N = int(sys.argv[1]); del sys.argv[1]
            print "#N: ", N, " (from command line)"
    if D == None:
        D = 1./2.
    if Dt == None:
        Dt = 1e-3
    if N == None:
        Nlarge = 100000
        Nsmall = 1000
    # NOTE(review): if -N *is* given, Nlarge/Nsmall are never defined and the
    # param['Nlarge'] assignment below raises NameError -- confirm intent.
    seed = 16
    # discretization parameters
    h=2e-2 # kde mesh width
    dx = 0.05
    dt = 1e-3
    print "nu : ", dx**2/2./D/dt
    xL = -1.7
    xR = 1.7
    # cell-centered grid over [xL, xR] and a normalized noisy initial density
    grid = scipy.arange(xL+dx/2.,xR,dx)
    rho = scipy.ones_like(grid)+\
        0.1*scipy.random.uniform(low=-0.5,high=0.5,size=scipy.shape(grid)[0])
    rho = rho/(sum(rho)*dx)
    print "rho: ", rho
    # Fokker Planck for SDE
    a = 1
    zeta = 0.
    alpha = 1.
    beta = -1
    p = None
    fp_sde = None
    # Fokker Planck for SDE
    lambd = scipy.array([a,D,alpha,beta,zeta])
    sampler_param = inv_transform.Sampler.getDefaultParameters()
    sampler_param['seed']=seed
    lifting = inv_transform.Sampler(sampler_param)
    param_histogram = histogram.Histogram.getDefaultParameters()
    param_histogram['h']=h
    restriction = histogram.Histogram(param_histogram)
    param=particles.Particles.getDefaultParameters()
    param['Dt'] = Dt
    param['Nlarge']=Nlarge
    param['Nsmall']=Nsmall
    param['dt']=dt
    # precond_param=precond.Precond.getDefaultParameters()
    # precond_param['Dstar']=1./2.
    # precond_param['sigma']=scipy.zeros_like(grid)
    # precond_param['kappa']=scipy.zeros_like(grid)
    # param['precond']=precond.Precond(precond_param)
    # NOTE(review): this rebuilds param_histogram/restriction with default
    # parameters, discarding the custom 'h' set above -- confirm intent.
    param_histogram = histogram.Histogram.getDefaultParameters()
    restriction = histogram.Histogram(param_histogram)
    fprev = fp_sde
    x_prev_sim= None
    w_prev_sim = None
    # fp_sde = particles.Particles(lifting,restriction,rho,grid,\
    #    lambd,control = fp_sde, param=param)
    fp_sde= particles.Particles(lifting,restriction,rho,grid, lambd, x_prev_sim, w_prev_sim, param=param )
    print fp_sde
    # print fp_sde2,fp_sde2.control
    # CREATING LINEAR SOLVER
    gmres_param = GMRES.GMRESLinearSolver.getDefaultParameters()
    gmres_param['tol']=1e-8
    gmres_param['print']='short'
    gmres_param['builtin']=True
    linsolv = GMRES.GMRESLinearSolver(gmres_param)
    # linsolv2 = GMRES.GMRESLinearSolver(gmres_param)
    # CREATING NEWTON SOLVER
    newt_param = NewtonSolver.NewtonSolver.getDefaultParameters()
    newt_param['rel_tol']=1e-7
    newt_param['abs_tol']=1e-7
    newt_param['print']='short'
    newt_param['max_iter']=1
    nsolv = NewtonSolver.NewtonSolver(linsolv,newt_param)
    # nsolv2 = NewtonSolver.NewtonSolver(linsolv2,newt_param)
    # CREATING POINT
    psolver_im = ImDirect.ImplicitEulerDirectLinearSolver()
    # psolver_im2 = ImDirect.ImplicitEulerDirectLinearSolver()
    # POINT PARAMETERS
    point_param = Point.Point.getDefaultParameters()
    # point_param['artificial']=[4]
    # point_param['artificial_condition']=[probability.condition]
    pprev = p
    p = Point.Point(fp_sde,nsolv,None,point_param)
    p.correct()
    # p2 = Point.Point(fp_sde2,nsolv2,psolver_im2,point_param)
    # p.correct()
|
998,990 | 69eb4bf441fa935426db03eda7cb237599328b94 | import pygame, sys
import simpy

pygame.init()
(width, height) = (640, 480)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption("Bird Simulation")
# The original called screen.init(), but pygame.Surface has no init() method
# (pygame.init() above already initializes all modules), so the script crashed
# with AttributeError before the window was ever shown.
pygame.display.flip()
print("Hello World")

# Main event loop: exit cleanly on window close, log every other event.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:  # was misspelled 'pyagame.QUIT' (NameError)
            sys.exit(0)
        else:
            print(event)
|
998,991 | bdec0becb56d71ba921f1495e9e2620ff5950c31 | __author__ = 'zhengqin'
class Solution(object):
    """
    Question: Write a function, that given two sorted lists of integers as input,
    returns a single sorted list with items from both lists with no duplicate elements.
    Example:
    input: a = {1,2,3}; b = {4,5,6}; output: c = {1,2,3,4,5,6};
    input: a = {7,8,9}; b = {1,8,20,24}; output: c = {1,7,8,9,20,24};
    input: a = {3,3,4}; b = {4}; output: c = {3,4};
    input: a = {1,2,2,3,3,4,5,6,7}; b = {4,5,6,7,8,8,8}; output: c = {1,2,3,4,5,6,7,8};
    """

    def merge_unique(self, a, b):
        """Merge two sorted int lists into one sorted, duplicate-free list.

        Linear two-pointer merge: O(len(a) + len(b)) time, no sorting needed
        since both inputs are already sorted.
        """
        result = []
        i = j = 0
        while i < len(a) or j < len(b):
            # Take the smaller head, or whichever list still has elements.
            if j >= len(b) or (i < len(a) and a[i] <= b[j]):
                value = a[i]
                i += 1
            else:
                value = b[j]
                j += 1
            # Inputs are sorted, so duplicates arrive adjacently.
            if not result or result[-1] != value:
                result.append(value)
        return result
998,992 | 39726118bd471231a2cac95f0e15d308212839dc | # Write a function which takes two strings and checks whether the characters in the first string form a subsequence of the characters in the second string. The characters in the first should appear without their order changing in the second string.
def forms_subsequence(str1, str2):
    """Return True iff the characters of str1 appear in str2 in the same
    relative order (not necessarily contiguously).

    The original dict-based version collapsed duplicate characters of str1
    and ignored order within str2, so e.g. forms_subsequence('aba', 'aba')
    wrongly returned False.
    """
    it = iter(str2)
    # `c in it` consumes the iterator up to (and including) the first match,
    # which enforces the left-to-right ordering requirement.
    return all(c in it for c in str1)
# Demo: 'abc' is a subsequence of 'abracadabra' but not of 'acb'.
print(forms_subsequence('abc', 'abracadabra'))
print(forms_subsequence('abc', 'acb'))
998,993 | ab67e4865beca24295be5b7d68b6bbeaa8aebb34 | class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
# Three conditions:
# 1. num has to be positive
# 2. num has to contain only one '1' bit (not num&(num-1))
# 3. num's single '1' bit has to be the ones in 0x55555555
return (num > 0) and (not num&(num-1)) and ((num & 0x55555555) == num)
|
998,994 | 4557416a6374c67480b79993c2740b690a9a7250 | # Hugo Colenbrander 27-07-2020
# Given: Three positive integers k, m, and n,
# representing a population containing k+m+n organisms:
# k individuals are homozygous dominant for a factor,
# m are heterozygous, and n are homozygous recessive.
# Return: The probability that two randomly selected mating organisms will
# produce an individual possessing a dominant allele (and thus displaying
# the dominant phenotype).
# Assume that any two organisms can mate.
# amount in population
# Population counts: k homozygous dominant, m heterozygous, n homozygous recessive.
khomo = 24
mhetero = 30
nrec = 27

# sum of population
total = khomo + mhetero + nrec

# Probability of each genotype being picked as the first parent.
p_first_homo = khomo / total
p_first_hetero = mhetero / total
p_first_rec = nrec / total

# Second pick is without replacement, so total - 1 organisms remain.
# After a heterozygous first pick:
p_homo_after_hetero = khomo / (total - 1)
p_hetero_after_hetero = (mhetero - 1) / (total - 1)
p_rec_after_hetero = nrec / (total - 1)
# After a recessive first pick:
p_homo_after_rec = khomo / (total - 1)
p_hetero_after_rec = mhetero / (total - 1)

# P(offspring shows the dominant phenotype):
# - a homozygous-dominant parent always contributes a dominant allele (prob 1),
# - hetero x hetero -> 3/4, hetero x recessive -> 1/2, rec x rec -> 0.
# (The original also computed several conditional probabilities for a
# homozygous-dominant first pick and a recessive x recessive pairing that
# were never used in the sum; those dead variables are dropped here.)
prob_dominant = p_first_homo + (p_first_hetero * p_homo_after_hetero) + (
    p_first_hetero * p_hetero_after_hetero * 0.75) + (
    p_first_hetero * 0.5 * p_rec_after_hetero) + (
    p_first_rec * p_homo_after_rec) + (
    p_first_rec * p_hetero_after_rec * 0.5)

# find out the probability of any parents making a child with a dominant allel
print(prob_dominant)
998,995 | c6834feceb556fa08927d171f25e1168f4ae1e0e | from django.shortcuts import render, get_list_or_404, get_object_or_404
from .models import *
from django.utils import timezone
from datetime import datetime, timedelta, date
# Create your views here.\
def index(request):
    """Render the landing page: banner, intro, video, partner logos, key values."""
    footer = Footer.objects.all()[0]
    return render(request, 'main/index.html', {
        'recruiting_banner': RecruitingBanner.objects.all()[0],
        'introduction': Introduction.objects.all()[0],
        'youtube': Youtube.objects.all()[0],
        'logos': Logo.objects.all().order_by('order'),
        'key_values': KeyValue.objects.all().order_by('order')[0:3],
        'footer': footer,
    })
def about(request):
    """Render the about page with its content block and ordered greetings."""
    footer = Footer.objects.all()[0]
    return render(request, 'main/about.html', {
        'content': About.objects.all()[0],
        'footer': footer,
        'greetings': Greeting.objects.all().order_by('order'),
    })
def activities(request):
    """Render the activities page."""
    footer = Footer.objects.all()[0]
    return render(request, 'main/activities.html', {
        'content': Activities.objects.all()[0],
        'footer': footer,
    })
def join(request):
    """Render the join page, including the recruiting banner."""
    footer = Footer.objects.all()[0]
    content = Join.objects.all()[0]
    return render(request, 'main/join.html', {
        'recruiting_banner': RecruitingBanner.objects.all()[0],
        'content': content,
        'footer': footer,
    })
def elements(request):
    """Render the elements demo page."""
    footer = Footer.objects.all()[0]
    return render(request, 'main/elements.html', {
        'elements': Elements.objects.all()[0],
        'footer': footer,
    })
|
998,996 | 3923a78f91b773528b51d013c5a9823b8c55f9ce | from django.urls import path
from . import views
# URL routes for this app; the `name` values are used by {% url %} reverses.
urlpatterns = [
    path('', views.index, name='index'),
    path('introduction', views.introductionview, name='introduction'),
    path('faq',views.faqview , name='faq'),
    path('product',views.productview, name='product'),
]
998,997 | 05762a0e7189632d17138e054680a6d522ea6e63 | #!/usr/local/bin/python3
import time
import logging
import logging.handlers
import datetime
import pymysql
import traceback
class BaikalDBClient:
    """Thin pymysql wrapper with automatic reconnect and retried writes."""

    def __init__(self, host = '',user = '',passwd = '',db = '',port = 3306,charset = 'utf8'):
        self.host = host
        self.user = user
        self.passwd = passwd
        self.db = db
        self.port = port
        self.charset = charset
        self.conn = None
        self.cursor = None
        # NOTE(review): logger name 'test' looks like a leftover -- confirm.
        self.logger = logging.getLogger('test')
        if not self.reconn():
            raise Exception("connect error")

    def connect(self):
        """Open a fresh connection and cursor; return True on success."""
        try:
            self.conn = pymysql.Connection(
                host = self.host,
                user = self.user,
                password = self.passwd ,
                db = self.db,
                port = self.port,
                charset = self.charset)
            self.cursor = self.conn.cursor()
            return True
        except pymysql.Error as e:
            self.logger.error(e)
            print(e)
            return False

    def reconn(self, num = 3, stime = 1):
        """Ping the connection; reconnect up to `num` extra times, sleeping
        `stime` seconds between attempts. Returns True when connected."""
        number = 0
        status = False
        while not status and number <= num:
            try:
                # When self.conn is still None this raises AttributeError,
                # which the bare except below turns into a connect() attempt.
                self.conn.ping()
                status = True
            except:
                status = self.connect()
                number += 1
                time.sleep(stime)
        return status

    def query(self, exesql):
        """Execute `exesql` and commit; retry up to 3 times on pymysql errors.
        Returns 1 on success, 0 after exhausting the retries."""
        flag = 0
        retry = 0
        while flag == 0 and retry < 3:
            try:
                self.cursor.execute(exesql)
                self.conn.commit()
                flag = 1
            except pymysql.Error as e:
                # NOTE(review): rollback() after reconn() may act on a brand
                # new connection, and self.cursor can be stale here -- confirm.
                self.reconn()
                self.logger.error(traceback.format_exc())
                self.conn.rollback()
                time.sleep(1)
                retry = retry + 1
        return flag
|
998,998 | fb2822c737732bb309bce1cada5770cc5031990d | class Node(object):
    def __init__(self: 'Node', value: object) -> None:
        '''Create a linked-list node holding value, with no successor yet.'''
        self._value = value
        self.next = None  # next Node in the chain, or None at the end
    def get_value(self: 'Node') -> object:
        '''Return the value stored in this node.'''
        return self._value
class Queue(object):
    '''An implementation of the queue abstract data type (FIFO), backed by a
    singly linked list of Nodes.'''

    def __init__(self: 'Queue') -> None:
        '''Initialize self as an empty queue.'''
        self._front = None
        self._tail = None
        self._size = 0  # tracked manually; the node chain is not O(1)-countable

    def __len__(self: 'Queue') -> int:
        '''Return the length of this queue.'''
        return self._size

    def is_empty(self: 'Queue') -> bool:
        '''Return True iff self contains no elements.'''
        return self._front is None

    def enqueue(self: 'Queue', item: object) -> None:
        '''Add item to the end of self.'''
        node = Node(item)  # wrap item in a Node
        self._size += 1
        if self._tail is None:
            # first element: it is both front and tail
            self._front = node
            self._tail = node
        else:
            self._tail.next = node
            self._tail = node

    def front(self: 'Queue') -> object:
        '''Return the element at the front of self.

        Precondition: self is not empty.
        '''
        return self._front.get_value()

    def dequeue(self: 'Queue') -> object:
        '''Return and remove the element at the front of self.

        Precondition: self is not empty.'''
        node = self._front
        self._front = node.next
        if self._front is None:  # queue just became empty
            self._tail = None
        self._size -= 1
        return node.get_value()
class Stack(object):
    '''An implementation of the stack abstract data type (LIFO), backed by a
    singly linked list of Nodes.'''

    def __init__(self: 'Stack') -> None:
        '''Initialize self as an empty stack.'''
        self._top = None
        self._size = 0

    def __len__(self: 'Stack') -> int:
        '''Return the length of self.'''
        return self._size

    def is_empty(self: 'Stack') -> bool:
        '''Return True iff self contains no elements.'''
        return self._top is None

    def push(self: 'Stack', item: object) -> None:
        '''Add item to the top of self.'''
        node = Node(item)
        node.next = self._top  # new node points at the previous top
        self._top = node
        self._size += 1

    def peek(self: 'Stack') -> object:
        '''Return the element at the top of self.

        Precondition: self is not empty.'''
        return self._top.get_value()

    def pop(self: 'Stack') -> object:
        '''Return and remove the element at the top of self.

        Precondition: self is not empty.'''
        node = self._top
        self._top = node.next
        self._size -= 1
        return node.get_value()
|
998,999 | c19da4698c630e0f8c89e69f2108ae3123428636 | __author__ = 'micanzhang'
import web
from app.routes import urls
from app.model import DBSession
from app.helper import load_sqla, orm, SQLAStore
# web.py application instance
app = web.application(urls, globals())
# Attach a SQLAlchemy session to every request via the processor hook.
app.add_processor(load_sqla)
web.ctx.orm = orm()
# Create the session store exactly once; module reloads reuse the stored one.
if web.config.get('_session') is None:
    web.config._session = web.session.Session(app, SQLAStore(DBSession), initializer={'login': 0, 'user': None})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.