index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
23,600 | ee3e96aa65e8c9bbfccfa179d4a71fccc5b2f36b | import requests
import os
import calendar
import datetime
import sqlite3 as sql
import logging
import pyparsing as pp
import glob

# Grammar for the numeric part of a price field (e.g. "123.45").
num_field = pp.Word(pp.nums + ".")
logger = logging.getLogger('ftpuploader')

# Parameterised statement: placeholders avoid SQL injection and the quoting
# problems of the original string-built INSERT.
sql_ins = ("insert into scheme_nav_setails(Scheme_Code,Scheme_Name,"
           "Repurchase_Price,Net_Asset_Value,Sale_Price,record_Date,Day_name) "
           "values(?,?,?,?,?,?,?)")


def _parse_price(raw):
    """Return the leading numeric token of *raw*, or '0.0' if it is malformed."""
    try:
        return num_field.parseString(raw)[0]
    except pp.ParseException:
        return '0.0'


# Download and ingest the NAV history report for each AMFI fund-house id.
for fund_id in range(100):
    url = ("http://portal.amfiindia.com/DownloadNAVHistoryReport_Po.aspx?mf="
           + str(fund_id) + "&tp=1&frmdt=01-Jan-1999&todt=7-Dec-2017")
    r = requests.get(url)
    file_name = str(fund_id) + ".txt"
    with open(file_name, "w", encoding='utf8') as report_file:
        report_file.write(r.text)

    conn = sql.connect("nav.db")
    cur = conn.cursor()
    file_write = open('process.txt', 'w+')
    with open(file_name, 'r') as report:
        # The report starts with 8 header lines; skip them.
        for _ in range(8):
            next(report)
        for line in report:
            fields = line.rstrip().split(';')
            # A data row has exactly 6 ';'-separated fields with a scheme code.
            if len(fields) == 6 and fields[0] != '':
                repurchase = _parse_price(fields[2])
                nav = _parse_price(fields[3])
                # BUG FIX: the original parsed fields[3] twice for the sale
                # price and, on parse failure, assigned data_3 instead of
                # data_4 (leaving the sale price stale or undefined).
                sale = _parse_price(fields[4])
                try:
                    day_name = calendar.day_name[
                        datetime.datetime.strptime(fields[5], '%d-%b-%Y').weekday()]
                    cur.execute(sql_ins, (fields[0], fields[1].replace("'", ""),
                                          repurchase, nav, sale, fields[5], day_name))
                except Exception as e:
                    # Keep the original best-effort behaviour: record the
                    # failing statement and carry on with the next row.
                    file_write.write(sql_ins)
                    print(fields)
                    print(sql_ins)
                    logger.exception('Failed: ' + str(e))
    conn.commit()
    # BUG FIX: SQLite's VACUUM takes no table name; "VACUUM scheme_nav_setails"
    # raises OperationalError because it names a non-existent attached schema.
    cur.execute("VACUUM")
    conn.close()
    file_write.close()
    print(file_name)
    os.remove(file_name)
|
23,601 | 6695997129a82f677313e2c774edcd2efa8bf163 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import argparse
import logging
import os
import random
import numpy as np
import torch
from nltk.stem import PorterStemmer
from pytorch_transformers import BertTokenizer
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.modeling_bert import BertForMaskedLM
from eval_utils import eval_ss_scores, evaluation_pipeline_scores
from preprocess_utils import convert_whole_word_to_feature, convert_token_to_feature, convert_sentence_to_token
from read_utils import get_word_map, get_word_count, read_eval_dataset, read_eval_index_dataset
from selection import substitution_selection, lm_score, raw_score_substitutions, substitution_ranking
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Command-line interface for the BERT lexical-simplification evaluation run.
parser = argparse.ArgumentParser()
parser.add_argument("--device", default="cuda", type=str)
parser.add_argument("--eval_path", default="eval_data/BenchLS.txt", type=str,  # required=True,
                    help="The evaluation data path, includes file name as well!")
parser.add_argument("--bert_model", default="bert-large-uncased-whole-word-masking", type=str,  # required=True,
                    help="Bert pre-trained model selected in the list: bert-base-uncased, "
                         "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                         "bert-base-multilingual-cased, bert-base-chinese.")
parser.add_argument("--output_SR_file", default="output/output.txt", type=str,  # required=True,
                    help="The output directory of writing substitution selection.")
parser.add_argument("--word_embeddings", default="crawl-300d-2M-subword.vec", type=str,
                    help="The path to the word embeddings file")
parser.add_argument("--word_frequency", default="frequency_merge_wiki_child.txt", type=str,
                    help="The path to the word frequency file")
# Other parameters
parser.add_argument("--cache_dir", default="", type=str,
                    help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
                    help="The maximum total input sequence length after WordPiece tokenization. \n"
                         "Sequences longer than this will be truncated, and sequences shorter \n"
                         "than this will be padded.")
# NOTE(review): action='store_true' combined with default=True means this flag
# is always True regardless of the command line — confirm whether that is
# intentional (main() raises if it is ever False).
parser.add_argument("--do_eval", default=True, action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
parser.add_argument("--eval_batch_size", default=8, type=int, help="Total batch size for eval.")
parser.add_argument("--num_selections", default=10, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--num_eval_epochs", default=1, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.1, type=float,
                    help="Proportion of training to perform linear learning rate warmup for. "
                         "E.g., 0.1 = 10%% of training.")
# parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
# NOTE(review): same store_true/default=True pattern as --do_eval — fp16 is
# effectively always on; confirm.
parser.add_argument('--fp16', default=True, action='store_true',
                    help="Whether to use 16-bit float precision instead of 32-bit")
# parser.add_argument('--loss_scale', type=float, default=0,
#                     help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
#                          "0 (default value): dynamic loss scaling.\n"
#                          "Positive power of 2: static loss scaling value.\n")
# parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
# parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def extract_context(words, mask_index, window):
    """Return the sub-list of *words* centred on ``mask_index``.

    The slice spans at most ``window // 2`` words on each side of the
    target word and is clipped at both ends of the sentence.
    """
    n = len(words)
    assert 0 <= mask_index < n
    half = window // 2
    lo = mask_index - half
    hi = mask_index + half + 1
    return words[max(0, lo):min(n, hi)]
def main():
    """Run BERT-based lexical simplification over an evaluation dataset.

    Loads a masked-LM BERT model, generates substitution candidates for each
    marked complex word, ranks them, and appends the evaluation scores to
    ``args.output_SR_file``.
    """
    args = parser.parse_args()

    # Fall back to CPU unless CUDA was requested AND is actually available.
    DEVICE = torch.device(args.device if args.device == "cuda" and torch.cuda.is_available() else "cpu")
    print('Using device:', DEVICE)
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if not args.do_eval:
        raise ValueError("At least `do_eval` must be True.")

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    # reading the pretrained model
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE),
                                                                   'distributed_{}'.format(args.local_rank))
    pretrained_bert_model = BertForMaskedLM.from_pretrained(args.bert_model, cache_dir=cache_dir)
    if args.fp16:
        pretrained_bert_model.half()
    pretrained_bert_model.to(DEVICE)

    print("Loading embeddings ...")
    word_vec_path = args.word_embeddings
    wv_dict, wv_emb = get_word_map(word_vec_path)
    print("Loaded. Loading word counts...")
    word_count_path = args.word_frequency
    word_count = get_word_count(word_count_path)
    print("Loaded.")

    stemmer_for_matching = PorterStemmer()

    # Accumulators over all evaluation sentences.
    SS = []                  # candidate substitution pool per sentence
    substitution_words = []  # final chosen substitution per sentence
    source_words = []        # original complex word per sentence
    window_context = 11      # size of the word window around the target

    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # lex.mturk uses a different file layout and therefore its own reader.
        file_name = args.eval_path.split('/')[-1][:-4]
        if file_name == 'lex.mturk':
            eval_examples, mask_words, mask_labels = read_eval_dataset(args.eval_path)
        else:
            eval_examples, mask_words, mask_labels = read_eval_index_dataset(args.eval_path)
        eval_size = len(eval_examples)
        logger.info("Running evaluation")
        logger.info("Num examples = %d", eval_size)

        # disable training mode for BERT
        pretrained_bert_model.eval()
        for i in range(eval_size):
            source_word = mask_words[i]
            print("Sentence {} rankings: ".format(i))
            tokens, words, position = convert_sentence_to_token(eval_examples[i], args.max_seq_length, tokenizer)
            assert len(words) == len(position)

            # Locate the target word and its surrounding context window.
            mask_index = words.index(source_word)
            mask_context = extract_context(words, mask_index, window_context)
            mask_position = position[mask_index]

            # A list position means the word was split into several WordPieces.
            if isinstance(mask_position, list):
                feature = convert_whole_word_to_feature(tokens, mask_position, args.max_seq_length, tokenizer)
            else:
                feature = convert_token_to_feature(tokens, mask_position, args.max_seq_length, tokenizer)
            tokens_tensor = torch.tensor([feature.input_ids]).to(DEVICE)
            token_type_ids = torch.tensor([feature.input_type_ids]).to(DEVICE)
            attention_mask = torch.tensor([feature.input_mask]).to(DEVICE)

            # Predict all tokens
            with torch.no_grad():
                prediction_scores = pretrained_bert_model(tokens_tensor, token_type_ids, attention_mask)
            if isinstance(prediction_scores, tuple):
                prediction_scores = prediction_scores[0]

            # Top-20 most likely fillers for the masked position.
            if isinstance(mask_position, list):
                predicted_top = prediction_scores[0, mask_position[0]].topk(20)
            else:
                predicted_top = prediction_scores[0, mask_position].topk(20)
            pre_tokens = tokenizer.convert_ids_to_tokens(predicted_top[1].cpu().numpy())

            # Filter the raw predictions into a candidate substitution pool.
            initial_subs_pool = substitution_selection(source_word, pre_tokens, stemmer_for_matching, args.num_selections)
            print("\n\n")
            print(initial_subs_pool)
            SS.append(initial_subs_pool)
            source_words.append(source_word)

            # Rank the pool and keep the single best substitution.
            pre_word = substitution_ranking(source_word, mask_context, initial_subs_pool, wv_dict, wv_emb, word_count,
                                            tokenizer, pretrained_bert_model, mask_labels[i], DEVICE)
            substitution_words.append(pre_word)

        # Score the whole run and append the numbers to the output file.
        with open(args.output_SR_file, "a+") as output_sr_file:
            potential, precision, recall, F_score = eval_ss_scores(SS, mask_labels)
            print("The score of evaluation for substitution selection")
            output_sr_file.write(
                "\t".join([str(item) for item in [args.num_selections, precision, recall, F_score]]) + "\t")
            print(potential, precision, recall, F_score)

            precision, accuracy, changed_proportion = \
                evaluation_pipeline_scores(substitution_words, source_words, mask_labels)
            print("The score of evaluation for full LS pipeline")
            print(precision, accuracy, changed_proportion)
            output_sr_file.write(str(precision) + '\t' + str(accuracy) + '\n')


if __name__ == "__main__":
    main()
|
23,602 | 37e8e9f10f1848a5120b34229884f32baae45601 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sumNumbers(self, root):
        """Sum all root-to-leaf numbers of a binary tree.

        Each root-to-leaf path spells a decimal number (most significant
        digit at the root); the result is the sum over all leaves.

        :type root: TreeNode
        :rtype: int
        """
        # Idiom fix: compare to None with `is`, not `==`.
        if root is None:
            return 0
        result = []
        self.find_numbers(root, 0, result)
        # result holds one number per root-to-leaf path.
        return sum(result)

    def find_numbers(self, root, item, result):
        """Accumulate the path value into *item*; append it at each leaf."""
        item = item * 10 + root.val
        # Leaf node: the path number is complete.
        if root.left is None and root.right is None:
            result.append(item)
            return
        if root.left is not None:
            self.find_numbers(root.left, item, result)
        if root.right is not None:
            self.find_numbers(root.right, item, result)
|
23,603 | 510f56a9fdbbeefa98d47c1e34be49d393119157 | import numpy as np
import pandas as pd
import math
from enum import Enum
from collections import namedtuple
from collections import Counter
from operator import itemgetter
from pprint import pformat
from random import randint
from functools import reduce
from scipy.linalg import fractional_matrix_power as power
# scripts below are disabled: they do not work correctly
# (and run too slowly)
"""
class KDTree(object):
def _build_tree(self, objects, k, axis=0):
if not objects:
return None
objects.sort(key=lambda o: o[0][axis])
median_idx = int(len(objects) / 2)
median_point, median_label = objects[median_idx]
median_id = self._id_gen
self._id_gen += 1
next_axis = (axis + 1) % k
#print("point={}, label={}, id={}".format(median_point[:5], median_label, median_id))
return Node(point=median_point,
left=self._build_tree(objects[:median_idx], k, next_axis),
right=self._build_tree(objects[median_idx + 1:], k, next_axis),
axis=axis, label=median_label, id=median_id)
def __init__(self, k, objects=[]):
self._id_gen = 0
self.root = self._build_tree(list(objects), k)
def nearest_neighbors(self, destination, num_neighbors):
bests = [[None, None, float('inf'), None] for i in range(num_neighbors)]
used_ids = set()
for num_best in range(num_neighbors):
stack = []
stack.append(self.root)
while stack:
node = stack.pop(0)
if node is not None:
point, left, right, axis, label, id = node
if id not in used_ids:
#print("id={}, used_ids={}, len(bests)={}".format(id, used_ids, [bests[i][3] for i in range(len(bests))]))
here_sd = square_distance(point, destination)
if here_sd < bests[num_best][2]:
if bests[num_best][3] is not None:
used_ids.remove(bests[num_best][3])
bests[num_best][:] = point, label, here_sd, id
used_ids.add(id)
diff = destination[axis] - point[axis]
close, away = (left, right) if diff <= 0 else (right, left)
stack.append(close)
if diff ** 2 < bests[num_best][2]:
stack.append(away)
#self._recursive_search(self.root, num_best, bests, used_ids, destination)
return bests
"""
def square_distance(a, b, D=None):
    """Squared (Mahalanobis-style) distance (a-b)^T D (a-b).

    *D* defaults to the identity matrix, giving the plain squared
    Euclidean distance.
    """
    if D is None:
        D = np.identity(a.shape[0])
    diff = a - b
    return diff.dot(D).dot(diff.transpose())
def cos_distance(a, b):
    """Cosine distance: 1 minus the cosine of the angle between a and b."""
    similarity = a.dot(b) / (math.sqrt(a.dot(a)) * math.sqrt(b.dot(b)))
    return 1 - similarity
class Scaler:
    """Column-wise standardiser (subtract mean, divide by std).

    On the first call to scale() with no preloaded statistics, the mean and
    std of each column are computed, applied, and persisted to the file
    "mean_std" in the working directory. A Scaler constructed with a file
    path reuses previously saved statistics instead.
    """

    def __init__(self, file=None):
        if file is None:
            # Statistics will be computed lazily on the first scale() call.
            self.mean = []
            self.std = []
        else:
            # First line of the file holds the means, second the stds,
            # both comma-separated.
            with open(file) as reader:
                self.mean = list(map(float, reader.readline().split(",")))
                self.std = list(map(float, reader.readline().split(",")))

    def scale(self, arr):
        """Return a standardised copy of 2-D array *arr* (rows = samples)."""
        res = arr.copy()
        if len(self.mean) == 0:
            # Fit-and-transform path: compute per-column statistics.
            for j in range(res.shape[1]):
                self.mean.append(res[:, j].mean())
                if res[:, j].std() > 0:
                    self.std.append(res[:, j].std())
                else:
                    # Constant column: avoid division by zero.
                    self.std.append(0.01)
                res[:, j] = (res[:, j] - self.mean[j])/self.std[j]
            # Side effect: persist the fitted statistics for later runs.
            with open("mean_std", "w") as writer:
                writer.write(",".join([str(item) for item in self.mean]) + "\n")
                writer.write(",".join([str(item) for item in self.std]) + "\n")
        else:
            # Transform-only path using previously loaded statistics.
            for j in range(res.shape[1]):
                res[:, j] = (res[:, j] - self.mean[j])/self.std[j]
        return res
def train_test_split(Xy, test_size):
    """Randomly partition the rows of *Xy* into (train, test) arrays.

    Exactly *test_size* distinct rows go to the test set; all remaining
    rows form the training set.
    """
    chosen = set()
    # Draw until we have test_size distinct row indices.
    while len(chosen) < test_size:
        candidate = randint(0, Xy.shape[0] - 1)
        chosen.add(candidate)
    remaining = set(range(Xy.shape[0])) - chosen
    train_rows = list(remaining)
    test_rows = list(chosen)
    return Xy[train_rows, :], Xy[test_rows, :]
def accuracy(pred_y, test_y):
    """Return the fraction of positions where prediction equals ground truth.

    Like the original, raises ZeroDivisionError for empty predictions.
    """
    # Idiomatic count of matches instead of a manual counter loop.
    good = sum(1 for p, t in zip(pred_y, test_y) if p == t)
    return good / len(pred_y)
def const(r):
    """Uniform (rectangular) kernel: constant weight 1."""
    return 1


def epan(r):
    """Epanechnikov kernel: 3/4 * (1 - r^2)."""
    return 3/4 * (1 - r*r)


def quart(r):
    """Quartic (biweight) kernel: 15/16 * (1 - r^2)^2."""
    s = 1 - r*r
    return 15/16 * s * s


def trian(r):
    """Triangular kernel: linearly decreasing weight 1 - r."""
    return 1 - r


def gauss(r):
    """Gaussian kernel: standard normal density at r."""
    return (2 * math.pi)**(-0.5) * math.e**(-0.5*r*r)
class KNN:
    """Brute-force k-nearest-neighbour classifier with kernel weighting."""

    def __init__(self):
        # List of (feature_vector, label) pairs added by fit().
        self.instances = []

    def fit(self, X, y):
        """Memorise the training rows of X with their labels y."""
        for i in range(X.shape[0]):
            self.instances.append((X[i, :], y[i]))

    def nearest_neighbors(self, dest, num_nrs, D):
        """Return the num_nrs closest stored instances to *dest*.

        Each entry of the returned list is [label, distance, point],
        sorted by increasing distance. D is accepted for the (disabled)
        Mahalanobis variant but unused by the cosine distance.
        NOTE(review): if fewer than num_nrs instances are stored, placeholder
        [None, inf, None] entries remain in the result — confirm callers cope.
        """
        bests = [[None, float('inf'), None] for i in range(num_nrs)]
        for i in range(len(self.instances)):
            point, label = self.instances[i]
            #dist = square_distance(dest, point, D)
            dist = cos_distance(dest, point)
            # Only insert when closer than the current worst candidate.
            if dist < bests[num_nrs - 1][1]:
                bests.append([label, dist, point])
                bests.sort(key=lambda item: item[1])
                bests.pop()
        return bests

    def predict_one(self, instance, num_nrs, weight_func, D):
        """Predict a label for one instance by kernel-weighted voting."""
        res = self.nearest_neighbors(instance, num_nrs, D)
        # h: distance to the farthest neighbour, used to normalise distances
        # into [0, 1] before applying the kernel.
        h = res[-1][1]
        points = {}
        val_max = -1
        ind_max = None
        for i in range(len(res)):
            # Accumulate kernel weight per label; track the running argmax.
            if res[i][0] not in points.keys():
                points[res[i][0]] = weight_func(res[i][1]/h)
            else:
                points[res[i][0]] += weight_func(res[i][1]/h)
            if points[res[i][0]] > val_max:
                val_max = points[res[i][0]]
                ind_max = res[i][0]
        #print(ind_max)
        #return ind_max, [(res[j][0], res[j][1]) for j in range(len(res))]
        return ind_max

    def predict(self, X, num_nrs, weight_func):
        """Predict a label for every row of X."""
        y = []
        for i in range(X.shape[0]):
            #D = self.dann(X[i, :], num_nrs + 20)
            D = np.identity(X.shape[1])
            y.append(self.predict_one(X[i, :], num_nrs, weight_func, D))
        return y
def LOO(params, X, y, reps=100):
    """Randomised leave-one-out hyper-parameter search for KNN.

    params: iterable of (num_neighbors, weight_func) pairs.
    For each pair, up to *reps* random rows are each held out once, a KNN
    is fitted on the rest, and correct predictions are counted.
    Returns (best_score, best_param).
    """
    D = np.identity(X.shape[1])
    val_max = 0
    param_max = None
    for param in params:
        s = 0
        used_inds = set()
        for i in range(reps):
            # BUG FIX: randint is inclusive at both ends, so the original
            # randint(0, X.shape[0]) could yield an out-of-range row index
            # and crash with IndexError.
            ind = randint(0, X.shape[0] - 1)
            if ind not in used_inds:
                used_inds.add(ind)
                # Hold out row `ind`; train on everything else.
                X1 = np.vstack((X[:ind, :], X[ind + 1:, :]))
                y1 = np.hstack((y[:ind], y[ind + 1:]))
                x_test = X[ind, :]
                y_test = y[ind]
                est = KNN()
                est.fit(X1, y1)
                if est.predict_one(x_test, param[0], param[1], D) == y_test:
                    s += 1
        print("s", s)
        print("param", param)
        print("val_max", val_max)
        if s > val_max:
            val_max = s
            param_max = param
    return val_max, param_max
if __name__ == "__main__":
    # The two triple-quoted sections below are disabled experiments
    # (hold-out testing and leave-one-out parameter search).
    """
    #testing
    #Xy = np.loadtxt("learn.csv", delimiter=",", skiprows=1)[:, 2:]
    Xy = np.load("Xy.npy")
    Xy_train, Xy_test = train_test_split(Xy, 10)
    X_train = Xy_train[:, :-1]
    y_train = Xy_train[:, -1]
    #scaler = Scaler()
    #X_train = scaler.scale(X_train)
    #corr_features = list(map(int, open("corr_features").readline().split(",")))
    #X_train = X_train[:, corr_features]
    est = KNN()
    est.fit(X_train, y_train)
    X_test = Xy_test[:, :-1]
    y_test = Xy_test[:, -1]
    #X_test = scaler.scale(X_test)
    #X_test = X_test[:, corr_features]
    pred_y = est.predict(X_test, 10, trian)
    for i in range(len(pred_y)):
        print(y_test[i], pred_y[i])
    print(accuracy(pred_y, y_test))
    """
    """
    # leave one out
    Xy = np.loadtxt("learn.csv", delimiter=",", skiprows=1)[:, 1:]
    #Xy, _ = train_test_split(Xy, test_size=1000)
    print("Xy.shape", Xy.shape)
    X = Xy[:, :-1]
    scaler = Scaler()
    X = scaler.scale(X)
    #corr_features = list(map(int, open("corr_features").readline().split(",")))
    #X = X[:, corr_features]
    y = Xy[:, -1]
    print("X.shape", X.shape)
    params = []
    for num_nrs in [5, 15, 25, 50, 100, 150]:
        for func in [epan, const, gauss, quart, trian]:
            params.append((num_nrs, func))
    print(LOO(params, X, y))
    """
    #get answer
    # Active path: fit on learn.csv, predict test.csv, write "answer" CSV.
    Xy = np.loadtxt("learn.csv", delimiter=",", skiprows=1)[:, 1:]
    X_train, y_train = Xy[:, :-1], Xy[:, -1]
    #with open("useful_columns") as reader:
    #    useful_cols = list(map(int, reader.readline().split(",")))
    #X_train = X_train[:, useful_cols[:-1]]
    # Standardise features; statistics are persisted to "mean_std" by Scaler.
    scaler = Scaler()
    X_train = scaler.scale(X_train)
    est = KNN()
    est.fit(X_train, y_train)
    Xid = np.loadtxt("test.csv", delimiter=",", skiprows=1)
    #np.save("Xid.npy", Xid)
    #Xid = np.load("Xid.npy")
    # First column of test.csv is the row id; the rest are features.
    X_test, ids = Xid[:, 1:], Xid[:, 0]
    #X_test = X_test[:, useful_cols[:-1]]
    X_test = scaler.scale(X_test)
    # 25 neighbours with the quartic kernel (chosen by the disabled LOO run).
    pred_y = est.predict(X_test, 25, quart)
    with open("answer", "w") as writer:
        writer.write("id,label\n")
        for i in range(len(pred_y)):
            writer.write(str(int(ids[i])) + "," + str(int(pred_y[i])) + "\n")
|
23,604 | 4d530b0bc3f2c9e082a8cbcac7e3bc3e7ae9c8c3 | import cv2
import numpy as np
import os

# Load the face-detection network.
# cv2.dnn.readNet loads a previously trained model (network) for inference.
#   model  : binary file holding the trained weights (*.caffemodel, ...)
#   config : text file describing the network architecture (*.prototxt, ...)
# NOTE(review): facenet is loaded but never used in this script — confirm intent.
facenet = cv2.dnn.readNet('models/deploy.prototxt', 'models/res10_300x300_ssd_iter_140000.caffemodel')

# with_dir    : directory of images with masks
# without_dir : directory of images without masks
with_dir = os.path.join('raw_data/with_mask2')
without_dir = os.path.join('raw_data/without_mask1')

# Print the number of files in the with-mask directory,
# then the number of files in the without-mask directory.
print('total training withmask images:', len(os.listdir(with_dir)))
print('total training withoutmask images:', len(os.listdir(without_dir)))

# withimgnum    : file count of the with-mask directory
# withoutimgnum : file count of the without-mask directory
withimgnum = len(os.listdir(with_dir))
withoutimgnum = len(os.listdir(without_dir))

# with_files    : list of file names in the with-mask directory
# without_files : list of file names in the without-mask directory
with_files = os.listdir(with_dir)
without_files = os.listdir(without_dir)

# Print each (index, filename) pair from with_files.
for i in enumerate(with_files):
    print(i)
23,605 | 9a98e3366582b5c799a8e6365645c254bcf41089 | # -*- coding:utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from core import *
import time
def mailSend(msg):
    """Send *msg* as an EUC-KR encoded e-mail via the company SMTP relay."""
    HOST = 'mail.monitorapp.com'
    me = 'shsim@monitorapp.com'
    you = 'shsim@monitorapp.com'
    contents = msg
    # Today's date, formatted YYYY-MM-DD, appended to the subject line.
    now = time.localtime()
    s = "%04d-%02d-%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
    msg = MIMEText(contents, _charset='euc-kr')
    # NOTE(review): bytes + str concatenation below only works on Python 2;
    # on Python 3 it raises TypeError — confirm the target interpreter.
    msg['Subject'] = u'Kisa 취약점 업데이트 '.encode('euc-kr')+str(s)
    msg['From'] = me
    msg['To'] = you
    s = smtplib.SMTP(HOST)
    s.ehlo()  # say Hello
    s.starttls()  # required when using TLS
    # s.login(me, '1234asdf')
    s.sendmail(me, you, msg.as_string())  # [you] --> error
    s.quit()
def send_mail():
    """Fetch the latest notice and e-mail it when its status is 'new'.

    get_data() and get_form() come from `from core import *` — their exact
    contracts are defined elsewhere; this assumes data carries 'status',
    'text' and 'url' keys.
    """
    data = get_data()
    if data['status'] == "new":
        # Compose the notification body: headline, text, URL, form details.
        text = "[+]New notice!!\n\n"
        text += data['text']+"\n"
        text += data['url']+"\n\n\n"
        text += get_form(data['url']).decode('utf-8')
        print (text)
        mailSend(text)


send_mail()
|
23,606 | 60e9d5bf045f21883476e6f95d7bfb55bbca47d8 | import numpy as np
# Toy single-neuron example: gradient of the squared error with respect
# to the weights.
input_data = np.array([1, 2, 3])
weights = np.array([0, 2, 1])
target = 0

# Calculate the predictions: preds (dot product of inputs and weights)
preds = (input_data * weights).sum()

# Calculate the error: error (difference from the target)
error = preds - target

# Calculate the slope: slope = d(error^2)/d(weights) = 2 * x * error
slope = 2 * input_data * error

# Print the slope
print(slope)
|
23,607 | f77cc44b2f3a62978b84f87df6efbcb1fb560520 | from django.db import models
'''
Classes for everything in the backend, this are gonna be A LOT of models.
'''
class Country(models.Model):
    """A country; referenced by armed forces and manufacturers."""
    def __str__(self):
        return self.name
    name = models.CharField(max_length=45)


class ArmedForce(models.Model):
    """A national military organisation."""
    name = models.CharField(max_length=45)
    country = models.ForeignKey(Country, on_delete=models.DO_NOTHING)
    date_established = models.DateField()


class Manufacturer(models.Model):
    """A company that produces manufactured things."""
    def __str__(self):
        return self.name
    name = models.CharField(max_length = 45)
    country = models.ForeignKey(Country, on_delete=models.DO_NOTHING)
'''
Base class for manufactured stuff
'''
class ManufacturedThing(models.Model):
    """Base model with attributes common to all manufactured items."""
    def __str__(self):
        return self.name
    name = models.CharField(max_length=45)
    manufacturer = models.ForeignKey(Manufacturer, on_delete=models.DO_NOTHING)
    # variant_of = models.OneToOneField(self.Thing ,on_delete=models.DO_NOTHING)
    variant = models.CharField(max_length=45)
    # Physical dimensions; units are not specified in the schema — confirm.
    weight = models.FloatField()
    length = models.FloatField()
    width = models.FloatField()
    height = models.FloatField()
    # Production run and service life.
    start_production = models.DateField()
    end_production = models.DateField()
    in_service = models.DateField()
    out_service = models.DateField()
    # Armed forces currently or historically operating this item.
    in_use_by = models.ManyToManyField(ArmedForce)
'''
Subclasses of manufactured stuff
'''
class Projectile(ManufacturedThing):
    """Ammunition fired by a weapon."""
    caliber = models.CharField(max_length=45)
    type = models.CharField(max_length=45)  # Make own class


class Weapon(ManufacturedThing):
    """A gun or similar weapon chambered for one projectile type."""
    # NOTE(review): "round" shadows the Python builtin; kept as-is because
    # renaming the field would change the database schema.
    round = models.ForeignKey(Projectile, on_delete=models.DO_NOTHING)
    barrel_length = models.FloatField()


class Engine(ManufacturedThing):
    """A powerplant fitted to motorized machines."""
    displacement = models.IntegerField()


class Type(models.Model):
    """Free-form classification tag applied to war machines."""
    type_string = models.CharField(max_length=45)
class WarMachine(ManufacturedThing):
    """A weapons platform: its armament, crew size and classification tags."""
    # BUG FIX: ManyToManyField does not accept on_delete — that keyword is
    # only valid on ForeignKey/OneToOneField. Passing it raises
    # TypeError ("unexpected keyword argument 'on_delete'") as soon as the
    # model class is created.
    armament = models.ManyToManyField(Weapon)
    crew = models.IntegerField()
    type = models.ManyToManyField(Type)
class Motorized(ManufacturedThing):
    """A manufactured thing propelled by one or more engines."""
    engine = models.ManyToManyField(Engine)
    empty_weight = models.FloatField()
    loaded_weight = models.FloatField()


# NOTE(review): the three classes below inherit from both Motorized and
# WarMachine, which share the concrete base ManufacturedThing — Django
# multi-table inheritance will raise clashes for the duplicated parent
# links/fields; confirm this hierarchy actually migrates.
class Aircraft(Motorized, WarMachine):
    """A flying war machine."""
    service_ceiling = models.FloatField()
    wing_area = models.FloatField()


class Ships(Motorized, WarMachine):
    """A naval war machine."""
    commissioned = models.DateField()
    decommissioned = models.DateField()  # need to find way for multiple instances of com/decom
    draft = models.FloatField()


class Vehicles(Motorized, WarMachine):
    """A ground war machine."""
    wheelbase = models.FloatField()
    track = models.FloatField()
|
23,608 | 3b7e9b9811d57a0f7e415df5a151ac6eeb3612f5 | # Generated by Django 3.2.7 on 2021-09-01 17:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial, auto-generated migration creating the Post model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='عنوان')),
                ('description', models.TextField(verbose_name='توضیحات')),
                ('slug', models.SlugField(max_length=100, unique=True, verbose_name='اسلاگ')),
                ('thumbnail', models.ImageField(upload_to='images', verbose_name='عکس')),
                ('publish', models.DateField(default=django.utils.timezone.now, verbose_name='منتشر شده')),
                ('created', models.DateField(auto_now_add=True, verbose_name='ساخته شده')),
                # NOTE(review): 'updated' reuses the verbose_name of 'created'
                # — looks like a copy-paste in the model; this file is
                # auto-generated, so fix it in the model and re-migrate.
                ('updated', models.DateField(auto_now=True, verbose_name='ساخته شده')),
                ('status', models.CharField(choices=[('d', 'draft'), ('p', 'published')], max_length=1, verbose_name='وضعیت')),
            ],
        ),
    ]
|
23,609 | acad3537477cad959bc4077590e3c171a122a7ba | import math
def PrimeSum(m, n):
    """Return the sum of all primes p with m <= p <= n.

    Robustness fixes over the original:
    - candidates below 2 are skipped, so 1 is never counted as prime and
      negative lower bounds no longer crash math.sqrt;
    - the local accumulator no longer shadows the builtin ``sum``.
    Returns 0 when the range contains no primes.
    """
    total = 0
    # Starting at max(m, 2) subsumes the original special case for m == 1.
    for candidate in range(max(m, 2), n + 1):
        is_prime = True
        # Trial division up to the integer square root is sufficient;
        # math.isqrt avoids float rounding for very large values.
        for divisor in range(2, math.isqrt(candidate) + 1):
            if candidate % divisor == 0:
                is_prime = False
                break
        if is_prime:
            total += candidate
    return total
# Read "m n" from standard input and print the sum of primes in [m, n].
m,n=input().split()
m = int(m)
n = int(n)
print(PrimeSum(m,n))
23,610 | 562687e970f53a90ea74a6bb3490b8ec8045ada0 | from sqlalchemy import Column, Integer, Enum, ForeignKey
from sqlalchemy.orm import relationship
from slackwolf.db import Base
from slackwolf.roles.types import RoleTypes
class GameUser(Base):
    """Game-User association entity"""
    __tablename__ = 'game_user'

    # Composite primary key: one row per (game, user) pair.
    game_id = Column(Integer, ForeignKey('game.id'), primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)

    # Bidirectional relationships back to the owning entities.
    game = relationship('Game', back_populates='users')
    user = relationship('User', back_populates='games')

    # Role assigned to this user for this game; villager by default.
    role = Column(Enum(RoleTypes), default=RoleTypes.VILLAGER, nullable=False)
|
23,611 | b01c45b9e192e56c51972ede9ecda52d941aad2d | import pvporcupine
import struct
import pyaudio
import assistant.pushtotalk as pushtotalk

# Google Assistant project / device configuration.
PROJECT_ID = 'hey-lumi'
DEVICE_MODEL_ID = 'hey-lumi-hey-lumi-e75s95'
CREDENTIALS_PATH = './credentials.json'
MODEL_PATH = './hey_lumi.ppn'

# Porcupine wake-word engine loaded with the custom "hey lumi" keyword model.
handle = pvporcupine.create(keyword_paths=[MODEL_PATH])

# Open a mono 16-bit microphone stream at the engine's required sample rate
# and frame size.
pa = pyaudio.PyAudio()
audio_stream = pa.open(
    rate = handle.sample_rate,
    channels = 1,
    format = pyaudio.paInt16,
    input = True,
    frames_per_buffer = handle.frame_length)

try:
    print('started')
    while True:
        # Read one audio frame and unpack it into 16-bit samples.
        pcm = audio_stream.read(handle.frame_length)
        pcm = struct.unpack_from("h" * handle.frame_length, pcm)
        keyword_index = handle.process(pcm)
        if keyword_index >= 0:
            # detected: hand one exchange over to the Assistant, then
            # return to listening.
            pushtotalk.main(
                project_id = PROJECT_ID,
                device_model_id = DEVICE_MODEL_ID,
                credentials = CREDENTIALS_PATH,
                once = True
            )
        pass
finally:
    # Release engine and audio resources on shutdown (Ctrl-C included).
    if handle is not None:
        handle.delete()
    if audio_stream is not None:
        audio_stream.close()
    if pa is not None:
        pa.terminate()
23,612 | da4abb5b3c73059722a5f8d405743be5464ffe3b | # @ProjectName: beer_server
# @FileName: custom_model_view_set.py
# @Author: www
# @Time: 2021/11/24 下午10:16
from rest_framework import generics, mixins
from rest_framework.viewsets import GenericViewSet
class MyListRetrieveDestroyModelViewSet(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.DestroyModelMixin,
                                        GenericViewSet):
    """Viewset supporting list / retrieve / destroy only (no create or update)."""
    pass
|
23,613 | e52919bc6b9869bd0e37524def43c098bc1f0cf7 | '''
Created on Feb 20, 2018
@author: esthe
'''
##US12 - Pair Programming
##Mother less than 60 Father less than 80
from dateutil.relativedelta import relativedelta
def isValidFatherAge(childBirthDate, fatherBirthDate):
    """US12: return True iff the father is under 80 years old at the child's birth."""
    ageDifference = relativedelta(childBirthDate, fatherBirthDate).years
    # Idiom: return the comparison directly instead of an if/else ladder.
    return ageDifference < 80
def isValidMotherAge(childBirthDate, motherBirthDate):
    """US12: return True iff the mother is under 60 years old at the child's birth."""
    ageDifference = relativedelta(childBirthDate, motherBirthDate).years
    # Idiom: return the comparison directly instead of an if/else ladder.
    return ageDifference < 60
|
23,614 | d24850aee3b5907ec0cd70d3836b1c9ec5872187 | """
Calls the Turbomole executable.
"""
import os
import re
from decimal import Decimal
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from qcelemental.models import AtomicResult, Provenance
from qcelemental.util import safe_version, which
from ...util import execute, temporary_directory
from ..model import ProgramHarness
from .define import execute_define, prepare_stdin
from .harvester import harvest
from .methods import KEYWORDS, METHODS
class TurbomoleHarness(ProgramHarness):
_defaults = {
"name": "Turbomole",
"scratch": True,
"thread_safe": False,
"thread_parallel": False,
"node_parallel": True,
"managed_memory": True,
}
version_cache: Dict[str, str] = {}
@staticmethod
def found(raise_error: bool = False) -> bool:
return which(
"define",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via http://www.cosmologic.de/turbomole/home.html",
)
def get_version(self) -> str:
which_prog = which("define")
if which_prog not in self.version_cache:
# We use basically a dummy stdin as we dont want to pipe any real
# input into define. We only want to parse the version number from
# the string.
with temporary_directory(suffix="_define_scratch") as tmpdir:
tmpdir = Path(tmpdir)
stdout = execute_define("\n", cwd=tmpdir)
# Tested with V7.3 and V7.4.0
version_re = re.compile("TURBOMOLE (?:rev\. )?(V.+?)\s+")
mobj = version_re.search(stdout)
version = mobj[1]
self.version_cache[which_prog] = safe_version(version)
return self.version_cache[which_prog]
def compute(self, input_model: "AtomicInput", config: "TaskConfig") -> "AtomicResult":
self.found(raise_error=True)
job_inputs = self.build_input(input_model, config)
success, dexe = self.execute(job_inputs)
# TODO: handle input errors?! But then define probably already crashed...
# if 'There is an error in the input file' in dexe["stdout"]:
# raise InputError(dexe["stdout"])
if success:
dexe["outfiles"]["stdout"] = dexe["stdout"]
dexe["outfiles"]["stderr"] = dexe["stderr"]
return self.parse_output(dexe["outfiles"], input_model)
def build_input(
self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None
) -> Dict[str, Any]:
turbomolrec = {"infiles": {}, "outfiles": {"control": "control"}, "scratch_directory": config.scratch_directory}
# Handle molecule
# TODO: what's up with moldata? Do I need it?
coord_str, moldata = input_model.molecule.to_string(dtype="turbomole", return_data=True)
# Prepare stdin for define call
model = input_model.model
# geeopt will hold the for which to calculate the gradient.
# 'x' corresponds to the ground state, 'a 1' would be the GS too.
# 'a1 2' would be the 1st excited state of the irreducible group A1.
# Right now only GS are supported, so this is hardcoded as 'x'.
geoopt = "x" if input_model.driver.derivative_int() > 0 else ""
stdin, subs = prepare_stdin(
model.method,
model.basis,
input_model.keywords,
input_model.molecule.molecular_charge,
input_model.molecule.molecular_multiplicity,
geoopt,
)
with temporary_directory(suffix="_define_scratch") as tmpdir:
tmpdir = Path(tmpdir)
with open(tmpdir / "coord", "w") as handle:
handle.write(coord_str)
stdout = execute_define(stdin, cwd=tmpdir)
# The define scratch will be populated by some files that we want to keep
to_keep = "basis auxbasis coord control alpha beta mos".split()
for fn in to_keep:
full_fn = tmpdir / fn
if not full_fn.exists():
continue
with open(full_fn) as handle:
turbomolrec["infiles"][fn] = handle.read()
env = os.environ.copy()
env["PARA_ARCH"] = "SMP"
env["PARNODES"] = str(config.ncores)
env["SMPCPUS"] = str(config.ncores)
# TODO: set memory
turbomolrec["environment"] = env
keywords = input_model.keywords
ri_calculation = any([keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]])
# Set appropriate commands. We always need a reference wavefunction
# so the first command will be dscf or ridft.
commands = ["ridft"] if ri_calculation else ["dscf"]
# ricc2 will also calculate the gradient
if model.method in METHODS["ricc2"]:
commands.append("ricc2")
# Gradient calculation for DFT/HF
elif input_model.driver.derivative_int() == 1:
grad_command = "rdgrad" if ri_calculation else "grad"
commands.append(grad_command)
elif input_model.driver.derivative_int() == 2:
freq_command = "aoforce"
commands.append(freq_command)
# Add
# $noproj
# $nprhessian file=nprhessian
# to control file.
turbomolrec["outfiles"]["hessian"] = "nprhessian"
if input_model.driver.derivative_int() == 1:
turbomolrec["outfiles"]["gradient"] = "gradient"
command = ["; ".join(commands)]
turbomolrec["command"] = command
# TODO: check if the chosen commands are available with which()?
return turbomolrec
def execute(
    self, inputs: Dict[str, Any], *, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None
) -> Tuple[bool, Dict]:
    """Run the prepared Turbomole command chain and return (success, execution record).

    ``inputs`` is the record built by ``build_input``: the shell command,
    the input files to materialize, and the output files to collect.
    """
    # Note: the bare `execute(...)` below is the module-level runner
    # (imported at file top), not a recursive call to this method.
    ok, record = execute(
        inputs["command"],
        inputs["infiles"],
        inputs["outfiles"],
        shell=True,
        # TODO: scratch_messy?
    )
    return ok, record
def parse_output(
    self, outfiles: Dict[str, str], input_model: "AtomicInput"
) -> "AtomicResult":  # lgtm: [py/similar-function]
    """Harvest QC variables from Turbomole outputs and assemble an AtomicResult."""
    stdout = outfiles.pop("stdout")

    # nwmol, if it exists, is dinky, just a clue to geometry of nwchem results
    qcvars, gradient, hessian = harvest(input_model.molecule, stdout, **outfiles)
    if gradient is not None:
        qcvars["CURRENT GRADIENT"] = gradient
    if hessian is not None:
        qcvars["CURRENT HESSIAN"] = hessian

    # The driver (energy/gradient/hessian) selects which qcvar is returned.
    return_value = qcvars[f"CURRENT {input_model.driver.upper()}"]
    if isinstance(return_value, Decimal):
        return_value = float(return_value)

    result = input_model.dict()
    result["extras"]["outfiles"] = outfiles
    result.update(
        {
            "properties": {},
            "provenance": Provenance(creator="Turbomole", version=self.get_version(), routine="turbomole"),
            "return_result": return_value,
            "stdout": stdout,
            "success": True,
        }
    )
    return AtomicResult(**result)
|
23,615 | 99711009126d7154cb9e5768e4e84407a160f1a3 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-08 14:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: allow three Event text fields to be blank.
    # NOTE(review): default=b'' sets a *bytes* default on a TextField; on
    # Python 3 this is almost certainly meant to be default='' — confirm
    # before squashing/regenerating.

    dependencies = [
        ('hopper', '0009_auto_20170108_1409'),
    ]

    operations = [
        migrations.AlterField(
            model_name='event',
            name='guidebook_desc',
            field=models.TextField(blank=True, default=b''),
        ),
        migrations.AlterField(
            model_name='event',
            name='requirements',
            field=models.TextField(blank=True, default=b''),
        ),
        migrations.AlterField(
            model_name='event',
            name='runners',
            field=models.TextField(blank=True, default=b''),
        ),
    ]
|
23,616 | 1975029d6a73ca4543edeec7acc222238f7718c7 | import requests
from bs4 import BeautifulSoup
import re
import get_catalog
"""获得盐选小说排行榜页面"""
def get_page(url, headers):
    """Fetch the ranking page and return its prettified HTML text."""
    response = requests.get(url, headers=headers)
    page = BeautifulSoup(response.text, 'lxml').prettify()
    #with open("zhihu_salt.html", 'w', encoding="utf-8") as f:
    #    f.write(page)
    return page
"""处理小说排行榜, 并将每部小说标题及其链接传入get_catalog作进一步处理"""
def parse_html(html):
    """Parse the ranking page: collect each novel's title and link, then
    hand the list off to get_catalog for further processing."""
    soup = BeautifulSoup(html, 'lxml')
    arr = []
    titles = soup.find_all(class_="VerticalMiddle-root-g9oA5 ProductTitleTag-root-mWWpm ProductCell-titleTag-8AuZ3")
    for title in titles:
        for t in title.next_siblings:
            if len(t.string) < 10:
                pass
            else:
                # Strip the fixed-width markup wrapper around the title text.
                title = t.string[13: -12]
                # BUG FIX: str.replace returns a *new* string; the original
                # discarded the result, so spaces/newlines were never removed.
                title = title.replace(" ", "").replace("\n", "")
                data = {
                    'title': title,
                    'link': ''
                }
                arr.append(data)
    links = soup.find_all(class_="ProductCell-root-3LLcu InfiniteData-autoCell-hFmUN")
    i = -1
    for link in links:
        if i < 0:
            # Skip the first matched node (it carries no novel link).
            i += 1
        else:
            result = re.match('<a class="ProductCell-root-3LLcu InfiniteData-autoCell-hFmUN" href=".*?">', str(link))
            # Slice off the fixed prefix/suffix of the matched tag to get the href.
            arr[i]['link'] = result.group()[68:-2]
            i += 1
    get_catalog.get_catalog(arr)
if __name__ == '__main__':
    # Fetch the Zhihu salt-selection ranking page live and parse it.
    headers = get_catalog.get_headers()
    html = get_page("https://www.zhihu.com/xen/market/vip/remix-album", headers)
    #with open("zhihu_salt.html", 'r', encoding='utf-8') as f:
    #html = f.read()
    parse_html(html)
|
23,617 | 3608d02ab44c3302d82673da227cdee243a6613a | import random as rd
# Small demo of the random module's most common helpers.
print(rd.random())  # random float in [0.0, 1.0)
print(rd.randint(1, 10))  # random integer in [1, 10], both ends included
print(rd.randrange(start=2))  # same as randrange(2): either 0 or 1
list1 = [1, 2, 3, 4, 5]
# BUG FIX: random.shuffle() shuffles in place and returns None, so the
# original print(rd.shuffle(list1)) always printed "None".
rd.shuffle(list1)
print(list1)
list2 = ['John', 'Castor', 'Troy']
print(rd.choice(list2))  # pick a random item from the list
|
23,618 | 0fb7e9bcb7e127f9d28fe3c07deccd77571a6651 | # https://leetcode.com/problems/find-all-duplicates-in-an-array/description/
class Solution(object):
    def findDuplicates(self, nums):
        """Return the values that appear more than once in nums (any order).

        :type nums: List[int]
        :rtype: List[int]
        """
        seen = set()
        repeated = set()
        for value in nums:
            # First sighting goes to `seen`; any later sighting to `repeated`.
            (repeated if value in seen else seen).add(value)
        return list(repeated)
23,619 | b68e4f72feb60d64ce8e7fbe05bdcc33b99af178 | """
Say you have a module (say, operator) and you want to expose everything in it to a webservice.
This shows you how you can do this in a few lines.
WARNING: DO THIS AT HOME (but never on an actual prod server).
--> Reason is, since you're giving access to EVERYTHING, there's ways to use the power of python to backtrack into
the actual system and make damage.
The usual way to wrap a module, function, or object and expose to a webservice is to define an explicit list of
attributes that can be access, which ensures that nothing else can. It's possible to use regular expressions to get more
be more expressive, but if you do so, be careful not to expose something you don't want to! A good practice there is
to not allow anything starting with a "_" or ending with a "*" (which will give access to everything under an attribute
Run the web service and try things like:
http://0.0.0.0:5000/os?attr=path.isdir&s=/
http://0.0.0.0:5000/os?attr=path.isfile&path=not_existing_file.txt
etc.
"""
import os
from flask import jsonify
from py2api.py2rest.obj_wrap import WebObjWrapper
from py2api.py2rest.input_trans import InputTrans
from py2api.output_trans import OutputTrans
from py2api.py2rest.app_maker import mk_app, dflt_run_app_kwargs
# Expose (only) the os.path attributes of the os module over HTTP.
os_path_wrap = WebObjWrapper(obj_constructor=os,  # if not a callable, the wrapper wraps always the same object
                             obj_constructor_arg_names=[],  # no construction, so no construction args
                             permissible_attr='path\..*',  # allows all attributes below path.
                             input_trans=InputTrans.from_argname_trans_dict({}),  # standard input_trans
                             output_trans=OutputTrans(trans_spec=lambda x: jsonify({'_result': x})),
                             name='/os',
                             debug=0)

# Single-route Flask app; run directly (below) or serve via WSGI.
app = mk_app(app_name='example', routes=[os_path_wrap])

if __name__ == "__main__":
    app.run(**dflt_run_app_kwargs())
|
23,620 | 4c1d98152929cf7a982c1f22106f23faa73f92b3 | # RGB
def color_code_to_rbg_tuple(color_code):
    """
    e.g. "#FF0000" => (255, 0, 0)
    """
    # Parse the three two-hex-digit channels after the leading '#'.
    return tuple(int(color_code[pos:pos + 2], 16) for pos in (1, 3, 5))
# BGR ordering, as used by OpenCV.
def color_code_to_bgr_tuple(color_code):
    """e.g. "#FF0000" => (0, 0, 255) — same channels as RGB, reversed."""
    blue = int(color_code[5:7], 16)
    green = int(color_code[3:5], 16)
    red = int(color_code[1:3], 16)
    return blue, green, red
def __test():
    # Smoke test: pure red should decode to (255, 0, 0).
    col = color_code_to_rbg_tuple("#FF0000")
    print(col)


if __name__ == '__main__':
    __test()
23,621 | 9ff4ed1debd96bf3636123bcaead8a15eb5fa8a2 | {"filter":false,"title":"tests_forms.py","tooltip":"/hire/tests_forms.py","undoManager":{"mark":1,"position":1,"stack":[[{"start":{"row":0,"column":0},"end":{"row":3,"column":0},"action":"insert","lines":["from django.test import TestCase","","# Create your tests here.",""],"id":1}],[{"start":{"row":3,"column":0},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":2}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":4,"column":0},"end":{"row":4,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1569829230343,"hash":"87e6d385d215939dfa3dd7689f5fbd7a71aaea0a"} |
23,622 | 769b4be41f43aa46717d65eb08be5e74403f0e39 | """
Домашнее задание №1
Использование библиотек: ephem
* Установите модуль ephem
* Добавьте в бота команду /planet, которая будет принимать на вход
название планеты на английском, например /planet Mars
* В функции-обработчике команды из update.message.text получите
название планеты (подсказка: используйте .split())
* При помощи условного оператора if и ephem.constellation научите
бота отвечать, в каком созвездии сегодня находится планета.
"""
import logging
import ephem
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import settings
logging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',
level=logging.INFO,
filename='bot.log')
PROXY = {
'proxy_url': settings.PROXY_URL,
'urllib3_proxy_kwargs': {
'username': settings.PROXY_USERNAME,
'password': settings.PROXY_PASSWORD
}
}
def greet_user(update, context):
    """Handle /start: log the event and send a fixed reply."""
    reply = 'Вызван /start'
    print(reply)
    update.message.reply_text(reply)
def talk_to_me(update, context):
    """Echo handler: repeat the user's message back verbatim."""
    incoming = update.message.text
    print(incoming)
    update.message.reply_text(incoming)
def planet_user(update, context):  # функция показывает в каком созвездии планета без команды
    """Free-text handler: reply with the constellation the named planet is in.

    NOTE(review): the hardcoded date is year 2121 — presumably a typo for
    2021 (the homework asks for "today"); confirm intent.
    """
    date_planet = '2121/03/05'
    name = update.message.text
    if name == 'mars':
        planet = ephem.Mars(date_planet)
    elif name == 'venus':
        planet = ephem.Venus(date_planet)
    elif name == 'jupiter':
        planet = ephem.Jupiter(date_planet)
    else:
        # BUG FIX: the original had an 'earth' branch calling ephem.Earth,
        # which pyephem does not define (no Earth body class) — requesting
        # 'earth' crashed with AttributeError instead of replying.
        update.message.reply_text('что-то пошло не так')
        return
    const_planet = ephem.constellation(planet)
    update.message.reply_text(f'{date_planet} Планета {update.message.text.upper()} в созвездии {const_planet[-1]}')
def get_planet(update, context):  # функция показывает в каком созвездии планета с командой /planet
    """Handle '/planet <name>': reply with the body's constellation on a fixed date."""
    date_user = '2121/03/05'  # NOTE(review): year 2121 — likely a typo for 2021; confirm
    # Whitelisted pyephem body classes, keyed by the lowercase command argument.
    # BUG FIX: the original mapped 'earth' to ephem.Earth, which pyephem
    # does not define — that branch always crashed with AttributeError.
    bodies = {
        'mars': ephem.Mars,
        'venus': ephem.Venus,
        'jupiter': ephem.Jupiter,
        'moon': ephem.Moon,
        'sun': ephem.Sun,
        'uranus': ephem.Uranus,
        'pluto': ephem.Pluto,
    }
    # Robustness: '/planet' with no argument previously raised IndexError.
    if not context.args or context.args[0] not in bodies:
        update.message.reply_text('ошибочка вышла')
        return
    planetname = bodies[context.args[0]](date_user)
    constel_planet = ephem.constellation(planetname)
    update.message.reply_text(f'{date_user} Планета {context.args[0].upper()} в созвездии {constel_planet[-1]}')
def main():
    """Build the bot, register handlers, and poll for updates until stopped."""
    mybot = Updater(settings.API_KEY, request_kwargs=PROXY, use_context=True)
    dp = mybot.dispatcher
    dp.add_handler(CommandHandler("start", greet_user))
    dp.add_handler(CommandHandler("planet", get_planet))
    # NOTE(review): both handlers below match Filters.text and the dispatcher
    # dispatches each update to only the first matching handler per group, so
    # talk_to_me is never invoked — confirm whether the echo handler is still
    # wanted or should be removed.
    dp.add_handler(MessageHandler(Filters.text, planet_user))
    dp.add_handler(MessageHandler(Filters.text, talk_to_me))
    logging.info('Бот стартовал')
    mybot.start_polling()
    mybot.idle()


if __name__ == "__main__":
    main()
23,623 | 9c2efcfcdfc760ae18a1e8f367bdb040ec8dc384 | # Copyright 2016 Erle Robotics, LLC
#
# A relevant part of the code has been written taking inpiration
# from ROS 1 ros_comm package attributed to Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
import time
import os
import errno
import sys
from optparse import OptionParser
NAME='rosnode'
# TODO implement
def _rosnode_cmd_ping(argv):
print("NOT IMPLEMENTED\n")
sys.exit(0)
# TODO implement
def _rosnode_cmd_info(argv):
print("NOT IMPLEMENTED\n")
sys.exit(0)
# TODO implement
def _rosnode_cmd_machine(argv):
print("NOT IMPLEMENTED\n")
sys.exit(0)
# TODO implement
def _rosnode_cmd_cleanup(argv):
print("NOT IMPLEMENTED\n")
sys.exit(0)
# TODO implement
def _rosnode_cmd_kill(argv):
print("NOT IMPLEMENTED\n")
sys.exit(0)
def _rosnode_cmd_list(argv):
    """
    Implements rosnode 'list' command.

    Parses the (currently ignored) -u/-a flags and the optional namespace,
    then prints every node name reported by rclpy, one per line.
    """
    args = argv[2:]
    parser = OptionParser(usage="usage: %prog list", prog=NAME)
    parser.add_option("-u",
                      dest="list_uri", default=False,
                      action="store_true",
                      help="list XML-RPC URIs (NOT IMPLEMENTED)")
    parser.add_option("-a", "--all",
                      dest="list_all", default=False,
                      action="store_true",
                      help="list all information (NOT IMPLEMENTED)")
    (options, args) = parser.parse_args(args)
    namespace = None
    if len(args) > 1:
        # parser.error prints the message and exits with status 2.
        parser.error("invalid args: you may only specify one namespace")
    elif len(args) == 1:
        # Namespace filtering is not yet wired up in the ROS 2 port.
        # namespace = rosgraph.names.script_resolve_name('rostopic', args[0])
        pass
    # In ROS 1, the rosnode list invocation was performed using:
    # rosnode_listnodes(namespace=namespace, list_uri=options.list_uri, list_all=options.list_all)
    result = rclpy.get_node_names()
    for node in result:
        print(node)
def _fullusage(return_error=True):
"""
Prints rosnode usage information.
@param return_error whether to exit with error code os.EX_USAGE
"""
print("""rosnode is a command-line tool for printing information about ROS Nodes.
Commands:
\trosnode ping\ttest connectivity to node (NOT IMPLEMENTED)
\trosnode list\tlist active nodes
\trosnode info\tprint information about node (NOT IMPLEMENTED)
\trosnode machine\tlist nodes running on a particular machine or list machines
\trosnode kill\tkill a running node (NOT IMPLEMENTED)
\trosnode cleanup\tpurge registration information of unreachable nodes (NOT IMPLEMENTED)
Type rosnode <command> -h for more detailed usage, e.g. 'rosnode ping -h'
""")
if return_error:
sys.exit(getattr(os, 'EX_USAGE', 1))
else:
sys.exit(0)
def rosnodemain(argv=None):
    """rosnode main entrypoint: dispatch argv[1] to the matching subcommand.

    @param argv: override sys.argv
    @param argv: [str]
    """
    # FIX: identity check (`is None`) instead of equality (`== None`).
    if argv is None:
        argv = sys.argv
    if len(argv) == 1:
        _fullusage()
    try:
        command = argv[1]
        if command == 'ping':
            sys.exit(_rosnode_cmd_ping(argv) or 0)
        elif command == 'list':
            sys.exit(_rosnode_cmd_list(argv) or 0)
        elif command == 'info':
            sys.exit(_rosnode_cmd_info(argv) or 0)
        elif command == 'machine':
            sys.exit(_rosnode_cmd_machine(argv) or 0)
        elif command == 'cleanup':
            sys.exit(_rosnode_cmd_cleanup(argv) or 0)
        elif command == 'kill':
            sys.exit(_rosnode_cmd_kill(argv) or 0)
        elif command == '--help':
            _fullusage(False)
        else:
            _fullusage()
    except KeyboardInterrupt:
        # Ctrl-C during a subcommand exits quietly.
        pass
|
23,624 | 769707f9802ee84922641b7ac0162cf3c0dbfa4e | #!/usr/bin/env python
# MIT License
#
# Copyright (c) 2016 Alexis Seigneurin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# See UI proxy issues in the PR description: https://github.com/apache/spark/pull/17455
# Original code: https://github.com/aseigneurin/spark-ui-proxy
import SocketServer
import logging
import os
import sys
import urllib2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
# Proxy configuration, overridable via environment variables.
BIND_ADDR = os.environ.get("BIND_ADDR", "0.0.0.0")
SERVER_PORT = int(os.environ.get("SERVER_PORT", "80"))
# Normalized so the prefix always ends with exactly one '/'.
URL_PREFIX = os.environ.get("URL_PREFIX", "").rstrip('/') + '/'
# Filled in from argv in the __main__ block below.
SPARK_MASTER_HOST = ""
class ProxyHandler(BaseHTTPRequestHandler):
    """Reverse proxy for the Spark UI (Python 2).

    Requests of the form /proxy:<host:port>/<path> are fetched from the
    target host; links in the returned HTML are rewritten so worker and
    application pages keep routing through this proxy.
    """

    def do_GET(self):
        # Add an health checking endpoint.
        if self.path in ["/healthz"]:
            self.send_response(code=200)
            self.send_header("Content-type", "text/plain")
            self.end_headers()
            self.wfile.write("OK")
            return
        # redirect if we are hitting the home page
        if self.path in ["", URL_PREFIX]:
            self.send_response(302)
            self.send_header("Location", URL_PREFIX + "proxy:" + SPARK_MASTER_HOST)
            self.end_headers()
            return
        self.proxyRequest(None)

    def do_POST(self):
        # Forward the request body verbatim to the proxied host.
        length = int(self.headers.getheader('content-length'))
        postData = self.rfile.read(length)
        self.proxyRequest(postData)

    def proxyRequest(self, data):
        """Fetch the target URL (GET when data is None, POST otherwise) and relay the response."""
        targetHost, path = self.extractUrlDetails(self.path)
        targetUrl = "http://" + targetHost + path
        print("get: %s host: %s path: %s target: %s" % (self.path, targetHost, path, targetUrl))
        try:
            proxiedRequest = urllib2.urlopen(targetUrl, data)
        except Exception as ue:
            logging.error("Caught an exception trying to reach [ {0} ]".format(targetUrl))
            raise ue
        resCode = proxiedRequest.getcode()
        if resCode == 200:
            # Rewrite links so the browser keeps going through the proxy.
            page = proxiedRequest.read()
            page = self.rewriteLinks(page, targetHost)
            resContentType = proxiedRequest.info()["Content-Type"]
            self.send_response(200)
            self.send_header("Content-Type", resContentType)
            self.end_headers()
            self.wfile.write(page)
        elif resCode == 302:
            # Upstream redirects are sent back to the proxied master UI.
            self.send_response(302)
            self.send_header("Location", URL_PREFIX + "proxy:" + SPARK_MASTER_HOST)
            self.end_headers()
        else:
            raise Exception("Unsupported response: " + resCode)

    def extractUrlDetails(self, path):
        """Split '/proxy:<host>[/rest]' into (host, rest); default to the Spark master."""
        if path.startswith("/proxy:"):
            start_idx = 7  # len('/proxy:') == 7
            idx = path.find("/", start_idx)
            targetHost = path[start_idx:] if idx == -1 else path[start_idx:idx]
            path = "" if idx == -1 else path[idx:]
        else:
            targetHost = SPARK_MASTER_HOST
            path = path
        return (targetHost, path)

    def rewriteLinks(self, page, targetHost):
        """Rewrite hrefs, srcs, form actions and API URLs to route back through the proxy."""
        target = "{0}proxy:{1}/".format(URL_PREFIX, targetHost)
        page = page.replace('href="/', 'href="' + target)
        page = page.replace("'<div><a href=' + logUrl + '>'",
                            "'<div><a href=' + location.origin + logUrl.replace('http://', '/proxy:') + '>'")
        page = page.replace('href="log', 'href="' + target + 'log')
        page = page.replace('href="http://', 'href="' + URL_PREFIX + 'proxy:')
        page = page.replace('src="/', 'src="' + target)
        page = page.replace('action="', 'action="' + target)
        page = page.replace('"/api/v1/', '"' + target + 'api/v1/')
        page = page.replace('{{uiroot}}/history', '{{uiroot}}' + target + 'history')
        return page
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: <proxied host:port> [<proxy port>]")
        sys.exit(1)
    SPARK_MASTER_HOST = sys.argv[1]
    if len(sys.argv) >= 3:
        SERVER_PORT = int(sys.argv[2])
    print("Starting server on http://{0}:{1}".format(BIND_ADDR, SERVER_PORT))

    # Fork one child per request so a slow Spark endpoint cannot block others.
    class ForkingHTTPServer(SocketServer.ForkingMixIn, HTTPServer):
        def finish_request(self, request, client_address):
            # Cap each proxied request at 30s to avoid hung children.
            request.settimeout(30)
            HTTPServer.finish_request(self, request, client_address)

    server_address = (BIND_ADDR, SERVER_PORT)
    httpd = ForkingHTTPServer(server_address, ProxyHandler)
    httpd.serve_forever()
|
23,625 | c1ee7f1af1e923980045aa96b686695477398a44 | #!/usr/bin/env python
import bottle
import models
import os.path
import settings
import views
class Server(object):
    """Wires the route table onto a fresh bottle app; optionally bootstraps the DB."""

    def __init__(self, init_db=False):
        self.app = bottle.Bottle()
        Routes.setup_routing(self.app)
        if init_db:
            # First run: create the schema before serving any requests.
            models.DBManager().initialize_db()
class ImageDeletor(object):
    """Deletes stored images through the ORM layer."""

    @classmethod
    def delete(cls, filename=None):
        """Delete the first image matching ``filename``; delete all when omitted."""
        orm = models.ImageORM()
        if not filename:
            for image in orm.images():
                image.delete()
        else:
            orm.images(filename=filename)[0].delete()
class Routes(object):
    """Registers all HTTP routes on the given bottle app."""

    @staticmethod
    def setup_routing(app):
        bottle.TEMPLATE_PATH = ['./templates/']

        # FIX: the two static-file handlers below were both named `send_js`
        # (the second def silently rebound the first). Bottle's routing was
        # unaffected since registration happens at decoration time, but the
        # duplicate/misleading names are renamed for clarity.
        @app.get('/css/<filename:path>')
        def send_css(filename):
            return bottle.static_file(filename, root=settings.STYLE_DIR)

        @app.get('/js/<filename:path>')
        def send_js(filename):
            return bottle.static_file(filename, root=settings.SCRIPT_DIR)

        @app.get('/<filename:re:favicon\.ico>')
        @app.get('/images/<filename:path>')
        def send_image(filename):
            return bottle.static_file(filename, root=settings.IMAGE_DIR)

        @app.delete('/images')
        @app.delete('/images/<filename:path>')
        def delete_image(filename=None):
            # No filename => delete everything (see ImageDeletor).
            ImageDeletor.delete(filename)
            bottle.redirect('/')

        @app.post('/images')
        def new_image():
            return views.UploadView().render_POST(bottle.request.forms, bottle.request.files.new_upload)

        @app.post('/munge')
        def munge():
            return views.MungeView().render_POST(bottle.request.forms)

        @app.get('/')
        def index():
            image_manager = models.ImageORM()
            return views.HomeView().render_GET(images=image_manager.images())

        @app.get('/error')
        def uh_oh():
            return views.ErrorView().render(bottle.request.query)
if __name__ == '__main__':
    # Initialize the DB only when the SQLite file does not exist yet.
    server = Server(init_db=not os.path.isfile('oo_proj.db'))
    server.app.run(host='localhost', port=8080, debug=True)
|
23,626 | 8b700c12b47500414c9022c8d91a5adda443f18b | from django.shortcuts import render
#views.py
from rest_framework.views import APIView
from rest_framework.response import Response
class HelloWorld(APIView):
    """Minimal DRF view returning a fixed greeting string."""

    def get(self, request):
        return Response('HELLO WORLD! John kalyango.')
|
23,627 | 73005f627b3286547656b85929be9f37b1d5b0ce | from typing import Tuple
import rclpy
from rclpy.node import Node
from numpy import sqrt
from reachy_msgs.msg import MobileBaseDirection
from reachy_msgs.srv import SetMobileBaseMode
# from .hb_controller import HBMotorConfig
from .arduino_controller import ArduinoMotorController
class MobileBaseController(Node):
    """ROS 2 node bridging direction goals and mode requests to the Arduino motor controller.

    Subscribes to 'mobile_base_direction_goals' (MobileBaseDirection) and
    exposes the 'set_mobile_base_mode' service ('idle' / 'close_loop').
    """

    def __init__(self) -> None:
        super().__init__('mobile_base_controller')
        self.logger = self.get_logger()

        # NOTE(review): serial port is hardcoded — confirm /dev/ttyACM0 on target.
        self.mobile_base_controller = ArduinoMotorController(port='/dev/ttyACM0')
        # self.mobile_base_controller = HBMotorConfig(channels=[0, 1])

        # Start with both motor channels (0 and 1) idle.
        self.mobile_base_controller.mode_idle(0)
        self.mobile_base_controller.mode_idle(1)
        self.current_mode = 'idle'
        self.current_speed = (0, 0)

        self.set_mobile_base_mode_srv = self.create_service(
            srv_type=SetMobileBaseMode,
            srv_name='set_mobile_base_mode',
            callback=self.set_mobile_base_mode,
        )
        self.logger.info(f'Create "{self.set_mobile_base_mode_srv}" service.')

        self.goal_direction_subscription = self.create_subscription(
            msg_type=MobileBaseDirection,
            topic='mobile_base_direction_goals',
            callback=self.on_direction_goals,
            qos_profile=5,
        )
        self.logger.info(f'Subscribe to "{self.goal_direction_subscription.topic_name}".')

        self.logger.info('Node ready!')

    def set_mobile_base_mode(
        self,
        request: SetMobileBaseMode.Request,
        response: SetMobileBaseMode.Response,
    ) -> SetMobileBaseMode.Response:
        """Service callback: switch both motor channels between 'idle' and 'close_loop'."""
        if request.mode == 'idle':
            self.mobile_base_controller.mode_idle(0)
            self.mobile_base_controller.mode_idle(1)
            self.current_mode = 'idle'
            self.logger.info('Switching to idle mode.')
            response.success = True
        elif request.mode == 'close_loop':
            self.mobile_base_controller.mode_close_loop_control(0)
            self.mobile_base_controller.mode_close_loop_control(1)
            self.current_mode = 'close_loop'
            self.logger.info('Switching to close loop control mode.')
            response.success = True
        else:
            self.logger.warning(f'Unkwnown mode {request.mode}!')
            response.success = False
        return response

    def on_direction_goals(self, msg: MobileBaseDirection) -> None:
        """Topic callback: convert an (x, y) direction goal into wheel velocities."""
        # Goals are ignored unless closed-loop control is active.
        if self.current_mode == 'close_loop':
            l, r = self.speeds_from_direction(msg.x, msg.y)

            if (l, r) == self.current_speed == (0, 0):
                # Do not resend 0 speed to keep static position control
                return

            # Channel 0 = left wheel, channel 1 = right wheel — TODO confirm.
            self.mobile_base_controller.move_input_vel(0, l)
            self.mobile_base_controller.move_input_vel(1, r)
            self.current_speed = (l, r)

    def speeds_from_direction(self, x: float, y: float) -> Tuple[float, float]:
        """Map a joystick-style (x, y) direction to (left, right) wheel speeds.

        Inputs within a small dead zone around the origin yield (0, 0).
        """
        vmax = 0.75       # speed cap applied to both axes
        dead_zone = 0.01  # radius below which input is treated as zero

        if sqrt(x ** 2 + y ** 2) <= dead_zone:
            return 0, 0
        else:
            # y drives forward speed; x (halved) steers by differential
            # mixing; outer signs flip for the motor mounting orientation.
            y = y * vmax
            x = x * vmax * 0.5
            return -(x + y), -(-x + y)

    def stop(self):
        """Command zero velocity on both channels."""
        self.mobile_base_controller.move_input_vel(0, 0)
        self.mobile_base_controller.move_input_vel(1, 0)
        self.current_speed = (0, 0)
def main():
    # Standard rclpy entry point: spin the controller node until shutdown.
    rclpy.init()
    mobile_base_controller = MobileBaseController()
    rclpy.spin(mobile_base_controller)
    rclpy.shutdown()


if __name__ == '__main__':
    main()
|
23,628 | b2f3fe3a8118e379dd9ec03b29dda89b7dc7778e | import os
import matplotlib.pyplot as plt
import random
def generate_color():
    """Return a uniformly random color as a '#rrggbb' lowercase hex string."""
    # One 24-bit draw is equivalent to three independent byte draws.
    return '#{:06x}'.format(random.randint(0, 0xFFFFFF))
def print_graphic(graphic_file_name, data_file_name):
    """Plot a single data file; thin wrapper over print_graphics()."""
    print_graphics(graphic_file_name, [data_file_name])
def print_graphics(graphic_file_name, data_file_names):
    """Plot one line per data file and save the combined figure.

    :param graphic_file_name: output image path passed to plt.savefig.
    :param data_file_names: iterable of text files with one "x,y" pair per line.
    """
    plt.ioff()  # non-interactive: we only save to disk
    _, ax = plt.subplots()
    for data_file_name in data_file_names:
        x_list = []
        y_list = []
        # FIX: the original called data_file.close() *inside* the `with`
        # block — the context manager already closes the file. Also iterate
        # the file directly instead of readlines() to avoid loading it whole.
        with open(data_file_name, 'r') as data_file:
            for line in data_file:
                pair = line.split(',')
                x_list.append(float(pair[0]))
                y_list.append(float(pair[1]))
        ax.plot(x_list, y_list, 'k', linestyle='solid', color=generate_color())
    plt.savefig(graphic_file_name)
|
23,629 | 147faeda3588058fe225dcc0c3b57844c7ba2a8c | from osbot_utils.decorators.lists.index_by import index_by
from osbot_utils.decorators.methods.cache import cache
from osbot_utils.decorators.methods.cache_on_self import cache_on_self
from osbot_aws.apis.Session import Session
from osbot_utils.utils.Misc import base64_to_str
class ECR:
    """Thin wrapper around the AWS ECR client: auth, repositories, and images."""

    def __init__(self):
        pass

    @cache_on_self
    def client(self):
        # One boto3 ECR client per instance, cached by the decorator.
        return Session().client('ecr')

    def authorization_token(self):
        """Return docker-login credentials decoded from ECR's authorization token."""
        auth_data = self.client().get_authorization_token().get('authorizationData')[0]
        auth_token = base64_to_str(auth_data.get('authorizationToken'))
        proxy_endpoint = auth_data.get('proxyEndpoint')
        # The decoded token has the form 'username:password'.
        username, password = auth_token.split(':')
        return { "registry": proxy_endpoint ,
                 "username": username ,
                 "password": password }

    def images(self, repository_name):
        """Describe every image in the named repository."""
        result = self.client().describe_images(repositoryName=repository_name)
        return result.get('imageDetails')

    @index_by
    def images_ids(self, repository_name):
        """List image ids (digest/tag pairs) in the named repository."""
        result = self.client().list_images(repositoryName=repository_name)
        return result.get('imageIds')

    def registry(self):
        """Return the account's registry id and replication configuration."""
        response = self.client().describe_registry()
        return { 'registry_id' : response.get('registryId' ),
                 'replication_configuration' : response.get('replicationConfiguration')}

    def repository_create(self, name, tags=None):
        """Create a repository; returns a warning dict if it already exists."""
        if self.repository_exists(name):
            return { 'status': 'warning', 'message': f'repository {name} already existed'}
        kwargs = { 'repositoryName' : name,
                   'tags' : [] }
        if tags:
            # ECR expects tags as a list of {'Key': ..., 'Value': ...} dicts.
            for key,value in tags.items():
                kwargs.get('tags').append({'Key': key , 'Value':value})
        result = self.client().create_repository(**kwargs)
        return result.get('repository')

    def repository_delete(self, name):
        """Delete the named repository; True if it existed and was deleted."""
        if self.repository_exists(name):
            self.client().delete_repository(repositoryName=name)
            return True
        return False

    def repository_exists(self, name):
        return self.repository_info(name) is not None

    def repository_info(self, name):
        """Describe one repository, or None when it does not exist."""
        try:
            result = self.client().describe_repositories(repositoryNames=[name])
            return result.get('repositories')[0]        # there should only be one repository here
        except:                                         # todo catch the actual Exception raised
            return None                                 # when repository doesn't exist

    @index_by
    def repositories(self):
        """Describe all repositories in the account."""
        response = self.client().describe_repositories()
        return response.get('repositories')
23,630 | dea14d72ac45ec2dc031207b7993392643f550aa | # -*- coding: utf-8 -*-
# Copyright 2016, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import os
import re
import socket
import salt.cloud
import yaml
from django.conf import settings
from django.contrib.contenttypes.fields import GenericRelation
from django.core.cache import cache
from django.db import models, transaction
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.utils.timezone import now
from django_extensions.db.models import (
TimeStampedModel,
TitleSlugDescriptionModel,
)
from guardian.shortcuts import get_users_with_perms
from model_utils import Choices
from model_utils.models import StatusModel
from stackdio.core.fields import DeletingFileField
from stackdio.core.utils import recursive_update
from stackdio.api.cloud.models import SecurityGroup
PROTOCOL_CHOICES = [
('tcp', 'TCP'),
('udp', 'UDP'),
('icmp', 'ICMP'),
]
logger = logging.getLogger(__name__)
HOST_INDEX_PATTERN = re.compile(r'.*-.*-(\d+)')
def get_hostnames_from_hostdefs(hostdefs, username='', namespace=''):
    """Expand each host definition's hostname_template for indices 0..count-1.

    :param hostdefs: host definitions exposing `count` and `hostname_template`.
    :param username: substituted for {username} in each template.
    :param namespace: substituted for {namespace} in each template.
    :returns: flat list of rendered hostnames.
    """
    hostnames = []
    for hostdef in hostdefs:
        # FIX: use range() instead of Python-2-only xrange(); behaviour is
        # identical here and keeps the helper 2/3 compatible.
        for i in range(hostdef.count):
            hostnames.append(
                hostdef.hostname_template.format(
                    namespace=namespace,
                    username=username,
                    index=i
                )
            )
    return hostnames
class StackCreationException(Exception):
    """Raised when stack creation fails; carries the collected error list."""

    def __init__(self, errors, *args, **kwargs):
        super(StackCreationException, self).__init__(*args, **kwargs)
        self.errors = errors
class Level(object):
    # Severity names for stack history entries; values mirror the stdlib
    # logging level names (note WARN maps to 'WARNING').
    DEBUG = 'DEBUG'
    INFO = 'INFO'
    WARN = 'WARNING'
    ERROR = 'ERROR'
class StatusDetailModel(StatusModel):
    """Abstract base adding a free-text detail field next to the model-utils status."""

    status_detail = models.TextField(blank=True)

    class Meta:
        abstract = True
        default_permissions = ()

    def set_status(self, status, detail=''):
        """Update status and its detail text, then persist immediately."""
        self.status = status
        self.status_detail = detail
        return self.save()
class StackQuerySet(models.QuerySet):
    """Custom queryset whose create() also merges properties and builds hosts/security groups."""

    def create(self, **kwargs):
        # 'properties' is not a model field; pull it out before the ORM create.
        new_properties = kwargs.pop('properties', {})

        # Everything happens in one transaction so a failed host/security
        # group build rolls back the stack row too.
        with transaction.atomic(using=self.db):
            stack = super(StackQuerySet, self).create(**kwargs)

            # manage the properties: blueprint properties act as defaults,
            # with caller-supplied values merged over them recursively.
            properties = stack.blueprint.properties
            recursive_update(properties, new_properties)
            stack.properties = properties

            # Create the appropriate hosts & security group objects
            stack.create_security_groups()
            stack.create_hosts()

        return stack
# Model-level (class) permissions for Stack.
_stack_model_permissions = (
    'create',
    'admin',
)

# Per-object permissions for individual Stack instances.
_stack_object_permissions = (
    'launch',
    'view',
    'update',
    'ssh',
    'provision',
    'orchestrate',
    'execute',
    'start',
    'stop',
    'terminate',
    'delete',
    'admin',
)

# All per-stack files live under <FILE_STORAGE_DIRECTORY>/stacks.
stack_storage = FileSystemStorage(location=os.path.join(settings.FILE_STORAGE_DIRECTORY, 'stacks'))
# For map, pillar, and properties. Doesn't need to go in a sub directory
def get_local_file_path(instance, filename):
    """Upload-path builder: '<pk>-<slug>/<filename>' under the stack storage root."""
    return '{0.pk}-{0.slug}/{1}'.format(instance, filename)
# Orchestrate files go in formula directory
def get_orchestrate_file_path(instance, filename):
    """Upload-path builder placing orchestrate files in the stack's __stackdio__ formula dir."""
    return '{0.pk}-{0.slug}/formulas/__stackdio__/{1}'.format(instance, filename)
class Stack(TimeStampedModel, TitleSlugDescriptionModel, StatusModel):
    """A launched (or launching) collection of cloud hosts derived from a
    Blueprint, together with the salt/salt-cloud artifacts (map, top,
    pillar, orchestrate files) needed to build and manage it."""

    # Launch workflow:
    PENDING = 'pending'
    LAUNCHING = 'launching'
    CONFIGURING = 'configuring'
    SYNCING = 'syncing'
    PROVISIONING = 'provisioning'
    ORCHESTRATING = 'orchestrating'
    FINALIZING = 'finalizing'
    FINISHED = 'finished'
    # Delete workflow:
    # PENDING
    DESTROYING = 'destroying'
    # FINISHED
    # Other actions
    # LAUNCHING
    STARTING = 'starting'
    STOPPING = 'stopping'
    TERMINATING = 'terminating'
    EXECUTING_ACTION = 'executing_action'
    # Errors
    ERROR = 'error'
    # States in which it is safe to start a new operation on the stack
    SAFE_STATES = [FINISHED, ERROR]
    # Not sure?
    OK = 'ok'
    RUNNING = 'running'
    REBOOTING = 'rebooting'
    STATUS = Choices(PENDING, LAUNCHING, CONFIGURING, SYNCING, PROVISIONING,
                     ORCHESTRATING, FINALIZING, DESTROYING, FINISHED,
                     STARTING, STOPPING, TERMINATING, EXECUTING_ACTION, ERROR)
    model_permissions = _stack_model_permissions
    object_permissions = _stack_object_permissions
    searchable_fields = ('title', 'description', 'history__status_detail')

    class Meta:
        ordering = ('title',)
        default_permissions = tuple(set(_stack_model_permissions + _stack_object_permissions))
        unique_together = ('title',)

    # What blueprint did this stack derive from?
    blueprint = models.ForeignKey('blueprints.Blueprint', related_name='stacks')
    formula_versions = GenericRelation('formulas.FormulaVersion')
    labels = GenericRelation('core.Label')
    # An arbitrary namespace for this stack. Mainly useful for Blueprint
    # hostname templates
    namespace = models.CharField('Namespace', max_length=64)
    # Whether SSH users should be created on the stack's hosts
    create_users = models.BooleanField('Create SSH Users')
    # Where on disk is the salt-cloud map file stored
    map_file = DeletingFileField(
        max_length=255,
        upload_to=get_local_file_path,
        null=True,
        blank=True,
        default=None,
        storage=stack_storage)
    # Where on disk is the custom salt top.sls file stored
    top_file = DeletingFileField(
        max_length=255,
        null=True,
        blank=True,
        default=None,
        storage=FileSystemStorage(location=settings.STACKDIO_CONFIG.salt_core_states))
    # Where on disk is the custom orchestrate file stored
    orchestrate_file = DeletingFileField(
        max_length=255,
        upload_to=get_orchestrate_file_path,
        null=True,
        blank=True,
        default=None,
        storage=stack_storage)
    # Where on disk is the global orchestrate file stored
    global_orchestrate_file = DeletingFileField(
        max_length=255,
        upload_to=get_orchestrate_file_path,
        null=True,
        blank=True,
        default=None,
        storage=stack_storage)
    # Where on disk is the custom pillar file for custom configuration for
    # all salt states used by the top file
    pillar_file = DeletingFileField(
        max_length=255,
        upload_to=get_local_file_path,
        null=True,
        blank=True,
        default=None,
        storage=stack_storage)
    # Where on disk is the global pillar file used during global
    # orchestration
    global_pillar_file = DeletingFileField(
        max_length=255,
        upload_to=get_local_file_path,
        null=True,
        blank=True,
        default=None,
        storage=stack_storage)
    # storage for properties file
    props_file = DeletingFileField(
        max_length=255,
        upload_to=get_local_file_path,
        null=True,
        blank=True,
        default=None,
        storage=stack_storage)
    # Use our custom manager object
    objects = StackQuerySet.as_manager()

    def __unicode__(self):
        return u'{0} (id={1})'.format(self.title, self.id)

    def set_status(self, event, status, detail, level=Level.INFO):
        """Update the stack's status and append a StackHistory record."""
        self.status = status
        self.save()
        self.history.create(event=event, status=status,
                            status_detail=detail, level=level)

    def get_driver_hosts_map(self, host_ids=None):
        """
        Stacks are comprised of multiple hosts. Each host may be
        located in different cloud accounts. This method returns
        a map of the underlying driver implementation and the hosts
        that running in the account.

        @param host_ids (list); a list of primary keys for the hosts
            we're interested in
        @returns (dict); each key is a provider driver implementation
            with QuerySet value for the matching host objects
        """
        host_queryset = self.get_hosts(host_ids)
        # Create an account -> hosts map
        accounts = {}
        for h in host_queryset:
            accounts.setdefault(h.get_account(), []).append(h)
        # Convert to a driver -> hosts map
        result = {}
        for account, hosts in accounts.items():
            result[account.get_driver()] = host_queryset.filter(id__in=[h.id for h in hosts])
        return result

    def get_hosts(self, host_ids=None):
        """
        Quick way of getting all hosts or a subset for this stack.

        @host_ids (list); list of primary keys of hosts in this stack
        @returns (QuerySet);
        """
        if not host_ids:
            return self.hosts.all()
        return self.hosts.filter(id__in=host_ids)

    def get_formulas(self):
        """Return the formulas used by this stack's blueprint."""
        return self.blueprint.get_formulas()

    def get_tags(self):
        """Build the provider tag dict from labels plus the stack id."""
        tags = {}
        for label in self.labels.all():
            tags[label.key] = label.value
        tags['stack_id'] = self.id
        # No name allowed. salt-cloud uses this and it would break everything.
        if 'Name' in tags:
            del tags['Name']
        return tags

    @property
    def properties(self):
        """The stack's property dict, loaded from the props file on disk."""
        if not self.props_file:
            return {}
        with open(self.props_file.path, 'r') as f:
            return json.load(f)

    @properties.setter
    def properties(self, props):
        props_json = json.dumps(props, indent=4)
        if not self.props_file:
            self.props_file.save('stack.props', ContentFile(props_json))
        else:
            with open(self.props_file.path, 'w') as f:
                f.write(props_json)

    def create_security_groups(self):
        """Create one managed security group per blueprint host definition
        and authorize its access rules, tracking each group locally."""
        for hostdef in self.blueprint.host_definitions.all():
            # create the managed security group for each host definition
            # and assign the rules to the group
            sg_name = 'stackdio-managed-{0}-stack-{1}'.format(
                hostdef.slug,
                self.pk
            )
            sg_description = 'stackd.io managed security group'
            # cloud account and driver for the host definition
            account = hostdef.cloud_image.account
            if not account.create_security_groups:
                logger.debug('Skipping creation of {0} because security group creation is turned '
                             'off for the account'.format(sg_name))
                continue
            driver = account.get_driver()
            try:
                sg_id = driver.create_security_group(sg_name,
                                                     sg_description,
                                                     delete_if_exists=True)
            except Exception as e:
                err_msg = 'Error creating security group: {0}'.format(str(e))
                self.set_status('create_security_groups', self.ERROR,
                                err_msg, Level.ERROR)
                # BUGFIX: without this `continue`, the code below would
                # reference `sg_id`, which is undefined after a failure.
                continue
            logger.debug('Created security group {0}: {1}'.format(
                sg_name,
                sg_id
            ))
            for access_rule in hostdef.access_rules.all():
                driver.authorize_security_group(sg_id, {
                    'protocol': access_rule.protocol,
                    'from_port': access_rule.from_port,
                    'to_port': access_rule.to_port,
                    'rule': access_rule.rule,
                })
            # create the security group object that we can use for tracking
            self.security_groups.create(
                account=account,
                blueprint_host_definition=hostdef,
                name=sg_name,
                description=sg_description,
                group_id=sg_id,
                is_managed=True
            )

    def create_hosts(self, host_definition=None, count=None, backfill=False):
        """
        Creates host objects on this Stack. If no arguments are given, then
        all hosts available based on the Stack's blueprint host definitions
        will be created. If args are given, then only the `count` for the
        given `host_definition` will be created.

        @param host_definition (BlueprintHostDefinition object); the host
            definition to use for creating new hosts. If None, all host
            definitions for the stack's blueprint will be used.
        @param count (int); the number of hosts to create. If None, all
            hosts will be created.
        @param backfill (bool); If True, then hosts will be created with
            hostnames that fill in any gaps if necessary. If False, then
            hostnames will start at the end of the host list. This is only
            used when `host_definition` and `count` arguments are provided.
        """
        created_hosts = []
        if host_definition is None:
            host_definitions = self.blueprint.host_definitions.all()
        else:
            host_definitions = [host_definition]
        for hostdef in host_definitions:
            hosts = self.hosts.all()
            if count is None:
                start, end = 0, hostdef.count
                indexes = range(start, end)
            elif not hosts:
                start, end = 0, count
                indexes = range(start, end)
            else:
                if backfill:
                    hosts = hosts.order_by('index')
                    # The set of existing host indexes
                    host_indexes = set([h.index for h in hosts])
                    # The last index available
                    last_index = sorted(host_indexes)[-1]
                    # The set of expected indexes based on the last known
                    # index
                    expected_indexes = set(range(last_index + 1))
                    # Any gaps any the expected indexes?
                    gaps = expected_indexes - host_indexes
                    indexes = []
                    if gaps:
                        indexes = list(gaps)
                        count -= len(indexes)
                    start = last_index + 1
                    end = start + count
                    indexes += range(start, end)
                else:
                    start = hosts.order_by('-index')[0].index + 1
                    end = start + count
                    # NOTE: was xrange; use range for consistency with the
                    # other branches above
                    indexes = range(start, end)
            # all components defined in the host definition
            components = hostdef.formula_components.all()
            # iterate over the host definition count and create individual
            # host records on the stack
            for i in indexes:
                hostname = hostdef.hostname_template.format(
                    namespace=self.namespace,
                    index=i
                )
                kwargs = dict(
                    index=i,
                    cloud_image=hostdef.cloud_image,
                    blueprint_host_definition=hostdef,
                    instance_size=hostdef.size,
                    hostname=hostname,
                    sir_price=hostdef.spot_price,
                    state=Host.PENDING
                )
                if hostdef.cloud_image.account.vpc_enabled:
                    kwargs['subnet_id'] = hostdef.subnet_id
                else:
                    kwargs['availability_zone'] = hostdef.zone
                host = self.hosts.create(**kwargs)
                account = host.cloud_image.account
                # Add in the cloud account default security groups as
                # defined by an admin.
                account_groups = set(list(
                    account.security_groups.filter(
                        is_default=True
                    )
                ))
                host.security_groups.add(*account_groups)
                if account.create_security_groups:
                    # Add in the security group provided by this host definition,
                    # but only if this functionality is enabled on the account
                    security_group = SecurityGroup.objects.get(
                        stack=self,
                        blueprint_host_definition=hostdef
                    )
                    host.security_groups.add(security_group)
                # add formula components
                host.formula_components.add(*components)
                for volumedef in hostdef.volumes.all():
                    self.volumes.create(
                        host=host,
                        snapshot=volumedef.snapshot,
                        hostname=hostname,
                        device=volumedef.device,
                        mount_point=volumedef.mount_point
                    )
                created_hosts.append(host)
        return created_hosts

    def generate_cloud_map(self):
        """Build the salt-cloud map data structure for all hosts, keyed by
        cloud image slug -> hostname -> host metadata."""
        # TODO: Figure out a way to make this provider agnostic
        # TODO: Should we store this somewhere instead of assuming
        master = socket.getfqdn()
        images = {}
        hosts = self.hosts.all()
        cluster_size = len(hosts)
        for host in hosts:
            # load provider yaml to extract default security groups
            cloud_account = host.cloud_image.account
            cloud_account_yaml = yaml.safe_load(cloud_account.yaml)[cloud_account.slug]
            # pull various stuff we need for a host
            roles = [c.sls_path for c in host.formula_components.all()]
            instance_size = host.instance_size.title
            security_groups = set([
                sg.group_id for sg in host.security_groups.all()
            ])
            volumes = host.volumes.all()
            domain = cloud_account_yaml['append_domain']
            fqdn = '{0}.{1}'.format(host.hostname, domain)
            # The volumes will be defined on the map as well as in the grains.
            # Those in the map are used by salt-cloud to create and attach
            # the volumes (using the snapshot), whereas those on the grains
            # are available for states and modules to play with (e.g., to
            # mount the devices)
            map_volumes = []
            for vol in volumes:
                v = {
                    'device': vol.device,
                    'mount_point': vol.mount_point,
                    # filesystem_type doesn't matter, should remove soon
                    'filesystem_type': vol.snapshot.filesystem_type,
                    'type': 'gp2',
                }
                if vol.volume_id:
                    v['volume_id'] = vol.volume_id
                else:
                    v['snapshot'] = vol.snapshot.snapshot_id
                map_volumes.append(v)
            host_metadata = {
                'name': host.hostname,
                # The parameters in the minion dict will be passed on
                # to the minion and set in its default configuration
                # at /etc/salt/minion. This is where you would override
                # any default values set by salt-minion
                'minion': {
                    'master': master,
                    'log_level': 'debug',
                    'log_level_logfile': 'debug',
                    'mine_functions': {
                        'grains.items': []
                    },
                    # Grains are very useful when you need to set some
                    # static information about a machine (e.g., what stack
                    # id its registered under or how many total machines
                    # are in the cluster)
                    'grains': {
                        'roles': roles,
                        'stack_id': int(self.pk),
                        'fqdn': fqdn,
                        'domain': domain,
                        'cluster_size': cluster_size,
                        'stack_pillar_file': self.pillar_file.path,
                        'volumes': map_volumes,
                        'cloud_account': host.cloud_image.account.slug,
                        'cloud_image': host.cloud_image.slug,
                        'namespace': self.namespace,
                    },
                },
                # The rest of the settings in the map are salt-cloud
                # specific and control the VM in various ways
                # depending on the cloud account being used.
                'size': instance_size,
                'securitygroupid': list(security_groups),
                'volumes': map_volumes,
                'delvol_on_destroy': True,
                'del_all_vols_on_destroy': True,
            }
            if cloud_account.vpc_enabled:
                host_metadata['subnetid'] = host.subnet_id
            else:
                host_metadata['availability_zone'] = host.availability_zone.title
            # Add in spot instance config if needed
            if host.sir_price:
                host_metadata['spot_config'] = {
                    'spot_price': str(host.sir_price)  # convert to string
                }
            images.setdefault(host.cloud_image.slug, {})[host.hostname] = host_metadata
        return images

    def generate_map_file(self):
        """Serialize the cloud map to YAML and write it to the map file."""
        images = self.generate_cloud_map()
        map_file_yaml = yaml.safe_dump(images, default_flow_style=False)
        if not self.map_file:
            self.map_file.save('stack.map', ContentFile(map_file_yaml))
        else:
            with open(self.map_file.path, 'w') as f:
                f.write(map_file_yaml)

    def generate_top_file(self):
        """Write the top.sls targeting this stack's minions at core.* states."""
        top_file_data = {
            'base': {
                'G@stack_id:{0}'.format(self.pk): [
                    {'match': 'compound'},
                    'core.*',
                ]
            }
        }
        top_file_yaml = yaml.safe_dump(top_file_data, default_flow_style=False)
        if not self.top_file:
            self.top_file.save('stack_{0}_top.sls'.format(self.pk), ContentFile(top_file_yaml))
        else:
            with open(self.top_file.path, 'w') as f:
                f.write(top_file_yaml)

    def generate_orchestrate_file(self):
        """Write the per-stack orchestrate.sls that runs each component's
        state, ordered via `require` dependencies on the previous order
        group."""
        hosts = self.hosts.all()
        stack_target = 'G@stack_id:{0}'.format(self.pk)

        def _matcher(sls_set):
            return ' and '.join(
                [stack_target] + ['G@roles:{0}'.format(i) for i in sls_set]
            )
        # Group sls paths by their component order
        groups = {}
        for host in hosts:
            for component in host.formula_components.all():
                groups.setdefault(component.order, set()).add(component.sls_path)
        orchestrate = {}
        for order in sorted(groups.keys()):
            for role in groups[order]:
                orchestrate[role] = {
                    'salt.state': [
                        {'tgt': _matcher([role])},
                        {'tgt_type': 'compound'},
                        {'sls': role},
                    ]
                }
                # Require every state from the nearest lower order group
                depend = order - 1
                while depend >= 0:
                    if depend in groups.keys():
                        orchestrate[role]['salt.state'].append(
                            {'require': [{'salt': req} for req in groups[depend]]}
                        )
                        break
                    depend -= 1
        yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
        if not self.orchestrate_file:
            self.orchestrate_file.save('orchestrate.sls', ContentFile(yaml_data))
        else:
            with open(self.orchestrate_file.path, 'w') as f:
                f.write(yaml_data)

    def generate_global_orchestrate_file(self):
        """Write the global orchestrate.sls for account-level components,
        one state entry per (account, role) pair."""
        accounts = set([host.cloud_image.account for host in self.hosts.all()])
        orchestrate = {}
        for account in accounts:
            # Target the stack_id and cloud account
            target = 'G@stack_id:{0} and G@cloud_account:{1}'.format(
                self.id,
                account.slug)
            groups = {}
            for component in account.formula_components.all():
                groups.setdefault(component.order, set()).add(component.sls_path)
            for order in sorted(groups.keys()):
                for role in groups[order]:
                    state_title = '{0}_{1}'.format(account.slug, role)
                    orchestrate[state_title] = {
                        'salt.state': [
                            {'tgt': target},
                            {'tgt_type': 'compound'},
                            {'sls': role},
                        ]
                    }
                    depend = order - 1
                    while depend >= 0:
                        if depend in groups.keys():
                            # BUGFIX: append to the entry created above,
                            # which is keyed by state_title, not role —
                            # `orchestrate[role]` raised a KeyError here.
                            orchestrate[state_title]['salt.state'].append(
                                {'require': [{'salt': req} for req in groups[depend]]}
                            )
                            break
                        depend -= 1
        yaml_data = yaml.safe_dump(orchestrate, default_flow_style=False)
        if not self.global_orchestrate_file:
            self.global_orchestrate_file.save('global_orchestrate.sls', ContentFile(yaml_data))
        else:
            with open(self.global_orchestrate_file.path, 'w') as f:
                f.write(yaml_data)

    def generate_pillar_file(self, update_formulas=False):
        """Write the stack pillar file: SSH users (when enabled), formula
        SPECFILE defaults, then blueprint/stack properties on top."""
        # Import here to not cause circular imports
        from stackdio.api.formulas.models import FormulaVersion
        from stackdio.api.formulas.tasks import update_formula

        users = []
        # pull the create_ssh_users property from the stackd.io config file.
        # If it's False, we won't create ssh users on the box.
        if self.create_users:
            user_permissions_map = get_users_with_perms(
                self, attach_perms=True, with_superusers=True, with_group_users=True
            )
            for user, perms in user_permissions_map.items():
                if 'ssh_stack' in perms:
                    if user.settings.public_key:
                        logger.debug('Granting {0} ssh permission to stack: {1}'.format(
                            user.username,
                            self.title,
                        ))
                        users.append({
                            'username': user.username,
                            'public_key': user.settings.public_key,
                            'id': user.id,
                        })
                    else:
                        logger.debug(
                            'User {0} has ssh permission for stack {1}, but has no public key. '
                            'Skipping.'.format(
                                user.username,
                                self.title,
                            )
                        )
        pillar_props = {
            '__stackdio__': {
                'users': users
            }
        }
        # If any of the formulas we're using have default pillar
        # data defined in its corresponding SPECFILE, we need to pull
        # that into our stack pillar file.
        # First get the unique set of formulas
        formulas = set()
        for host in self.hosts.all():
            formulas.update([c.formula for c in host.formula_components.all()])
        # Update the formulas if requested
        if update_formulas:
            for formula in formulas:
                # Update the formula, and fail silently if there was an error.
                if formula.private_git_repo:
                    logger.debug('Skipping private formula: {0}'.format(formula.uri))
                    continue
                try:
                    version = self.formula_versions.get(formula=formula).version
                except FormulaVersion.DoesNotExist:
                    version = formula.default_version
                update_formula.si(formula.id, None, version, raise_exception=False)()
        # for each unique formula, pull the properties from the SPECFILE
        for formula in formulas:
            recursive_update(pillar_props, formula.properties)
        # Add in properties that were supplied via the blueprint and during
        # stack creation
        recursive_update(pillar_props, self.properties)
        pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
        if not self.pillar_file:
            self.pillar_file.save('stack.pillar', ContentFile(pillar_file_yaml))
        else:
            with open(self.pillar_file.path, 'w') as f:
                f.write(pillar_file_yaml)

    def generate_global_pillar_file(self, update_formulas=False):
        """Write the global pillar file: global formula defaults overlaid
        with each account's global orchestration properties."""
        # Import here to not cause circular imports
        from stackdio.api.formulas.models import FormulaVersion
        from stackdio.api.formulas.tasks import update_formula

        pillar_props = {}
        # Find all of the globally used formulas for the stack
        accounts = set(
            [host.cloud_image.account for
             host in self.hosts.all()]
        )
        global_formulas = []
        for account in accounts:
            global_formulas.extend(account.get_formulas())
        # Update the formulas if requested
        if update_formulas:
            for formula in global_formulas:
                # Update the formula, and fail silently if there was an error.
                if formula.private_git_repo:
                    logger.debug('Skipping private formula: {0}'.format(formula.uri))
                    continue
                try:
                    version = self.formula_versions.get(formula=formula).version
                except FormulaVersion.DoesNotExist:
                    version = formula.default_version
                update_formula.si(formula.id, None, version, raise_exception=False)()
        # Add the global formulas into the props
        for formula in set(global_formulas):
            recursive_update(pillar_props, formula.properties)
        # Add in the account properties AFTER the stack properties
        for account in accounts:
            recursive_update(pillar_props,
                             account.global_orchestration_properties)
        pillar_file_yaml = yaml.safe_dump(pillar_props, default_flow_style=False)
        if not self.global_pillar_file:
            self.global_pillar_file.save('stack.global_pillar', ContentFile(pillar_file_yaml))
        else:
            with open(self.global_pillar_file.path, 'w') as f:
                f.write(pillar_file_yaml)

    def query_hosts(self, force=False):
        """
        Uses salt-cloud to query all the hosts for the given stack id.
        """
        CACHE_KEY = 'salt-cloud-full-query'
        cached_result = cache.get(CACHE_KEY)
        if cached_result and not force:
            logger.debug('salt-cloud query result cached')
            result = cached_result
        else:
            logger.debug('salt-cloud query result not cached, retrieving')
            logger.info('get_hosts_info: {0!r}'.format(self))
            salt_cloud = salt.cloud.CloudClient(settings.STACKDIO_CONFIG.salt_cloud_config)
            result = salt_cloud.full_query()
            # Cache the result for a minute
            cache.set(CACHE_KEY, result, 60)
        # yaml_result contains all host information in the stack, but
        # we have to dig a bit to get individual host metadata out
        # of account and provider type dictionaries
        host_result = {}
        for host in self.hosts.all():
            account = host.get_account()
            provider = account.provider
            # each host is buried in a cloud provider type dict that's
            # inside a cloud account name dict
            # Grab the list of hosts
            host_map = result.get(account.slug, {}).get(provider.name, {})
            # Grab the individual host
            host_result[host.hostname] = host_map.get(host.hostname, None)
        return host_result

    def get_root_directory(self):
        """Return this stack's on-disk root directory, or None if neither
        the map nor props file has been written yet."""
        if self.map_file:
            return os.path.dirname(self.map_file.path)
        if self.props_file:
            return os.path.dirname(self.props_file.path)
        return None

    def get_log_directory(self):
        """Return (creating if needed) the stack's log directory."""
        root_dir = self.get_root_directory()
        log_dir = os.path.join(root_dir, 'logs')
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        return log_dir

    def get_security_groups(self):
        """Return the managed security groups attached to this stack's hosts."""
        return SecurityGroup.objects.filter(is_managed=True,
                                            hosts__stack=self).distinct()

    def get_role_list(self):
        """Return the unique sls paths of all blueprint components."""
        roles = set()
        for bhd in self.blueprint.host_definitions.all():
            for formula_component in bhd.formula_components.all():
                roles.add(formula_component.sls_path)
        return list(roles)
class StackHistory(TimeStampedModel, StatusDetailModel):
    """Audit record of a Stack status change: which event caused it, the
    resulting status (inherited detail field), and a severity level."""
    class Meta:
        verbose_name_plural = 'stack history'
        # Newest entries first
        ordering = ['-created', '-id']
        default_permissions = ()
    # Reuse the Stack's status choices so history mirrors stack states
    STATUS = Stack.STATUS
    stack = models.ForeignKey('Stack', related_name='history')
    # What 'event' (method name, task name, etc) that caused
    # this status update
    event = models.CharField(max_length=128)
    # The human-readable description of the event
    # status = models.TextField(blank=True)
    # Optional: level (DEBUG, INFO, WARNING, ERROR, etc)
    level = models.CharField(max_length=16, choices=(
        (Level.DEBUG, Level.DEBUG),
        (Level.INFO, Level.INFO),
        (Level.WARN, Level.WARN),
        (Level.ERROR, Level.ERROR),
    ))
class StackCommand(TimeStampedModel, StatusModel):
    """A shell command executed against some or all hosts of a Stack,
    with its captured stdout/stderr and simple lifecycle status."""
    WAITING = 'waiting'
    RUNNING = 'running'
    FINISHED = 'finished'
    ERROR = 'error'
    STATUS = Choices(WAITING, RUNNING, FINISHED, ERROR)
    class Meta:
        verbose_name_plural = 'stack actions'
        default_permissions = ()
    stack = models.ForeignKey('Stack', related_name='commands')
    # The started executing
    start = models.DateTimeField('Start Time', blank=True, default=now)
    # Which hosts we want to target
    host_target = models.CharField('Host Target', max_length=255)
    # The command to be run (for custom actions)
    command = models.TextField('Command')
    # The output from the action
    std_out_storage = models.TextField()
    # The error output from the action
    std_err_storage = models.TextField()
    @property
    def std_out(self):
        """Decoded stdout: JSON-parsed when present, else an empty list."""
        if self.std_out_storage != "":
            return json.loads(self.std_out_storage)
        else:
            return []
    @property
    def std_err(self):
        """Raw stderr text as stored."""
        return self.std_err_storage
    @property
    def submit_time(self):
        # Creation timestamp doubles as the submission time
        return self.created
    @property
    def start_time(self):
        """Start timestamp, or '' until the command has actually started."""
        if self.status in (self.RUNNING, self.FINISHED):
            return self.start
        else:
            return ''
    @property
    def finish_time(self):
        """Last-modified timestamp once finished, else ''."""
        if self.status == self.FINISHED:
            return self.modified
        else:
            return ''
class Host(TimeStampedModel, StatusDetailModel):
    """A single VM belonging to a Stack, tied to a cloud image/size and a
    blueprint host definition, with provider-reported runtime metadata."""
    PENDING = 'pending'
    OK = 'ok'
    DELETING = 'deleting'
    STATUS = Choices(PENDING, OK, DELETING)
    class Meta:
        ordering = ['blueprint_host_definition', '-index']
        default_permissions = ()
    # TODO: We should be using generic foreign keys here to a cloud account
    # specific implementation of a Host object. I'm not exactly sure how this
    # will work, but I think by using Django's content type system we can make
    # it work...just not sure how easy it will be to extend, maintain, etc.
    stack = models.ForeignKey('Stack',
                              related_name='hosts')
    cloud_image = models.ForeignKey('cloud.CloudImage',
                                    related_name='hosts')
    instance_size = models.ForeignKey('cloud.CloudInstanceSize',
                                      related_name='hosts')
    # Zone is only used for non-VPC accounts (see create_hosts above)
    availability_zone = models.ForeignKey('cloud.CloudZone',
                                          null=True,
                                          related_name='hosts')
    subnet_id = models.CharField('Subnet ID', max_length=32, blank=True, default='')
    blueprint_host_definition = models.ForeignKey(
        'blueprints.BlueprintHostDefinition',
        related_name='hosts')
    hostname = models.CharField('Hostname', max_length=64)
    # Position of this host within its host definition (used for hostname
    # templates and backfilling)
    index = models.IntegerField('Index')
    security_groups = models.ManyToManyField('cloud.SecurityGroup',
                                             related_name='hosts')
    # The machine state as provided by the cloud account
    state = models.CharField('State', max_length=32, default='unknown')
    state_reason = models.CharField('State Reason', max_length=255, default='', blank=True)
    # This must be updated automatically after the host is online.
    # After salt-cloud has launched VMs, we will need to look up
    # the DNS name set by whatever cloud provider is being used
    # and set it here
    provider_dns = models.CharField('Provider DNS', max_length=64, blank=True)
    provider_private_dns = models.CharField('Provider Private DNS', max_length=64, blank=True)
    provider_private_ip = models.CharField('Provider Private IP Address', max_length=64, blank=True)
    # The FQDN for the host. This includes the hostname and the
    # domain if it was registered with DNS
    fqdn = models.CharField('FQDN', max_length=255, blank=True)
    # Instance id of the running host. This is provided by the cloud
    # provider
    instance_id = models.CharField('Instance ID', max_length=32, blank=True)
    # Spot instance request ID will be populated when metadata is refreshed
    # if the host has been configured to launch spot instances. By default,
    # it will be unknown and will be set to NA if spot instances were not
    # used.
    sir_id = models.CharField('SIR ID',
                              max_length=32,
                              default='unknown')
    # The spot instance price for this host if using spot instances
    sir_price = models.DecimalField('Spot Price',
                                    max_digits=5,
                                    decimal_places=2,
                                    null=True)
    def __unicode__(self):
        return self.hostname
    @property
    def provider_metadata(self):
        """This host's entry from the stack-wide salt-cloud query."""
        metadata = self.stack.query_hosts()
        return metadata[self.hostname]
    @property
    def formula_components(self):
        # Components are defined on the blueprint host definition
        return self.blueprint_host_definition.formula_components
    def get_account(self):
        """Return the cloud account that owns this host's image."""
        return self.cloud_image.account
    def get_provider(self):
        """Return the cloud provider behind this host's account."""
        return self.get_account().provider
    def get_driver(self):
        """Return the provider driver for this host's cloud image."""
        return self.cloud_image.get_driver()
|
23,631 | d061f5b24677cec648ef05021121b12b76425452 | import random
import pygame
# constant for window
# Game window geometry: 480x700 playing field
WINDOW_RECT = pygame.Rect(0, 0, 480, 700)
# Target frame rate for the game loop (frames per second)
FRAME_PER_SEC = 60
# Custom user-event id for the enemy timer (presumably armed with
# pygame.time.set_timer elsewhere — not visible in this file)
ENEMY_TIMER = pygame.USEREVENT
# Custom user-event id for the bullet timer (distinct from ENEMY_TIMER)
BULLET_TIMER = pygame.USEREVENT + 1
class GameSprite(pygame.sprite.Sprite):
    """Base sprite for the plane game: an image that drifts vertically."""

    def __init__(self, img_name, speed=2):
        super().__init__()
        # Load the sprite's image once and derive its bounding rectangle
        loaded = pygame.image.load(img_name)
        self.image = loaded
        self.rect = loaded.get_rect()
        self.speed = speed

    def update(self):
        # Advance vertically; positive speed moves down, negative moves up
        self.rect.y = self.rect.y + self.speed
class Background(GameSprite):
    """Scrolling background image; two alternating copies give the
    illusion of an endless vertical scroll."""

    def __init__(self, is_alt=False):
        super().__init__("./images/background.png")
        # The alternate copy starts one full image-height above the window
        if is_alt:
            self.rect.y = -self.rect.height

    def update(self):
        super().update()
        # Wrap back above the top once the image has scrolled off-screen
        if self.rect.y >= WINDOW_RECT.height:
            self.rect.y = -self.rect.height
class Enemy(GameSprite):
    """Enemy plane: spawns just above the window at a random column,
    falls at a random speed, and dies once it leaves the screen."""

    def __init__(self):
        super().__init__("./images/enemy1.png")
        # Random downward speed in [2, 4]
        self.speed = random.randint(2, 4)
        # Start fully above the visible area
        self.rect.bottom = 0
        # Random horizontal position keeping the sprite inside the window
        self.rect.x = random.randint(0, WINDOW_RECT.width - self.rect.width)

    def update(self):
        super().update()
        # Free the sprite once it has fallen past the bottom edge
        if self.rect.y >= WINDOW_RECT.height:
            self.kill()
class Hero(GameSprite):
    """Player plane: moves horizontally under keyboard control, stays
    clamped inside the window, and fires three-bullet bursts."""

    def __init__(self):
        super().__init__("./images/me1.png", 0)
        # Start horizontally centered, 120px above the bottom edge
        self.rect.centerx = WINDOW_RECT.centerx
        self.rect.bottom = WINDOW_RECT.bottom - 120
        # Group holding this hero's live bullets
        self.bullets = pygame.sprite.Group()

    def update(self):
        # Horizontal movement only; clamp to the window edges
        self.rect.x += self.speed
        if self.rect.x < 0:
            self.rect.x = 0
        elif self.rect.right > WINDOW_RECT.right:
            self.rect.right = WINDOW_RECT.right

    def fire(self):
        """Fire a burst of three bullets stacked 20px apart above the plane."""
        for offset in (0, 20, 40):
            shot = Bullet()
            shot.rect.bottom = self.rect.y - offset
            shot.rect.centerx = self.rect.centerx
            self.bullets.add(shot)
class Bullet(GameSprite):
    """Bullet sprite: travels upward and removes itself off-screen."""

    def __init__(self):
        # Negative speed makes the bullet move toward the top of the window
        super().__init__("./images/bullet1.png", -2)

    def update(self):
        super().update()
        # Discard the bullet once it has passed above the window
        if self.rect.bottom < 0:
            self.kill()
|
23,632 | a7f42009ea00cd6327d443895d97c0fb89566993 | from random import randint
class Die():
    """A die with a configurable number of sides."""

    def __init__(self, sides):
        # Number of faces on the die
        self.sides = sides

    def roll_die(self):
        """Roll the die: print the face shown and return it.

        Returning the value (previously discarded) lets callers use the
        roll programmatically; existing callers that ignore the return
        value are unaffected.
        """
        result = randint(1, self.sides)
        print("You have rolled out :" + str(result))
        return result
# Demo: create a 6-sided and a 10-sided die, then roll each ten times.
my_die = Die(6)
my_die_0 = Die(10)
for _ in range(10):
    my_die.roll_die()
    my_die_0.roll_die()
|
23,633 | 88fe726e62f866f91fbece5a59adcb27a9f6dde2 | import html
import os
import sys
import time
import eyed3
import urllib
from selenium import webdriver
from selenium.webdriver.common.by import By
def scroll(driver):
    """Scroll to the bottom of an infinite-scrolling page.

    Repeatedly jumps to the current document bottom and waits briefly,
    stopping once the page height stops growing.
    (Technique adapted from OWADVL and sbha on Stack Exchange.)
    """
    pause_seconds = 0.5
    previous_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause_seconds)
        # Calculate new scroll height and compare with last scroll height
        current_height = driver.execute_script("return document.body.scrollHeight")
        if current_height == previous_height:
            break
        previous_height = current_height
def parse_query(query):
    """Strip the ' my-free-mp3s.com' suffix token from *query* and
    URL-encode what remains (spaces become %20)."""
    cleaned = query.replace(" my-free-mp3s.com", "")
    return urllib.parse.quote(cleaned)
def find_best_track(tracks, query):
    """Return the track whose aria-label contains the most query words.

    Words come from the %20-separated *query*. Ties keep the earliest
    track; None is returned when no track matches any word at all.
    """
    words = query.split("%20")
    best_guess = None
    best_count = 0
    for candidate in tracks:
        label = str(candidate.get_attribute("aria-label"))
        match_count = sum(1 for word in words if word in label)
        # Strictly greater: earlier tracks win ties
        if match_count > best_count:
            best_count = match_count
            best_guess = candidate
    return best_guess
def embed_artwork(audiofile, track):
    """Download the track's cover art and embed it into the mp3's tags.

    The artwork URL is scraped from the inline background-image style of
    the cover-art element; the image is fetched to a temp file, embedded
    as a front-cover image (type 3), and the temp file removed.
    """
    # Grab url for artwork
    artwork = track.find_element(By.XPATH, "//a[@class='sound__coverArt']/div/span")
    artwork_url = artwork.get_attribute('style').split('background-image: url("')[1].split('");')[0]
    # download and embed artwork; use a context manager so the file
    # handle is closed before the temp file is deleted (the original
    # left the handle open, which fails on some platforms)
    urllib.request.urlretrieve(artwork_url, "tmp.jpg")
    with open('tmp.jpg', 'rb') as img:
        audiofile.tag.images.set(3, img.read(), 'image/jpeg')
    os.remove("tmp.jpg")
def embed_date(audiofile, track):
    """Set the mp3's recording year from the track's upload timestamp."""
    upload_time = track.find_element(By.XPATH, "//div[@class='soundTitle__uploadTime']/time")
    # The datetime attribute is ISO-like; the year is the first '-' field
    audiofile.tag.recording_date = upload_time.get_attribute('datetime').split('-')[0]
def embed_artist(audiofile, track):
    """Set the artist tag, preferring the artist spelled in the title.

    If the uploader name appears inside the title's 'Artist - Title'
    prefix, that prefix is used (usually the fuller spelling);
    otherwise the uploader name itself is used.
    """
    title_el = track.find_element(By.XPATH, "//div[@class='soundTitle__usernameTitleContainer']/a/span")
    artist_el = track.find_element(By.XPATH, "//div[@class='soundTitle__usernameTitleContainer']/div/a/span")
    title_prefix = html.unescape(title_el.get_attribute("innerHTML")).split(' - ')[0]
    uploader = html.unescape(artist_el.get_attribute("innerHTML"))
    audiofile.tag.artist = title_prefix if uploader in title_prefix else uploader
def embed_title(audiofile, track):
    """Set the title tag from the part of the heading after 'Artist - '."""
    title_el = track.find_element(By.XPATH, "//div[@class='soundTitle__usernameTitleContainer']/a/span")
    raw_title = title_el.get_attribute("innerHTML").split(' - ')[-1]
    audiofile.tag.title = html.unescape(raw_title)
def print_metadata(audiofile):
    """Print the main ID3 fields of *audiofile*, one per line (debug aid)."""
    tag = audiofile.tag
    for field in (tag.title, tag.artist, tag.album,
                  tag.album_artist, tag.genre, tag.recording_date):
        print(field)
def main(path):
    """Tag the mp3 at *path* with metadata scraped from SoundCloud.

    Builds a search query from the file name, scrolls through the
    soundcloud.com results in a headless Chrome, picks the best-matching
    track, and embeds artwork, year, artist, and title into the file.
    Prints a failure line and leaves the file untouched when no result
    matches.
    """
    audiofile = eyed3.load(path)
    # Query text comes from the last path component (the file name)
    query = path.split('/')[-1]
    query = parse_query(query)
    # prepare the option for the chrome driver
    options = webdriver.ChromeOptions()
    options.add_argument('headless')
    # start chrome browser
    driver = webdriver.Chrome(options=options)
    driver.get("https://soundcloud.com/search?q=" + query)
    # Load every result by scrolling to the bottom of the page
    scroll(driver)
    # Grab all results
    tracks = driver.find_elements(By.XPATH, "//li[@class='searchList__item']/div/div")
    track = find_best_track(tracks, query)
    if track:
        embed_artwork(audiofile, track)
        embed_date(audiofile, track)
        embed_artist(audiofile, track)
        embed_title(audiofile, track)
        audiofile.tag.save()
    else:
        # No result matched any query word
        print('fail:', path)
    driver.quit()
if __name__ == "__main__":
if len(sys.argv) != 2:
exit(1) # bad arguments
path = sys.argv[1]
# single file
if os.path.isfile(path):
main(path)
# directory
elif os.path.isdir(path):
for file in os.listdir(path):
if file.endswith(".mp3"):
print(os.path.join(path, file))
main(os.path.join(path, file))
|
23,634 | 6c99a2079d190a3a47c055135bc3807fab145ab6 | from strongr.core.abstracts.abstractservice import AbstractService
from strongr.schedulerdomain.command import ScheduleJob, RunEnqueuedJobs,\
StartJobOnVm, CheckJobsRunning, \
EnsureMinAmountOfNodes, ScaleOut, \
JobFinished, VmCreated,\
VmReady, VmDestroyed, VmNew, CheckScaling,\
CleanupNodes, ScaleIn, LogStats, CleanupOldJobs
from strongr.schedulerdomain.handler import ScheduleJobHandler, RunEnqueuedJobsHandler,\
StartJobOnVmHandler, CheckJobsRunningHandler,\
EnsureMinAmountOfNodesHandler, ScaleOutHandler, \
RequestFinishedJobsHandler, JobFinishedHandler,\
VmDestroyedHandler, VmReadyHandler,\
VmCreatedHandler, VmNewHandler, CheckScalingHandler,\
CleanupNodesHandler, ScaleInHandler, LogStatsHandler,\
CleanupOldJobsHandler
from strongr.schedulerdomain.query import RequestScheduledJobs, RequestJobInfo,\
FindNodeWithAvailableResources, RequestFinishedJobs,\
RequestResourcesRequired, RequestVmsByState
from strongr.schedulerdomain.handler import RequestScheduledTasksHandler, RequestTaskInfoHandler,\
FindNodeWithAvailableResourcesHandler, RequestResourcesRequiredHandler,\
RequestVmsByStateHandler
class SchedulerService(AbstractService):
    """Service facade for the scheduler domain: wires each command and
    query to its handler and exposes lazily-built bus singletons."""
    # Lazily created on first access
    _command_bus = None
    _query_bus = None
    def register_models(self):
        """Register the scheduler domain's persistence models."""
        import strongr.schedulerdomain.model
        # importing alone is enough for registration
    def getCommandBus(self):
        """Return the command bus, building the handler -> command map
        on first use."""
        if self._command_bus is None:
            self._command_bus = self._make_default_commandbus({
                ScheduleJobHandler: ScheduleJob,
                RunEnqueuedJobsHandler: RunEnqueuedJobs,
                StartJobOnVmHandler: StartJobOnVm,
                CheckJobsRunningHandler: CheckJobsRunning,
                EnsureMinAmountOfNodesHandler: EnsureMinAmountOfNodes,
                ScaleOutHandler: ScaleOut,
                JobFinishedHandler: JobFinished,
                VmCreatedHandler: VmCreated,
                VmDestroyedHandler: VmDestroyed,
                VmReadyHandler: VmReady,
                VmNewHandler: VmNew,
                CheckScalingHandler: CheckScaling,
                CleanupNodesHandler: CleanupNodes,
                ScaleInHandler: ScaleIn,
                LogStatsHandler: LogStats,
                CleanupOldJobsHandler: CleanupOldJobs
            })
        return self._command_bus
    def getQueryBus(self):
        """Return the query bus, building the handler -> query map on
        first use."""
        if self._query_bus is None:
            self._query_bus = self._make_default_querybus({
                RequestScheduledTasksHandler: RequestScheduledJobs,
                RequestTaskInfoHandler: RequestJobInfo,
                FindNodeWithAvailableResourcesHandler: FindNodeWithAvailableResources,
                RequestFinishedJobsHandler: RequestFinishedJobs,
                RequestResourcesRequiredHandler: RequestResourcesRequired,
                RequestVmsByStateHandler: RequestVmsByState
            })
        return self._query_bus
|
23,635 | d99002782cad5f1139a018ea347f816285272db8 | #-*- coding: utf-8 -*-
import re
import socket
import sqlite3
import sys
import time
from subprocess import Popen
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter.filedialog import *
from xmlrpc.client import ServerProxy
from _overlapped import NULL
# for easier debugging
class Classe():
    """Data holder for one CRC card: owner, name, responsibilities, collaborators.

    In practice the responsibility/collaborator arguments are the Tk Listboxes
    of the add/edit form (see Vue.confirmer), consumed via .size()/.get().
    """
    def __init__ (self, pProprietaire, pNom, pResponsabilites, pCollaborateurs):
        self.proprietaire = pProprietaire      # owner of the class
        self.nom = pNom                        # class name
        self.responsabilites = pResponsabilites  # responsibilities container
        self.collaborateurs = pCollaborateurs    # collaborators container
class Vue():
    """Tkinter view of the CRC module: class list, details pane and add/edit form."""
    def __init__(self, parent):
        """Build the main window; *parent* is the Controleur."""
        self.parent=parent
        self.root=Tk() # main window
        self.root.title("MODULE CRC")
        self.root.protocol("WM_DELETE_WINDOW", self.parent.fermerProgramme)
        self.hauteurTotale=200
        self.largeurTotale=200
        self.hauteurMandat=200
        self.largeurMandat=200
        self.fenetre = Frame(master=self.root, width=self.largeurTotale, height=self.hauteurTotale, bg="steelblue")
        self.fenetre.pack()
        self.classes = []
        self.creerVueMenu()
        self.collaborateurs=[]
        self.responsabilites = []
        self.focused_box = None  # Listbox currently holding focus (drives delete/modify buttons)
        self.nomProprietaire = StringVar( value='propriétaire')
        self.nomClasse= StringVar( value='nom de classe')
    def loaderNomClasses(self):
        """Reload the class-name Listbox from the model."""
        self.parent.modele.nomsDesClasses()
        classes = self.parent.modele.classes
        # empty the list first
        self.listeClasses.delete(0, END)
        for i in classes:
            self.listeClasses.insert(END,i[3])
    def creerVueMenu(self):
        """Create the main two-pane menu (class list left, details right)."""
        self.menu = Frame(self.fenetre, width = self.largeurMandat, height=self.hauteurMandat, bg="steelblue", relief=RAISED, padx=10, pady=10)
        self.menu.pack(side=LEFT)
        self.creerMenuGauche()
        self.creerMenuDroite()
        self.loaderNomClasses()
        # fetch the list of classes
    def choisirClasse(self,event):
        """Fill the details pane for the class selected in the class Listbox."""
        # empty both detail lists
        self.listeResponsabilites.delete(0, END) # clear the list
        self.listeCollaboration.delete(0, END) # clear the list
        # index of the selection in the class Listbox
        index = self.listeClasses.curselection()[0]
        # self.parent.modele.classes holds the full rows, while the Listbox
        # only shows the names; element 0 of a row is the class id
        self.classeChoisi = self.parent.modele.classes[index]
        # NOTE(review): this lookup passes the Listbox *index* instead of the
        # class id and its result is never used — the call further below with
        # str(self.classeChoisi[0]) is the effective one.
        collaborateursDeLaClasse = self.parent.modele.collaborateursDeLaClasse(index)
        #for element in collaborateursDeLaClasse:
        #    self.listeCollaboration.insert(END,element[2])
        # load the responsibilities
        responsabilites = self.parent.modele.responsabilitiesDeLaClasse(str(self.classeChoisi[0]))
        for element in responsabilites:
            self.listeResponsabilites.insert(END,element[0])
        #for element in range(5):
        #    self.listeResponsabilites.insert(END,"coucou")
        # load the collaborators
        collaborateurs = self.parent.modele.collaborateursDeLaClasse(str(self.classeChoisi[0]))
        for element in collaborateurs:
            self.listeCollaboration.insert(END,element[0])
        # owner information
        #self.lblNomClasse.config(text = self.classes[index][3]) # 3 = class name
        proprietaire= self.parent.modele.classes[index][2]
        classe = self.parent.modele.classes[index][3]
        #self.nomProprietaire.set(self.parent.modele.classes[index][2])
        self.lblNomClasse.config(text = classe)
        self.lblProprietaire.config(text = proprietaire) # 2 = owner of the class
        # pre-fill the edit form fields so "modify" opens with this class loaded
        self.nomProprietaire.set(proprietaire)
        self.nomClasse.set(classe)
    def creerMenuGauche(self):
        """Left pane: scrollable class Listbox plus delete/modify buttons."""
        self.menuGauche = Frame(self.menu, width = self.largeurMandat, height=self.hauteurMandat, bg="steelblue", relief=RAISED, padx=10, pady=10)
        self.menuGauche.pack(side=LEFT)
        largeur = 25
        frame1 = Frame(self.menuGauche)
        frame1.pack(fill=X, pady=5)
        lbl1 = Label(frame1, text="Liste des classes", width=largeur)
        lbl1.pack(side=LEFT, padx=55, pady=5)
        frame2 = Frame(self.menuGauche)
        frame2.pack()
        #scrollbar
        self.listeClasses = Listbox(frame2, height=25)
        self.listeClasses.bind('<<ListboxSelect>>',self.choisirClasse)
        # deselecting this list must disable the delete button
        self.listeClasses.bind("<FocusOut>", self.box_unfocused)
        self.listeClasses.bind("<FocusIn>", self.box_focused)
        self.listeClasses.pack(side=LEFT,fill="y")
        # load the class list
        #self.chercherClasse()
        scrollbar = Scrollbar(frame2, orient = "vertical")
        scrollbar.config(command=self.listeClasses.yview)
        scrollbar.pack(side=LEFT,fill="y")
        self.listeClasses.config(yscrollcommand=scrollbar.set)
        self.loaderNomClasses()
        #for x in range(30):
        #    listeClasses.insert(END, str(x))
        frame3 = Frame(self.menuGauche, bg="steelblue")
        frame3.pack(fill=BOTH, expand=True, pady = 5)
        self.btnSuppression = Button(frame3, text = "Suppression", state=DISABLED, command = self.supprimerClasse)
        self.btnSuppression.pack(side = LEFT)
        self.btnModification = Button(frame3, text = "Modification",state=DISABLED, command=lambda: self.creerMenuAjout(1))
        self.btnModification.pack(side = RIGHT)
    def supprimerClasse(self):
        """Delete the currently selected class and reset the details pane."""
        id = self.classeChoisi[0]
        self.parent.modele.supprimerClasse(id)
        #chaine = "WHERE id = " + str(id)
        #self.parent.serveur.delete("Classes","id", str(id))
        self.loaderNomClasses()
        self.listeResponsabilites.delete(0, END) # clear the list
        self.listeCollaboration.delete(0, END) # clear the list
        self.lblNomClasse.config(text = "Nom de classe")
        self.lblProprietaire.config(text = "proprietaire")
    def creerMenuDroite(self):
        """Right pane: read-only details (name, owner, responsibilities, collaboration)."""
        self.menuDroite = Frame(self.menu, width = self.largeurMandat, height=self.hauteurMandat, bg="steelblue", relief=RAISED, padx=10, pady=10)
        self.menuDroite.pack(side=LEFT)
        largeur = 25
        frame1 = Frame(self.menuDroite)
        frame1.pack(fill=X, pady=5)
        lblTitre = Label(frame1, text="Informations", width=largeur)
        lblTitre.pack(side=LEFT, padx=55, pady=5)
        frame2 = Frame(self.menuDroite)
        frame2.pack(fill=X)
        self.lblNomClasse = Label(frame2, text = "nom de la classe")
        self.lblNomClasse.pack()
        #self.lblNomClasse.config(text = "afawf")
        self.lblProprietaire = Label(frame2, text = "propriétaire de la classe")
        self.lblProprietaire.pack()
        lblResponsabilites = Label(frame2, text = "Responsabilités")
        lblResponsabilites.pack()
        self.listeResponsabilites = Listbox(frame2)
        self.listeResponsabilites.pack()
        lblCollaboration = Label(frame2, text = "Collaboration")
        lblCollaboration.pack()
        self.listeCollaboration = Listbox(frame2)
        self.listeCollaboration.pack()
        frame3 = Frame(self.menuDroite, bg="steelblue")
        frame3.pack(fill=BOTH, expand=True, pady = 5)
        self.boutonNouvelleClasse = Button(frame3, text="Ajouter nouvelle classe", command=lambda: self.creerMenuAjout(2))
        self.boutonNouvelleClasse.pack(side =TOP)
    def creerMenuAjout(self, bouton):
        """Show the add/edit form; *bouton* is 1 when modifying, 2 when creating."""
        if bouton == 1: # modify an existing class
            pass
        elif bouton == 2: # new class: start with empty fields
            self.nomClasse.set("")
            self.nomProprietaire.set("")
        # hide the main two-pane menu
        self.menuDroite.pack_forget()
        self.menuGauche.pack_forget()
        self.menuAjout = Frame(self.menu, width = self.largeurMandat, height=self.hauteurMandat, bg="steelblue", relief=RAISED, padx=10, pady=10)
        self.menuAjout.pack()
        # class-name row
        frame1 = Frame(self.menuAjout)
        frame1.pack(fill=X, pady=5)
        lblNomClasse = Label(frame1, text="Nom (classe)", width=25)
        lblNomClasse.pack(side=LEFT)
        # enable/disable the entry depending on the mode
        # NOTE(review): both branches are identical (state=NORMAL either way)
        if bouton == 1:
            self.entryNomClasse = Entry(frame1, text="", width=25, textvariable=self.nomClasse, state = NORMAL)
        elif bouton == 2:
            self.entryNomClasse = Entry(frame1, text="", width=25, textvariable=self.nomClasse, state = NORMAL)
        self.entryNomClasse.pack(side=LEFT)
        #entryNomClasse.insert(END,"nom de la classe");
        # owner row
        frame2 = Frame(self.menuAjout)
        frame2.pack(fill=X, pady=5)
        lblProprietaire = Label(frame2, text="Propriétaire", width=25)
        lblProprietaire.pack(side=LEFT)
        #self.nomProprietaire = StringVar(frame2, value='allo') // textvariable=self.nomProprietaire
        #self.nomProprietaire = StringVar( value='wafwafe')
        self.entryProprietaire = Entry(frame2, width=25, textvariable=self.nomProprietaire)
        self.entryProprietaire.pack(side=LEFT)
        # responsibilities / collaboration headers
        frame3 = Frame(self.menuAjout)
        frame3.pack(fill=X, pady=5)
        lblResponsabilite = Label(frame3, text="Responsabilité", width=25)
        lblResponsabilite.pack(side=LEFT)
        lblCollaboration = Label(frame3, text="Collaboration", width=25)
        lblCollaboration.pack(side=LEFT)
        # responsibilities / collaboration input row
        frame4 = Frame(self.menuAjout)
        frame4.pack(fill=X, pady=5)
        largeur = 55
        self.entryResponsabilite = Entry(frame4, text="", width=15)
        self.entryResponsabilite.pack(side=LEFT, padx = largeur)
        self.entryResponsabilite.bind('<Return>',self.saisirResponsabilite)
        # drop-down listing all class names of the current project
        requete = self.parent.modele.nomsDesClasses()
        if self.listeClasses.size() == 0:
            valeur = ""
            choix = [""]
        else:
            choix = []
            for i in requete:
                if str(i[1]) == str(self.parent.idProjet):
                    choix.insert(len(choix), i[3])
            valeur = requete[0][3]
        #liste = self.parent.modele.nomsDesClasses()
        self.classeChoisie = StringVar(frame4)
        self.classeChoisie.set(valeur) # default value of the drop-down
        self.choixClasses = OptionMenu(frame4,self.classeChoisie,*choix)
        self.choixClasses.pack(side="left")
        # button to add a collaborator
        boutonCollaboration = Button(frame4, text="Ajouter", command = self.saisirCollaboration)
        boutonCollaboration.pack(side = LEFT)
        # zone holding the two form Listboxes
        frameDeuxBox = Frame(self.menuAjout)
        frameDeuxBox.pack()
        # left scrollbar (responsibilities)
        frame6 = Frame(frameDeuxBox)
        frame6.pack(fill=X, pady=5, side=LEFT)
        scrollbar = Scrollbar(frame6, orient = "vertical")
        self.listeResponsabilitesAjout = Listbox(frame6, height=25,yscrollcommand=scrollbar)
        self.listeResponsabilitesAjout.pack(side=LEFT, fill=BOTH, expand=1)
        self.listeResponsabilitesAjout.bind("<FocusIn>", self.box_focused)
        self.listeResponsabilitesAjout.bind("<FocusOut>", self.box_unfocused)
        scrollbar.config(command=self.listeResponsabilitesAjout.yview)
        scrollbar.pack(side=LEFT,fill="y", expand=1)
        # right scrollbar (collaborations)
        frame5 = Frame(frameDeuxBox)
        frame5.pack(fill=X, pady=5, side=LEFT)
        scrollbar = Scrollbar(frame5, orient = "vertical")
        self.listeCollaborationAjout = Listbox(frame5, height=25,yscrollcommand=scrollbar)
        self.listeCollaborationAjout.pack(side=LEFT, fill=BOTH, expand=1)
        self.listeCollaborationAjout.bind("<FocusIn>", self.box_focused)
        self.listeCollaborationAjout.bind("<FocusOut>", self.box_unfocused)
        scrollbar.config(command=self.listeCollaborationAjout.yview)
        scrollbar.pack(side=LEFT,fill="y", expand=1)
        # bottom button row
        frame7 = Frame(self.menuAjout)
        frame7.pack(fill=X, pady=5)
        boutonConfirmer = Button(frame7, text="Confirmer", command=lambda: self.confirmer(bouton) )
        boutonConfirmer.pack(side = LEFT)
        boutonSupprimer = Button(frame7, text="Supprimer", command = self.supprimer)
        boutonSupprimer.pack(side = LEFT)
        boutonCanceler = Button(frame7, text="Canceler", command = self.canceler)
        boutonCanceler.pack(side = LEFT)
        # in edit mode, pre-load the selected class's responsibilities/collaborators
        if bouton == 1:
            # fetch the collaborators of the class
            collaborateursDeLaClasse = self.parent.modele.collaborateursDeLaClasse(str(self.classeChoisi[0]))
            for element in collaborateursDeLaClasse:
                self.listeCollaborationAjout.insert(END,element[0])
            # fetch the responsibilities of the class
            responsabilites = self.parent.modele.responsabilitiesDeLaClasse(str(self.classeChoisi[0]))
            for element in responsabilites:
                self.listeResponsabilitesAjout.insert(END,element[0])
    def canceler(self):
        """Leave the add/edit form and restore the two-pane main menu."""
        self.entryNomClasse.delete(0, END)
        self.entryNomClasse.insert(0, "")
        self.entryProprietaire.delete(0, END)
        self.entryProprietaire.insert(0, "")
        # remove the form
        self.menuAjout.pack_forget()
        # go back to the main menu
        self.menuGauche.pack(side=LEFT)
        self.menuDroite.pack(side=LEFT)
        self.loaderNomClasses()
        # disable modify/delete until a class is selected again
        self.btnSuppression.config(state=DISABLED)
        self.btnModification.config(state=DISABLED)
    def saisirCollaboration(self):
        """Append the class picked in the drop-down to the collaborators Listbox."""
        saisie= self.classeChoisie.get()
        self.collaborateurs.append(saisie)
        self.listeCollaborationAjout.insert(END,saisie)
        # clear the Entry after input
        #self.entryCollaboration.delete(0,END)
    def supprimer(self):
        """Remove the selected row from whichever form Listbox currently has focus."""
        try:
            if self.focused_box == self.listeCollaborationAjout:
                index = self.listeCollaborationAjout.curselection()[0]
                self.listeCollaborationAjout.delete(index)
            elif self.focused_box == self.listeResponsabilitesAjout:
                index = self.listeResponsabilitesAjout.curselection()[0]
                self.listeResponsabilitesAjout.delete(index)
        except IndexError: # nothing selected
            pass
    def saisirResponsabilite(self,event):
        """On <Return>, move the typed responsibility into the form Listbox."""
        saisie = self.entryResponsabilite.get()
        #self.listeResponsabilitesAjout.insert(saisie)
        self.listeResponsabilitesAjout.insert(END,saisie)
        # clear the Entry after input
        self.entryResponsabilite.delete(0,END)
    def box_focused(self, event):
        """Track the focused Listbox and enable the delete/modify buttons."""
        self.focused_box = event.widget
        # focus is on a Listbox: allow deletion/modification
        self.btnSuppression.config(state=ACTIVE)
        self.btnModification.config(state=ACTIVE)
    def box_unfocused(self, event):
        """Forget the focused Listbox and disable the delete/modify buttons."""
        self.focused_box = None
        # focus left the Listbox: forbid deletion/modification
        self.btnSuppression.config(state=DISABLED)
        self.btnModification.config(state=DISABLED)
    def confirmer(self,bouton):
        """Validate the form and persist the class (delete + reinsert in edit mode)."""
        saisieNomClasse = self.entryNomClasse.get()
        saisieProprietaire = self.entryProprietaire.get()
        # highlight missing fields in yellow
        if saisieNomClasse == "":
            self.entryNomClasse.configure({"background": "Yellow"})
        else:
            self.entryNomClasse.configure({"background": "White"})
        if saisieProprietaire == "":
            self.entryProprietaire.configure({"background": "Yellow"})
        else:
            self.entryProprietaire.configure({"background": "White"})
        # warn about missing information
        if (saisieNomClasse == "" or saisieProprietaire == ""):
            pass # NOTE(review): stray no-op; the warning below still runs
            messagebox.showwarning("Attention", "Saisir les informations manquantes")
        else:
            classe = Classe(saisieProprietaire, saisieNomClasse, self.listeResponsabilitesAjout, self.listeCollaborationAjout)
            self.parent.modele.insertionConfirmer(classe)
            if (bouton == 1): # edit mode
                self.supprimerClasse() # drop the old class so the new one replaces it
                #self.parent.modele.insertionConfirmer(classe)
            self.canceler() # back to the main CRC menu
    def chercherClasse(self):
        """Load every class name straight from the server.

        Not called anywhere in this module (the only call site, in
        creerMenuGauche, is commented out).
        """
        classes = self.parent.serveur.selectionAllSQL("Classes")
        for classe in classes:
            self.listeClasses.insert(END,classe[3])
class Modele():
    """Data layer of the CRC module: talks to the XML-RPC backend and local SQLite."""

    def __init__(self, parent, serveur):
        """Store the controller (*parent*) and the XML-RPC server proxy."""
        self.parent=parent
        self.serveur = serveur

    def responsabilitiesDeLaClasse(self, id_classe):
        """Return the responsibility rows (name column only) of class *id_classe*."""
        nomTable = "Responsabilites"
        champs = "nom"
        where = ["id_classe"]
        valeur = [id_classe]
        return self.parent.serveur.selDonneesWHERE(nomTable, champs, where, valeur)

    def collaborateursDeLaClasse(self, id_classe):
        """Return the collaborator rows (name column only) of class *id_classe*.

        The previous implementation issued an extra, unused ``selectionSQL``
        round-trip, built an unused list and left a dead ``.selection``
        attribute access; only the WHERE lookup below was ever returned, so
        the dead statements were removed.
        """
        nomTable = "Collaborations"
        champs = "nom"
        where = ["id_classe"]
        valeur = [str(id_classe)]
        return self.parent.serveur.selDonneesWHERE(nomTable, champs, where, valeur)

    def enregistrer(self, texteMandat):
        """Persist the mandate text of the given Tk Text widget into BDD.sqlite."""
        texteMandat = texteMandat.get(1.0, 'end-1c')
        conn = sqlite3.connect('BDD.sqlite')
        try:
            c = conn.cursor()
            # single-row table: wipe the previous mandate before inserting
            c.execute('''DELETE FROM mandats''')
            c.execute('INSERT INTO mandats VALUES(?)', (texteMandat,))
            conn.commit()
        finally:
            # close even when execute/commit raises (was leaked before)
            conn.close()

    def nomsDesClasses(self):
        """Fetch all classes of the current project and cache them in self.classes."""
        selected = self.serveur.selectionSQL3("Classes", "*", "id_projet", str(self.parent.idProjet))
        self.classes = list(selected)
        return selected

    def insertionConfirmer(self, classe):
        """Insert *classe* (a Classe whose containers are Tk Listboxes) server-side.

        Shows an error dialog and aborts when the class name is rejected by
        the server-side existence check.
        """
        # NOTE(review): verificationExiste(...)==False triggering the
        # "already exists" dialog looks inverted — confirm the server contract.
        if(self.parent.serveur.verificationExiste("nom", "Classes", "id_projet", self.parent.idProjet, classe.nom)==False):
            messagebox.showerror("Nom de classe existant","Le nom de la classe existe deja")
            return
        # SECURITY(review): values are concatenated into the SQL payload;
        # quotes in names will break or inject the query — parameterize server-side.
        chaine = "'" + str(self.parent.idProjet) + "','" +str(classe.proprietaire) + "','" + str(classe.nom)+ "'"
        idClasse = self.serveur.insertionSQL("Classes", chaine)
        # insert every responsibility, then every collaborator, of the new class
        for i in range (classe.responsabilites.size()):
            nom = classe.responsabilites.get(i)
            chaine = "'" + str(idClasse) + "','" +str(nom) + "'"
            self.parent.serveur.insertionSQL("Responsabilites",chaine)
        for i in range (classe.collaborateurs.size()):
            nom = classe.collaborateurs.get(i)
            chaine = "'" + str(idClasse) + "','" +str(nom) + "'"
            self.parent.serveur.insertionSQL("Collaborations",chaine)

    def supprimerClasse(self, id_classe):
        """Delete the class row and its dependent responsibility/collaboration rows."""
        self.parent.serveur.delete("Classes","id", str(id_classe))
        self.supprimerAttributs("Responsabilites",str(id_classe))
        self.supprimerAttributs("Collaborations",str(id_classe))

    def supprimerAttributs(self, type, id_classe):
        """Delete all rows of table *type* belonging to *id_classe*.

        (The parameter name shadows the builtin ``type``; kept for
        call-compatibility.)
        """
        self.parent.serveur.delete(type,"id_classe", str(id_classe))
class Controleur():
    """CRC module controller: wires CLI arguments, the XML-RPC server, model and view."""
    def __init__(self):
        # # real (production) version
        # self.saasIP=sys.argv[1]
        # self.utilisateur=sys.argv[2]
        # self.organisation=sys.argv[3]
        # self.idProjet=sys.argv[4]
        # self.clientIP=sys.argv[5]
        # self.adresseServeur="http://"+self.saasIP+":9999"
        #
        # self.modele=Modele(self)
        # self.serveur = self.connectionServeur()
        # self.vue=Vue(self)
        # self.vue.root.mainloop()
        # debug version
        # argv layout: 1=SaaS IP, 2=user name, 3=organisation, 4=project id, 5=client IP
        self.saasIP= sys.argv[1]
        self.utilisateur= sys.argv[2]
        self.organisation= sys.argv[3]
        self.idProjet= sys.argv[4]
        self.clientIP= sys.argv[5]
        self.portSaas=":9999"
        self.adresseServeur="http://"+self.saasIP+self.portSaas
        self.serveur = self.connectionServeur()
        self.modele=Modele(self,self.serveur)
        self.vue=Vue(self)
        self.writeLog("Ouverture du Module","2")
        # blocks until the window is closed
        self.vue.root.mainloop()
    def connectionServeur(self):
        """Return an XML-RPC proxy to the SaaS server."""
        return ServerProxy(self.adresseServeur)
    def fermerProgramme(self):
        """Log the shutdown and destroy the Tk root window."""
        self.writeLog("Fermeture du Module","3")
        self.vue.root.destroy()
    def writeLog(self,action,codeid):
        """Send an audit-log entry for this module to the server."""
        self.serveur.writeLog(self.organisation,self.utilisateur,self.clientIP,self.saasIP,"CRC",action,codeid)
if __name__ == '__main__':
    # launch the CRC module stand-alone (expects 5 CLI arguments, see Controleur)
    c=Controleur()
def createQueue():
    """Return a new, empty queue backed by a plain list."""
    return []

def enqueue(q, data):
    """Insert *data* at the front of *q* (newest item lives at index 0); return *q*."""
    q.insert(0, data)
    return q

def dequeue(q):
    """Remove and return the oldest item, i.e. the last list element."""
    return q.pop()

def isEmpty(q):
    """Return True when the queue holds no items."""
    return q == []

def size(q):
    """Return the number of queued items."""
    return len(q)
def jumlahcpu(baris,kolom):
    """Interactively read *baris* processes (each: name + burst time) and return
    them as a list of [name, time] rows, newest first.

    *kolom* is the number of (name, time) pairs requested per row.
    """
    m=[]
    for i in range (baris):
        matriks=[]
        # NOTE(review): the inner loop reuses the name `i`, shadowing the outer index
        for i in range ((kolom)):
            b=(str(input("Nama Proses: ")))   # process name
            a=(int(input("Waktu Proses : "))) # burst time
            matriks.append(b)
            matriks.append(a)
        m.append(matriks)
    # reversed so the first process entered ends up last (queue tail)
    return list(reversed(m))
# Round-robin CPU scheduling demo driven by interactive input.
cpu=int(input("Jumlah Proses yang akan dijadwal di CPU :"))
print("Antrian Proses : ",jumlahcpu(cpu,1))
# NOTE(review): `m` is used before assignment here — NameError at runtime.
# This also prompts the user a second time for the same process list.
m=[m+jumlahcpu(cpu,1)]
queue=createQueue()
c=int(input("Waktu Proses CPU = ")) # time quantum
print("Antrian Proses beserta Waktunya = ",m)
# NOTE(review): `baris` and `kolom` are undefined at module level (they are
# jumlahcpu's parameters), and len() takes one argument — this line raises.
for i in range (len(baris,kolom)):
    print("Iterasi ke- ",i)
    if m[size(m)-1][1] - c > 0:
        # NOTE(review): `d` is read before it is ever assigned — NameError.
        d= d+ (m[size(m)-1][0]-c)
        print("Proses",m[size(m)-1][0],"sedang diproses, dan sisa waktu proses",m[size(m)-1][0],"=",d)
        print(enqueue(queue,dequeue(m)))
    else:
        print("Proses C telah selesai diproses")
        dequeue(m)
        print(m)
|
23,637 | fa133b38a7b172da8c4f28e89f937594074a1548 | # -*- coding: utf-8 -*-
#
import helpers
from matplotlib import pyplot as plt
import numpy as np
def plot():
    """Draw y = x**2 on log-log axes for x in [1, 1e6] and return the figure."""
    fig = plt.figure()
    # 5 sample points are enough: the curve is a straight line in log-log space
    x = np.logspace(0, 6, num=5)
    plt.loglog(x, x**2, lw=2.1)
    return fig
def test():
    """Regression test: the figure's perceptual hash must stay pinned."""
    phash = helpers.Phash(plot())
    # any rendering change shows up as a hash mismatch; details aid debugging
    assert phash.phash == 'e9c37e896e21be80', phash.get_details()
    return
|
23,638 | 463e57f97b2dfc5471dcc46ac22ca95cccc94e5f | # | Copyright 2009-2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
import os, xml.dom.minidom
from grid_control import utils
from grid_control.backends.aspect_cancel import CancelJobsWithProcessBlind
from grid_control.backends.aspect_status import CheckInfo, CheckJobsMissingState, CheckJobsWithProcess
from grid_control.backends.backend_tools import BackendDiscovery, ProcessCreatorViaArguments
from grid_control.backends.wms import BackendError, WMS
from grid_control.backends.wms_pbsge import PBSGECommon
from grid_control.config import ConfigError
from grid_control.job_db import Job
from grid_control.utils.parsing import parseTime
from grid_control.utils.process_base import LocalProcess
from python_compat import any, imap, izip, lmap, set, sorted
class GridEngine_CheckJobsProcessCreator(ProcessCreatorViaArguments):
    """Builds the `qstat -xml` command line used to poll Grid Engine job states."""
    def __init__(self, config):
        ProcessCreatorViaArguments.__init__(self, config)
        self._cmd = utils.resolveInstallPath('qstat')
        # restrict output to one user's jobs; defaults to $LOGNAME
        self._user = config.get('user', os.environ.get('LOGNAME', ''), onChange = None)
    def _arguments(self, wmsIDs):
        # wmsIDs is unused: qstat lists all jobs (optionally filtered by user)
        if not self._user:
            return [self._cmd, '-xml']
        return [self._cmd, '-xml', '-u', self._user]
class GridEngine_CheckJobs(CheckJobsWithProcess):
    """Parses `qstat -xml` output into per-job status dictionaries."""
    def __init__(self, config, user = None):
        # NOTE(review): the `user` parameter is accepted but never used;
        # the user filter lives in GridEngine_CheckJobsProcessCreator.
        CheckJobsWithProcess.__init__(self, config, GridEngine_CheckJobsProcessCreator(config))
    def _parse(self, proc):
        """Yield one info dict per <job_list> node of the qstat XML output."""
        proc.status(timeout = self._timeout)
        status_string = proc.stdout.read(timeout = 0)
        # qstat gives invalid xml in <unknown_jobs> node
        unknown_start = status_string.find('<unknown_jobs')
        unknown_jobs_string = ''
        if unknown_start >= 0:
            unknown_end_tag = '</unknown_jobs>'
            unknown_end = status_string.find(unknown_end_tag) + len(unknown_end_tag)
            unknown_jobs_string = status_string[unknown_start:unknown_end]
            # give the anonymous <>...</> children a proper tag name
            unknown_jobs_string_fixed = unknown_jobs_string.replace('<>', '<unknown_job>').replace('</>', '</unknown_job>')
            status_string = status_string.replace(unknown_jobs_string, unknown_jobs_string_fixed)
        try:
            dom = xml.dom.minidom.parseString(status_string)
        except Exception:
            raise BackendError("Couldn't parse qstat XML output!")
        for job_node in dom.getElementsByTagName('job_list'):
            job_info = {}
            try:
                # copy every leaf element's text into the dict, keyed by tag name
                for node in job_node.childNodes:
                    if node.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
                        continue
                    if node.hasChildNodes():
                        job_info[str(node.nodeName)] = str(node.childNodes[0].nodeValue)
                job_info[CheckInfo.WMSID] = job_info.pop('JB_job_number')
                job_info[CheckInfo.RAW_STATUS] = job_info.pop('state')
                if 'queue_name' in job_info:
                    # queue_name has the form <queue>@<worker node>
                    queue, node = job_info['queue_name'].split('@')
                    job_info[CheckInfo.QUEUE] = queue
                    job_info[CheckInfo.WN] = node
            except Exception:
                raise BackendError('Error reading job info:\n%s' % job_node.toxml())
            yield job_info
    def _parse_status(self, value, default):
        """Map a raw qstat state string onto a grid-control Job state."""
        if any(imap(lambda x: x in value, ['E', 'e'])):
            return Job.UNKNOWN
        if any(imap(lambda x: x in value, ['h', 's', 'S', 'T', 'w'])):
            return Job.QUEUED
        if any(imap(lambda x: x in value, ['r', 't'])):
            return Job.RUNNING
        return Job.READY
class GridEngine_Discover_Nodes(BackendDiscovery):
    """Discovers Grid Engine host groups and their member hosts via `qconf`."""
    def __init__(self, config):
        BackendDiscovery.__init__(self, config)
        self._configExec = utils.resolveInstallPath('qconf')
    def discover(self):
        """Yield {'name': ...} for every host group, then for every resolved host."""
        nodes = set()
        proc = LocalProcess(self._configExec, '-shgrpl')
        for group in proc.stdout.iter(timeout = 10):
            yield {'name': group.strip()}
            # resolve the group into its member hosts
            proc_g = LocalProcess(self._configExec, '-shgrp_resolved', group)
            for host_list in proc_g.stdout.iter(timeout = 10):
                nodes.update(host_list.split())
            proc_g.status_raise(timeout = 0)
        # hosts are deduplicated across groups and emitted sorted
        for host in sorted(nodes):
            yield {'name': host.strip()}
        proc.status_raise(timeout = 0)
class GridEngine_Discover_Queues(BackendDiscovery):
    """Discovers Grid Engine queues and their resource limits via `qconf`."""
    def __init__(self, config):
        BackendDiscovery.__init__(self, config)
        self._configExec = utils.resolveInstallPath('qconf')
    def discover(self):
        """Yield one dict per queue with parsed memory/cputime/walltime limits."""
        tags = ['h_vmem', 'h_cpu', 's_rt']
        reqs = dict(izip(tags, [WMS.MEMORY, WMS.CPUTIME, WMS.WALLTIME]))
        parser = dict(izip(tags, [int, parseTime, parseTime]))
        proc = LocalProcess(self._configExec, '-sql')
        for queue in imap(str.strip, proc.stdout.iter(timeout = 10)):
            proc_q = LocalProcess(self._configExec, '-sq', queue)
            queueInfo = {'name': queue}
            for line in proc_q.stdout.iter(timeout = 10):
                attr, value = lmap(str.strip, line.split(' ', 1))
                # 'INFINITY' means no limit configured - leave the key out then
                if (attr in tags) and (value != 'INFINITY'):
                    queueInfo[reqs[attr]] = parser[attr](value)
            proc_q.status_raise(timeout = 0)
            yield queueInfo
        proc.status_raise(timeout = 0)
class GridEngine(PBSGECommon):
    """Grid Engine (SGE/UGE/OGE) workload manager backend built on PBSGECommon."""
    alias = ['SGE', 'UGE', 'OGE']
    configSections = PBSGECommon.configSections + ['GridEngine'] + alias
    def __init__(self, config, name):
        # qdel accepts a comma-separated id list in a single invocation
        cancelExecutor = CancelJobsWithProcessBlind(config, 'qdel',
            fmt = lambda wmsIDs: [str.join(',', wmsIDs)], unknownID = ['Unknown Job Id'])
        PBSGECommon.__init__(self, config, name,
            cancelExecutor = cancelExecutor,
            checkExecutor = CheckJobsMissingState(config, GridEngine_CheckJobs(config)),
            nodesFinder = GridEngine_Discover_Nodes(config),
            queuesFinder = GridEngine_Discover_Queues(config))
        self._project = config.get('project name', '', onChange = None)
        # NOTE(review): resolved but not used in this class - confirm if needed
        self._configExec = utils.resolveInstallPath('qconf')
    def getSubmitArguments(self, jobNum, jobName, reqs, sandbox, stdout, stderr):
        """Build the qsub argument string for one job from its requirements."""
        # seconds -> HH:MM:SS for the time-based resource limits
        timeStr = lambda s: '%02d:%02d:%02d' % (s / 3600, (s / 60) % 60, s % 60)
        reqMap = { WMS.MEMORY: ('h_vmem', lambda m: '%dM' % m),
            WMS.WALLTIME: ('s_rt', timeStr), WMS.CPUTIME: ('h_cpu', timeStr) }
        # Restart jobs = no
        params = ' -r n -notify'
        if self._project:
            params += ' -P %s' % self._project
        # Job requirements
        (queue, nodes) = (reqs.get(WMS.QUEUES, [''])[0], reqs.get(WMS.SITES))
        if not nodes and queue:
            params += ' -q %s' % queue
        elif nodes and queue:
            # pin the queue instance on every requested node: queue@node,...
            params += ' -q %s' % str.join(',', imap(lambda node: '%s@%s' % (queue, node), nodes))
        elif nodes:
            raise ConfigError('Please also specify queue when selecting nodes!')
        return params + PBSGECommon.getCommonSubmitArguments(self, jobNum, jobName, reqs, sandbox, stdout, stderr, reqMap)
    def parseSubmitOutput(self, data):
        """Extract the job id from qsub output."""
        # Your job 424992 ("test.sh") has been submitted
        return data.split()[2].strip()
|
23,639 | a2e523a7f22f5685a03ddf4b6344e1409bfd85b9 | import requests
import pandas as pd
# Download the OpenBL.org base blacklist and dump it as a one-column CSV.
r = requests.get("http://www.openbl.org/lists/base_all.txt")
data = r.text.split("\n")
# data[4:] skips the list's comment banner at the top of the file
# NOTE(review): DataFrame.to_csv returns None, so `df` is not a DataFrame here
df = pd.DataFrame(data[4:], columns=['ips']).to_csv("dns_blacklist_ip.csv")
23,640 | 47435fe9a62ede5c0421b4ebd8ab25e2d545331e | #https://www.acmicpc.net/problem/2798
# 백준 2798 : 블랙잭
#테스트용 입력
#import os
#f = open(os.path.join(os.path.dirname(__file__), 'data.txt'), 'r')
#제출용 입력
import sys
from itertools import combinations

def best_blackjack_sum(cards, target):
    """Return the largest sum of any 3 distinct cards not exceeding *target*.

    Returns 0 when no triple fits (BOJ 2798 guarantees one exists).
    """
    best = 0
    for trio in combinations(cards, 3):
        total = sum(trio)
        if best < total <= target:
            best = total
    return best

if __name__ == "__main__":
    # Read test cases from stdin until EOF. Each case:
    #   line 1: "N M" (card count, target value)
    #   line 2: the N card values
    # Guarded by __main__ so importing this module no longer consumes stdin.
    f = sys.stdin
    while True:
        data = f.readline()
        if data == "":
            break
        n, target = map(int, data.split())
        cards = list(map(int, f.readline().split()))
        print(best_blackjack_sum(cards, target))
|
23,641 | 5fc70a8fd25ed35376e4849d6852b9c0380bc98a | from tri_declarative import *
from tri_table import *
from tri_query import *
from tri_query import MISSING as q_MISSING
# Map of the public tri_* classes to their generated documentation trees.
documentation = {
    'Table': documentation_tree(Table),
    'Column': documentation_tree(Column),
    # 'Form': documentation_tree(Form),  # TODO
    'Field': documentation_tree(Field),
    'Query': documentation_tree(Query),
    'Variable': documentation_tree(Variable),
}
from json import JSONEncoder, dumps
class PythonObjectEncoder(JSONEncoder):
    """JSONEncoder that serializes sets as lists and the tri_query MISSING
    sentinel as the string '<MISSING>'."""
    def default(self, obj):
        if isinstance(obj, set):
            return list(obj)
        if obj is q_MISSING:
            return '<MISSING>'
        # anything else falls through (raises TypeError for unsupported types)
        return JSONEncoder.default(self, obj)
# Emit the documentation as a JS global so a static HTML page can load it.
with open('tri.table-docs.json', 'w') as f:
    f.write('var docs = ' + dumps(documentation, cls=PythonObjectEncoder))
|
23,642 | 0c465c036ca7c4a7cac09c19ae3d5d11cd2280aa | from tkinter import *
class Calculator:
def __init__(self, my_attr):
self.my_attr = my_attr
my_attr.title('My Basic Python Calculator')
# create screen widget
self.screen = Text(my_attr, state='disabled', width=30, height=3, background='yellow', foreground='blue')
# position screen in window
self.screen.grid(row=0, column=0, columnspan=4, padx=5, pady=5)
self.screen.configure(state='normal')
# initialize screen as empty
self.equation = ''
# create buttons using createButton method
button_1 = self.createButton(7)
button_2 = self.createButton(8)
button_3 = self.createButton(9)
button_4 = self.createButton(u"\u232B", None)
button_5 = self.createButton(4)
button_6 = self.createButton(5)
button_7 = self.createButton(6)
button_8 = self.createButton(u"\u00F7")
button_9 = self.createButton(1)
button_10 = self.createButton(2)
button_11 = self.createButton(3)
button_12 = self.createButton('*')
button_13 = self.createButton('.')
button_14 = self.createButton(0)
button_15 = self.createButton('+')
button_16 = self.createButton('-')
button_17 = self.createButton('=', None, 34)
# buttons stored in a list
buttons = [button_1, button_2, button_3, button_4, button_5, button_6, button_7, button_8, button_9, button_10, button_11, button_12,
button_13, button_14, button_15, button_16, button_17]
# initialize the counter
count = 0
# arrange buttons with grid manager
for row in range(1, 5):
for column in range(4):
buttons[count].grid(row=row, column=column)
count += 1
# arrange '=' button at the bottom
buttons[16].grid(row=5, column=0, columnspan=4)
def createButton(self, value, write=True, width=7):
# function creates a button, and takes one compulsory argument, the value that should be on the button
return Button(self.my_attr, text=value, command=lambda:self.click(value, write), width=width)
def click(self, text, write):
# this function handles what happens when you click a button
# 'write' argument if True means the value 'value' should be written on the screen,
# if None, should not be written
if write is None:
# Only evaluate code when there is an equation to be evaluated
if text == '=' and self.equation:
# replace the unicode value of division ./. with python division (/) using regex
self.equation = re.sub(u"\u00F7", '/', self.equation)
print(self.equation)
answer = str(eval(self.equation))
print(answer)
self.clear_screen()
self.insert_screen(answer, newline=True)
elif text == u"\u232B":
self.clear_screen()
else:
# add text to the screen
self.insert_screen(text)
def clear_screen(self):
    """Erase the display widget and reset the pending equation."""
    # reset the equation before deleting the on-screen text so the two
    # never get out of sync
    self.equation = ''
    self.screen.configure(state='normal')
    self.screen.delete('1.0', END)
def insert_screen(self, value, newline=False):
    """Append *value* to the screen and record it in the equation buffer.

    NOTE(review): the *newline* parameter is currently unused — confirm
    whether a trailing newline was ever intended.
    """
    self.screen.configure(state='normal')
    self.screen.insert(END, value)
    # mirror every on-screen value into the equation string for later eval
    self.equation += str(value)
    self.screen.configure(state='disabled')
# --- application entry point: build the root window, attach the
# calculator UI, and enter the Tk event loop ---
my_tkinter = Tk()
calc_gui = Calculator(my_tkinter)
my_tkinter.mainloop()
|
23,643 | 0029fc951e573c6c2608b6c6e4ef4ed4e806dee2 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-14 21:04
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add JSONB ``ingredients`` and ``instructions`` columns to ``Recipe``."""

    dependencies = [
        ('home', '0018_auto_20160914_1351'),
    ]

    operations = [
        migrations.AddField(
            model_name='recipe',
            name='ingredients',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
        migrations.AddField(
            model_name='recipe',
            name='instructions',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
        ),
    ]
|
23,644 | 596d59d26c4ead040dce40e94725c9aa7cec486e | #use the nonlocal topological method to detect tips.
# also records topologcially preserved values.
#Tim Tyree
#9.13.2021
from skimage import measure
from numba import jit, njit
from numba.typed import List
import numpy as np, os
from . import *
# from .intersection import *
from scipy.interpolate import interp2d
from .intersection import *
# from . import find_contours
# from ._utils_find_contours import *
# from ._utils_find_tips import *
# from ._find_tips import *
@njit#(cache=True)#, nogil = True)
def get_tips(contours_a,contours_b):
    '''Return every pairwise intersection of contours_a with contours_b.

    Must receive contours that make no attempt to jump the boundaries.
    tuple(contours_a), tuple(contours_b) are each tuples of m-by-2
    np.ndarrays (each member a 1D line, m any positive int).

    Returns (n_list, x_list, y_list): for each intersecting pair,
    n_list holds the (index_in_a, index_in_b) tuple and x_list/y_list
    hold the intersection coordinates.

    Will throw a TypingError exception if either input tuple is empty.
    If you get a nonsingular-matrix error, make sure you're not
    comparing a contour with itself.'''
    # numba typed lists are required inside an njit function
    n_list = List(); x_list = List(); y_list = List();
    ncr = len(contours_a); nci = len(contours_b)
    for n1 in range(ncr):
        for n2 in range(nci):
            c1 = contours_a[n1]
            c2 = contours_b[n2]
            # column 0 = x coordinates, column 1 = y coordinates
            x1 = c1[:, 0]
            y1 = c1[:, 1]
            x2 = c2[:, 0]
            y2 = c2[:, 1]
            x,y = intersection(x1, y1, x2, y2)
            if len(x)>0:
                # record the parent-contour index pair with the points
                s = (n1,n2)
                xl = list(x)
                yl = list(y)
                n_list.append(s)
                x_list.append(xl)
                y_list.append(yl)
    return n_list, x_list, y_list
def enumerate_tips(tips):
    """Repack tips into plain sorted Python lists.

    Returns (n_list, x_list, y_list), or None when *tips* is empty.
    Note: the x and y coordinate lists of each entry are sorted
    independently, so x/y pairing within an entry is not preserved.
    """
    if len(tips) == 0:
        return None  # [],[],[]
    n_list, x_lst, y_lst = [], [], []
    for idx, pair in enumerate(tips):
        if len(pair) == 0:
            continue
        ys, xs = pair
        n_list.append(idx)
        x_lst.append(sorted(xs))
        y_lst.append(sorted(ys))
    return n_list, x_lst, y_lst
def list_tips(tips):
    """Thin alias for :func:`tips_to_list`."""
    return tips_to_list(tips)
def tips_to_list(tips):
    """Repack tips into plain sorted Python lists.

    Returns (x_list, y_list); both empty when *tips* is empty.
    Each entry's x and y coordinates are sorted independently.
    """
    xs_out, ys_out = [], []
    for pair in tips:
        if len(pair) == 0:
            continue
        ys, xs = pair
        xs_out.append(sorted(xs))
        ys_out.append(sorted(ys))
    return xs_out, ys_out
def my_numba_list_to_python_list(numba_lst):
    """Convert a numba typed list of sequences into a nested plain Python list."""
    return [list(inner) for inner in numba_lst]
@njit
def unpad_xy_position (position, pad_x, width, rejection_distance_x,
                        pad_y, height, rejection_distance_y):
    """Map a (x, y) position on the padded frame back to the unpadded frame.

    Applies :func:`unpad` per axis; either coordinate may come back as the
    -9999 sentinel when it falls within the rejection band near an edge.
    """
    x = unpad(X=position[0], pad=pad_x, width=width, rejection_distance=rejection_distance_x)
    y = unpad(X=position[1], pad=pad_y, width=height, rejection_distance=rejection_distance_y)
    return x,y
@njit
def unpad(X, pad, width, rejection_distance):
    '''unpads 1 coordinate x or y for the padding:
    [0... pad | pad ... width + pad | width + pad ... width + 2 * pad]
    return -9999 if X is within rejection_distance of the edge,
    return X if X is in [pad ... width + pad], which is if X is in the unpadded frame, which has width = width
    else return X reflected onto the unpadded frame'''
    P = rejection_distance
    # shift into unpadded coordinates: valid range is now [0, width)
    X -= pad
    if X < -pad+P:
        # too close to the left/bottom edge of the padded frame
        X = -9999 # throw out X later
    elif X < 0:
        # in the left/bottom pad band: wrap onto the unpadded frame
        X += width
    if X > width+pad-P:
        # too close to the right/top edge of the padded frame
        X = -9999 # throw out X later
    elif X >= width:
        # in the right/top pad band: wrap onto the unpadded frame
        X -= width
    return X
# @njit
def textures_to_padded_textures(txt, dtexture_dt, pad):
    """Wrap-pad channel 0 of a texture and of its time derivative.

    A large pad allows knots to be recorded right
    (e.g. pad = int(512/2) with edge_tolerance = int(512/4)).

    Parameters
    ----------
    txt : ndarray, (H, W, C) — state texture; only channel 0 is used.
    dtexture_dt : ndarray, (H, W, C) — time derivative; only channel 0 is used.
    pad : int — pad width applied on every side.

    Returns
    -------
    (padded_txt, dpadded_txt_dt) : two (H+2*pad, W+2*pad) arrays.
    """
    # (the original also computed an unused width/height pair; removed)
    padded_txt = np.pad(txt[..., 0], pad_width=pad, mode='wrap')
    dpadded_txt_dt = np.pad(dtexture_dt[..., 0], pad_width=pad, mode='wrap')
    return padded_txt, dpadded_txt_dt
def matrices_to_padded_matrices(txt, dtexture_dt, pad):
    """Wrap-pad two rank-2 arrays (channel_no == 1 case).

    A large pad allows knots to be recorded right.
    Returns the two padded arrays in the same order as the inputs.
    """
    wrap = lambda m: np.pad(array=m, pad_width=pad, mode='wrap')
    return wrap(txt), wrap(dtexture_dt)
# #informal test for ^that
# padded_txt = np.pad(array = txt, pad_width = pad, mode = 'wrap')
# print(txt[0,0])
# print(padded_txt[...,2:5][pad,pad])
# @njit
def pad_matrix(mat, pad, channel_no=3):
    """Wrap-pad *mat* along every axis, then keep ``channel_no`` channels.

    The channel (last) axis is padded too, so the slice starting at offset
    ``pad`` recovers the original channels from the padded result.
    """
    padded = np.pad(array=mat, pad_width=pad, mode='wrap')
    return padded[..., pad:pad + channel_no]
# @njit
def pad_texture(txt, pad):
    """Wrap-pad the first three channels of *txt* and restack them.

    A large pad allows knots to be recorded right
    (e.g. pad = int(512/2), edge_tolerance = int(512/4)).
    Returns ``np.array([c0, c1, c2]).T`` of the per-channel padded planes.
    """
    channels = [np.pad(txt[..., c], pad_width=pad, mode='wrap') for c in range(3)]
    return np.array(channels).T
def map_pbc_tips_back(tips, pad, width, height, edge_tolerance, atol = 1e-11):
    '''Map tips detected on the wrap-padded frame back onto the unpadded frame.

    *width* and *height* are from the shape of the unpadded buffer.
    Tips closer than *atol* to an already-recorded tip are merged: their
    parent-contour indices are appended to that tip's S1/S2 lists instead
    of creating a new entry.

    Returns (s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst).

    TODO: get intersection to be njit compiled, then njit map_pbc_tips_back,
    for which numba.typed.List() would be needed instead of [].
    '''
    # BUG FIX: the squared nearest-neighbor distance is now compared against
    # atol**2; the original computed atol_squared but compared against atol.
    atol_squared = atol**2
    min_dist_squared_init = width**2
    s_tips, x_tips, y_tips = tips
    s1_mapped_lst = []; s2_mapped_lst = []
    x_mapped_lst = []; y_mapped_lst = []
    for n, x in enumerate(x_tips):
        S1, S2 = s_tips[n]
        y = y_tips[n]
        for X, Y in zip(x, y):
            X = unpad(X=X, pad=pad, width=width, rejection_distance=edge_tolerance)
            if X == -9999:
                continue  # rejected: too close to a padded edge
            Y = unpad(X=Y, pad=pad, width=height, rejection_distance=edge_tolerance)
            if Y == -9999:
                continue
            # find the squared distance to the nearest tip already recorded
            min_dist_squared = min_dist_squared_init; min_index = -1
            for j0, (x0, y0) in enumerate(zip(x_mapped_lst, y_mapped_lst)):
                dist_squared = (X - x0)**2 + (Y - y0)**2
                if dist_squared < min_dist_squared:
                    min_dist_squared = dist_squared
                    min_index = j0
            if min_dist_squared >= atol_squared:
                # sufficiently far from all recorded tips: new entry
                x_mapped_lst.append(X)
                y_mapped_lst.append(Y)
                s1_mapped_lst.append([S1])
                s2_mapped_lst.append([S2])
            else:
                # duplicate of an existing tip: append its contour ids
                s1_mapped_lst[min_index].append(S1)
                s2_mapped_lst[min_index].append(S2)
    return s1_mapped_lst, s2_mapped_lst, x_mapped_lst, y_mapped_lst
#########################################################################
# Interpolating Electrophysiological state values to spiral tip locations
#########################################################################
def get_state_nearest(x, y, txt, nanstate=None):
    '''Return the texture state at the integer pixel nearest (x, y).

    BUG FIX: the fallback previously referenced an undefined global
    ``nanstate`` and raised NameError whenever the index was out of range;
    *nanstate* is now an optional parameter defaulting to a 3-NaN state
    (backward-compatible: existing 3-argument calls still work).

    Note: negative rounded indices wrap around (numpy indexing semantics),
    matching the original behavior; only positive overruns hit the fallback.
    '''
    if nanstate is None:
        nanstate = [np.nan, np.nan, np.nan]
    xint = np.round(x).astype(dtype=int)
    yint = np.round(y).astype(dtype=int)
    try:
        state_nearest = list(txt[xint, yint])
    except IndexError:
        state_nearest = nanstate
    return state_nearest
#for get_state_interpolated
# Silence RuntimeWarnings for get_state_interpolated.
# NOTE(review): this filter is process-wide, not scoped to that function —
# see the TODO below.
import sys
if not sys.warnoptions:
    import warnings
    warnings.simplefilter("ignore", category=RuntimeWarning, lineno=0, append=False)
#TODO: restrict ^this warning filter to only get_state_interpolated
def get_state_interpolated(x, y, txt, nanstate, xcoord_mesh, ycoord_mesh,
                            channel_no = 3, rad = 0.5, kind='linear'):
    '''Interpolate local texture values to subpixel precision
    using 2D interpolation with scipy.interpolate.interp2d.

    channel_no must be len(nanstate).
    For channel_no = 3, use nanstate = [np.nan, np.nan, np.nan].
    rad = the pixel radius considered in interpolation.
    kind can be "linear" or "cubic"; if kind="cubic", set rad = 3.5.

    NOTE(review): ``state_interpolated = nanstate`` aliases the caller's
    list — on success the caller's nanstate is mutated in place (the
    inline comment below acknowledges this). Confirm before copying.
    NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    SciPy >= 1.14 — this will need porting to RegularGridInterpolator.
    '''
    state_interpolated = nanstate #.copy() if you change nanstate to a numpy array
    try:
        # bounding box of the interpolation stencil around (x, y)
        xlo = np.round(x-rad).astype(dtype=int)
        ylo = np.round(y-rad).astype(dtype=int)
        xhi = np.round(x+rad).astype(dtype=int)
        yhi = np.round(y+rad).astype(dtype=int)
        yloc = ycoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
        xloc = xcoord_mesh[ylo:yhi+1,xlo:xhi+1].flatten().copy()
        local_values = txt[ylo:yhi+1,xlo:xhi+1]
        # note the swapped argument order when evaluating the interpolant
        interp_foo = lambda x,y,zloc: interp2d(yloc,xloc,zloc,kind=kind)(y,x)
        for c in range(channel_no):
            zloc = local_values[...,c].flatten().copy()
            state_interpolated[c] = float(interp_foo(x,y,zloc))
    except IndexError:
        # stencil fell outside the texture: leave nanstate values in place
        pass
    except RuntimeWarning:
        pass
    return state_interpolated
# ###############
# # Example Usage
# ###############
# #Caution! : check whether spiral tips are recorded as 'x': x coordinate or 'x': y coordinate
# #precompute the following the __padded__ coordinates
# xcoord_mesh, ycoord_mesh = np.meshgrid(np.arange(0,200),np.arange(0,200))
# x = 169.75099760896785
# y = 68.05364536542943
# nanstate = [np.nan,np.nan,np.nan]
# txt = np.stack([texture,texture,texture]).T
# print(
# get_state_nearest(x,y,txt)
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 3.5, kind='cubic')
# )
# print (
# get_state_interpolated(x, y, txt.astype('float32'), nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = 3, rad = 0.5, kind='linear')
# )
##############################################
## Get Electrophysiological (EP) State Data #
##############################################
def get_states(x_values, y_values, txt, pad,
               nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
    '''Sample electrophysiological state at each tip location.

    BUG FIX: the original body referenced the undefined names
    ``tips_mapped`` and ``flatten`` (raising NameError on every call);
    it now consumes the ``x_values`` / ``y_values`` parameters directly,
    accepting either flat coordinate sequences or per-contour nested lists.

    Returns (states_nearest, states_interpolated_linear,
    states_interpolated_cubic), one entry per location.
    '''
    def _flatten(seq):
        # accept a flat sequence of scalars or a list of per-contour lists
        out = []
        for item in seq:
            if isinstance(item, (list, tuple, np.ndarray)):
                out.extend(item)
            else:
                out.append(item)
        return out

    padded_txt = pad_matrix(txt, pad)
    # NOTE(review): the original swapped axes here (y locations built from
    # the x list and vice versa) — preserved as-is; confirm against the
    # tip-detection row/column convention.
    y_locations = np.array(_flatten(x_values)) + pad
    x_locations = np.array(_flatten(y_values)) + pad
    states_nearest = []; states_interpolated_linear = []; states_interpolated_cubic = []
    for x, y in zip(x_locations, y_locations):
        state_nearest = get_state_nearest(x, y, txt=padded_txt)
        state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
                                                           channel_no=channel_no, rad=0.5, kind='linear')
        state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
                                                          channel_no=channel_no, rad=3.5, kind='cubic')
        states_nearest.append(state_nearest)
        states_interpolated_linear.append(state_interpolated_linear)
        states_interpolated_cubic.append(state_interpolated_cubic)
    return states_nearest, states_interpolated_linear, states_interpolated_cubic
def add_states(tips_mapped, states_EP):
    """Return a new tuple with the EP-state sequences appended to the tip tuple."""
    return tuple(list(tips_mapped) + list(states_EP))
def unwrap_EP(df,
              EP_col_name = 'states_interpolated_linear',
              drop_original_column=False):
    '''Expand a 3-component EP-state column into scalar V/f/s columns.

    If the column is missing, the input DataFrame is returned unchanged.
    Otherwise the column is split into new 'V', 'f', 's' columns and
    dropped from *df* (mutated in place and returned).

    NOTE(review): the *drop_original_column* parameter is unused — the
    source column is always dropped; kept for interface compatibility.
    If this function is slow, it may be because df[EP_col_name] contains
    strings (each one is eval'd).
    '''
    if EP_col_name not in df.columns.values:
        # BUG FIX: the message previously interpolated the boolean
        # EP_col_exists instead of the missing column's name.
        print(f"Caution! EP_col_name '{EP_col_name}' does not exist. Returning input df.")
        return df
    V_lst = []
    f_lst = []
    s_lst = []
    for index, row in df.iterrows():
        try:
            V, f, s = row[EP_col_name]
        except Exception:
            # SECURITY NOTE: eval() on stored strings — acceptable only for
            # trusted, locally produced data files.
            V, f, s = eval(row[EP_col_name])
        V_lst.append(V)
        f_lst.append(f)
        s_lst.append(s)
    df['V'] = V_lst
    df['f'] = f_lst
    df['s'] = s_lst
    df.drop(columns=[EP_col_name], inplace=True)
    return df
@njit
def get_grad_direction(texture):
    '''get the gradient direction field, N
    out_Nx, out_Ny = get_grad_direction(texture)

    Central differences with periodic boundaries (via _pbc); each (Nx, Ny)
    is normalized to unit length.  Where the gradient is exactly zero the
    sentinel value -10. is stored in both components.
    '''
    height, width = texture.shape
    out_Nx = np.zeros_like(texture, dtype=np.float64)
    out_Ny = np.zeros_like(texture, dtype=np.float64)
    # 1/grid-spacing; NOTE(review): 0.025 is presumably the physical grid
    # spacing used elsewhere in this package — confirm the units.
    DX = 1/0.025; DY = 1/0.025;
    for y in range(height):
        for x in range(width):
            # periodic-boundary neighbor samples
            up = _pbc(texture,y+1,x,height,width)
            down = _pbc(texture,y-1,x,height,width)
            left = _pbc(texture,y,x-1,height,width)
            right = _pbc(texture,y,x+1,height,width)
            Nx = (right-left)/DX
            Ny = (up-down)/DY
            norm = np.sqrt( Nx**2 + Ny**2 )
            if norm == 0:
                # zero gradient: mark with sentinel instead of dividing by 0
                out_Nx[y,x] = -10.
                out_Ny[y,x] = -10.
            else:
                out_Nx[y,x] = Nx/norm
                out_Ny[y,x] = Ny/norm
    return out_Nx, out_Ny
# ################################
# deprecated
# ################################
#deprecated - needs parameters
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.9,fully_connected='low',positive_orientation='low')
# return contours_raw,contours_inc
#tip locating for stable parameters
# img_inc = (img_nxt * ifilter(dtexture_dt[..., 0]))**2 #mask of instantaneously increasing voltages
# img_inc = filters.gaussian(img_inc,sigma=2., mode='wrap')
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# @jit
# def get_contours(img_nxt,img_inc):
# contours_raw = measure.find_contours(img_nxt, level=0.5,fully_connected='low',positive_orientation='low')
# contours_inc = measure.find_contours(img_inc, level=0.0005)#,fully_connected='low',positive_orientation='low')
# return contours_raw, contours_inc
# # @njit
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# x, y = intersection(x1, y1, x2, y2)
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# y = list(y)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_tips(contours_raw, contours_inc):
# '''returns tips with indices of parent contours'''
# n_list = []; x_lst = []; y_lst = []
# for n1, c1 in enumerate(contours_raw):
# for n2, c2 in enumerate(contours_inc):
# x1, y1 = (c1[:, 0], c1[:, 1])
# x2, y2 = (c2[:, 0], c2[:, 1])
# # tmp = intersection(x1, y1, x2, y2)
# x, y = intersection(x1, y1, x2, y2)
# # if a tip has been detected, save it and its contour ids
# if len(x)>0:
# s = (n1,n2)
# x = list(x)
# # x.sort()
# y = list(y)
# # y.sort()
# # tmp = (s,x,y)
# # tips.append(tmp)
# n_list.append(s)
# x_lst.append(x)
# y_lst.append(y)
# return n_list, x_lst, y_lst
# def get_states(tips_mapped, txt, pad,
# nanstate, xcoord_mesh, ycoord_mesh, channel_no = 3):
# '''iterates through x_locations and y_locations contained in tips_mapped and returns the electrophysiological states'''
# # tips_mapped gives tip locations using the correct image pixel coordinates, here.
# padded_txt = pad_matrix(txt, pad)
# y_locations = np.array(tips_mapped[2]) + pad
# x_locations = np.array(tips_mapped[3]) + pad
#
# states_nearest = states_interpolated_linear = states_interpolated_cubic = [];
# for x,y in zip(x_locations,y_locations):
# state_nearest = get_state_nearest(x,y,txt=padded_txt)
# state_interpolated_linear = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 0.5, kind='linear')
# state_interpolated_cubic = get_state_interpolated(x, y, padded_txt, nanstate, xcoord_mesh, ycoord_mesh,
# channel_no = channel_no, rad = 3.5, kind='cubic')
# states_nearest.append(state_nearest)
# states_interpolated_linear.append(state_interpolated_linear)
# states_interpolated_cubic.append(state_interpolated_cubic)
# return states_nearest, states_interpolated_linear, states_interpolated_cubic
|
23,645 | d4afe2326da55cf31939b560ee14e9ad79ab2976 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Open Targets
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Docker container wrapper for Luigi.
Enables running a docker container as a task in luigi.
This wrapper uses the Docker Python SDK to communicate directly with the
Docker API avoiding the common pattern to invoke the docker client
from the command line. Using the SDK it is possible to detect and properly
handle errors occurring when pulling, starting or running the containers.
On top of this, it is possible to mount a single file in the container
and a temporary directory is created on the host and mounted allowing
the handling of files bigger than the container limit.
Requires:
- docker: ``pip install docker``
Written and maintained by Andrea Pierleoni (@apierleoni).
Contributions by Eliseo Papa (@elipapa).
"""
from tempfile import mkdtemp
import logging
import luigi
from luigi.local_target import LocalFileSystem
from luigi import six
logger = logging.getLogger('luigi-interface')
try:
import docker
from docker.errors import ContainerError, ImageNotFound, APIError
except ImportError:
logger.warning('docker is not installed. DockerTask requires docker.')
docker = None
# TODO: may need to implement this logic for remote hosts
# class dockerconfig(luigi.Config):
# '''
# this class allows to use the luigi.cfg file to specify the path to the docker config.json.
# The docker client should look by default in the main directory,
# but on different systems this may need to be specified.
# '''
# docker_config_path = luigi.Parameter(
# default="~/.docker/config.json",
# description="Path to dockercfg file for authentication")
class DockerTask(luigi.Task):
    """Luigi task that runs its payload in a Docker container via the
    low-level Docker SDK (docker.APIClient)."""

    @property
    def image(self):
        # Docker image to run; subclasses override. A ':latest' tag is
        # appended in __init__ when none is given.
        return 'alpine'

    @property
    def command(self):
        # Command executed inside the container.
        return "echo hello world"

    @property
    def name(self):
        # Optional container name; when set and auto_remove is on, a
        # clashing container with the same name is removed before running.
        return None

    @property
    def container_options(self):
        # Extra kwargs forwarded to APIClient.create_container.
        return {}

    @property
    def environment(self):
        # Environment variables for the container; __init__ adds
        # LUIGI_TMP_DIR pointing at container_tmp_dir.
        return {}

    @property
    def container_tmp_dir(self):
        # Mount point inside the container for the host temp directory.
        return '/tmp/luigi'

    @property
    def binds(self):
        '''
        Override this to mount local volumes, in addition to the /tmp/luigi
        which gets defined by default. This should return a list of strings.
        e.g. ['/hostpath1:/containerpath1', '/hostpath2:/containerpath2']
        '''
        return None

    @property
    def network_mode(self):
        # Docker network mode ('' lets the engine pick its default).
        return ''

    @property
    def docker_url(self):
        # Docker daemon URL; None lets the SDK use the local default socket.
        return None

    @property
    def auto_remove(self):
        # Remove the container after the run (and remove name clashes first).
        return True

    @property
    def force_pull(self):
        # Pull the image even when it is already present locally.
        return False

    @property
    def mount_tmp(self):
        # Create a host temp dir and bind-mount it at container_tmp_dir.
        return True

    def __init__(self, *args, **kwargs):
        '''
        When a new instance of the DockerTask class gets created:
        - call the parent class __init__ method
        - start the logger
        - init an instance of the docker client
        - create a tmp dir
        - add the temp dir to the volume binds specified in the task
        '''
        super(DockerTask, self).__init__(*args, **kwargs)
        self.__logger = logger
        '''init docker client
        using the low level API as the higher level API does not allow to mount single
        files as volumes
        '''
        self._client = docker.APIClient(self.docker_url)
        # add latest tag if nothing else is specified by task
        if ':' not in self.image:
            self._image = ':'.join([self.image, 'latest'])
        else:
            self._image = self.image
        if self.mount_tmp:
            # create a tmp_dir, NOTE: /tmp needs to be specified for it to work on
            # macOS, despite what the python documentation says
            self._host_tmp_dir = mkdtemp(suffix=self.task_id,
                                         prefix='luigi-docker-tmp-dir-',
                                         dir='/tmp')
            self._binds = ['{0}:{1}'.format(self._host_tmp_dir, self.container_tmp_dir)]
        else:
            self._binds = []
        # update environment property with the (internal) location of tmp_dir
        self.environment['LUIGI_TMP_DIR'] = self.container_tmp_dir
        # add additional volume binds specified by the user to the tmp_Dir bind
        if isinstance(self.binds, six.string_types):
            self._binds.append(self.binds)
        elif isinstance(self.binds, list):
            self._binds.extend(self.binds)
        # derive volumes (ie. list of container destination paths) from
        # specified binds
        self._volumes = [b.split(':')[1] for b in self._binds]

    def run(self):
        """Pull the image if needed, run the container, surface errors, and
        clean up the container and host temp directory."""
        # get image if missing
        if self.force_pull or len(self._client.images(name=self._image)) == 0:
            logger.info('Pulling docker image ' + self._image)
            try:
                for logline in self._client.pull(self._image, stream=True):
                    logger.debug(logline.decode('utf-8'))
            except APIError as e:
                self.__logger.warning("Error in Docker API: " + e.explanation)
                raise
        # remove clashing container if a container with the same name exists
        if self.auto_remove and self.name:
            try:
                self._client.remove_container(self.name,
                                              force=True)
            except APIError as e:
                self.__logger.warning("Ignored error in Docker API: " + e.explanation)
        # run the container
        try:
            logger.debug('Creating image: %s command: %s volumes: %s'
                         % (self._image, self.command, self._binds))
            host_config = self._client.create_host_config(binds=self._binds,
                                                          network_mode=self.network_mode)
            container = self._client.create_container(self._image,
                                                      command=self.command,
                                                      name=self.name,
                                                      environment=self.environment,
                                                      volumes=self._volumes,
                                                      host_config=host_config,
                                                      **self.container_options)
            self._client.start(container['Id'])
            # NOTE(review): newer docker SDKs return a dict from wait();
            # this code assumes an int exit code — confirm the pinned SDK version.
            exit_status = self._client.wait(container['Id'])
            if exit_status != 0:
                # capture stderr before the container is removed below
                stdout = False
                stderr = True
                error = self._client.logs(container['Id'],
                                          stdout=stdout,
                                          stderr=stderr)
            if self.auto_remove:
                try:
                    self._client.remove_container(container['Id'])
                except docker.errors.APIError:
                    self.__logger.warning("Container " + container['Id'] +
                                          " could not be removed")
            if exit_status != 0:
                raise ContainerError(container, exit_status, self.command, self._image, error)
        except ContainerError as e:
            # catch non zero exit status and log it before re-raising
            container_name = ''
            if self.name:
                container_name = self.name
            try:
                message = e.message
            except AttributeError:
                message = str(e)
            self.__logger.error("Container " + container_name +
                                " exited with non zero code: " + message)
            raise
        except ImageNotFound as e:
            self.__logger.error("Image " + self._image + " not found")
            raise
        except APIError as e:
            self.__logger.error("Error in Docker API: "+e.explanation)
            raise
        # delete temp dir
        filesys = LocalFileSystem()
        if self.mount_tmp and filesys.exists(self._host_tmp_dir):
            filesys.remove(self._host_tmp_dir, recursive=True)
|
23,646 | 4642a87e28d1439efae4a504dac7b7006d7c93d0 | import pytest
from ..word_representation_of_a_number import word_rep_of_number
@pytest.mark.parametrize('number, expected', [
    (12, 'twelve'),
    (201, 'two hundred and one'),
    (3, 'three'),
    # NOTE(review): trailing space matches the current implementation's
    # output for round hundreds — confirm it is intended.
    (600, 'six hundred '),
    (601, 'six hundred and one'),
    (310, 'three hundred and ten'),
    (10, 'ten')
])
def test_word_rep_of_number(number, expected):
    """Parametrised check of word_rep_of_number.

    CONSISTENCY FIX: the function was named test_get_sexy_pairs, evidently
    copied from an unrelated test module; renamed to match what it tests.
    """
    actual = word_rep_of_number(number)
    assert actual == expected
|
23,647 | 07d225bfda4ef8a900cda70ee29d4e2563ef53c4 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-01-13 17:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``multi_callback_enabled`` flag to ``ScheduleService``."""

    dependencies = [
        ('engine', '0017_auto_20191101_1002'),
    ]

    operations = [
        migrations.AddField(
            model_name='scheduleservice',
            name='multi_callback_enabled',
            # verbose_name is user-facing text; left in its original language
            field=models.BooleanField(default=False, verbose_name='是否支持多次回调'),
        ),
    ]
|
23,648 | cc96c7bd9ee6dbbedbe94f4d33e7c60abccd5ebb | Estatura = int (input ("ingrese su Estatura en centimetros: "))
if Estatura <= 150:
print ("Eres Bajito")
if Estatura >= 151 and Estatura<= 170:
print ("Eres una persona con una estatura promedio")
if Estatura >= 171:
print ("Sos Alto") |
23,649 | 426a5e6e77220662956e97242a5042c36aa3a9ba | import sys, os, time
import json
import codecs
import threading
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QApplication, QAbstractItemView, \
QFileDialog, QTableWidgetItem, QMessageBox
from PyQt5.QtGui import QPainter, QColor, QFont, QBrush, QSyntaxHighlighter, QTextCharFormat
from PyQt5.QtCore import Qt, QRegExp, QFile
from PyQt5 import QtGui, QtCore, uic
from PyQt5.QtWidgets import qApp
from utils import Instruction, Interpret
import re
UI_MainWindow, QtBaseClass = uic.loadUiType("x0Compiler.ui")
qssFile = QFile("style.qss")
qssFile.open(QFile.ReadOnly)
themeQss = qssFile.readAll()
themeQss = str(themeQss, encoding="utf8")
'''
delete temporary files after a build work finished
'''
def cleanfiles():
    """Remove temporary build artifacts (~.tmp, ferr.json, fcode.json) if present."""
    from os.path import isfile
    for leftover in (".\\~.tmp",
                     os.getcwd() + "\\ferr.json",
                     os.getcwd() + "\\fcode.json"):
        if isfile(leftover):
            os.remove(leftover)
'''
This function is the real processing for backstage-interpretation
and it should work in a new thread so that I/O cannot block the UI
'''
def procRun(codeList, window):
    """Interpreter driver intended to run on a worker thread so I/O cannot
    block the UI.

    Single-steps the TAC interpreter; tag 1 requests input, tag 2 delivers
    output. ``window.stopsgl`` aborts the run; in debug mode
    (``window.mod == 1``) it busy-waits on ``window.debug`` for the next
    debugger command (1=next step, 2=step into, 3=run to end, 4=step out).
    """
    window.interpret = Interpret(codeList)
    run = window.interpret
    mod = window.mod
    window.setStackValues(run.showStack())
    while True:
        # tip (highlight) the TAC code about to be processed
        window.setCodeStatus(run.p, True)
        tag = run.sg_step()
        window.setStackValues(run.showStack())
        if tag == 1: #input
            window.input()
        if tag == 2: #output
            window.output(run.recv())
        if window.stopsgl == 1:
            # user pressed stop: clear the highlight and leave the loop
            window.setCodeStatus(run.c, False)
            break
        if run.judge() == False:
            break
        if mod == 1:
            # debug mode: poll until the UI posts a debugger command
            while window.debug == 0:
                time.sleep(0.05)
            window.setCodeStatus(run.c, False)
            if window.debug == 1: # next step
                pass
            if window.debug == 2: # step into
                pass
            if window.debug == 3: # over step: fall back to free-running mode
                mod = 0
                window.setDebugEnabled(False)
            if window.debug == 4: # step out: reset params and runtime pad
                run.paramInit()
                window.RuntimePad.clear()
                window.RuntimePad.textCursor().insertText("")
                window.debug = 0
                continue
            window.debug = 0
        # cancel the tip for the TAC code just processed
        window.setCodeStatus(run.c, False)
    window.setDebugEnabled(False)
    window.actionStop.setEnabled(False)
    window.output("\n=== the processing is over ===")
'''
extending the QSyntaxHighlighter class for highlight the x0 texts
'''
class x0Highlighter(QSyntaxHighlighter):
    """Syntax highlighter for the x0 language (Monokai-like palette).

    Rules/Formats are class-level registries shared by all instances.
    """
    Rules = []
    Formats = {}

    def __init__(self, parent=None):
        super(x0Highlighter, self).__init__(parent)
        self.initializeFormats()
        BUILDINS = ["and", "not", "int", "char", "bool", "true", "false"]
        OPERATORS = ["\+", "-", "\*", "/", "%", "&", "\|", "~", "\^", "\!",
                     "<", ">", "=", "\.","+="]
        KEYWORDS = ["read", "if", "else",
                    "for", "do", "while", "repeat", "until",
                    "write", "return", "break", "continue",
                    "main", "switch", "case"]
        FUNCTIONS = ["procedure", "call"]
        CONSTANTS = ["False", "True"]  # NOTE(review): currently unused
        x0Highlighter.Rules.append((QRegExp(
            "|".join([r"\b%s\b" % keyword for keyword in KEYWORDS])),
            "keyword"))
        x0Highlighter.Rules.append((QRegExp(
            "|".join([r"\b%s\b" % buildin for buildin in BUILDINS])),
            "buildin"))
        x0Highlighter.Rules.append((QRegExp(
            "|".join([r"%s" % operator for operator in OPERATORS])),
            "operator"))
        x0Highlighter.Rules.append((QRegExp(
            "|".join([r"%s" % function for function in FUNCTIONS])),
            "function"))
        x0Highlighter.Rules.append((QRegExp(
            r"\b[+-]?[0-9]+[lL]?\b"),
            "number"))
        x0Highlighter.Rules.append((QRegExp(
            r"(/\*(.|\n)*\*/)|(\/\/.*/n)"),
            "comment"))
        x0Highlighter.Rules.append((QRegExp(
            r"\".*\"|'.*'"),
            "string"))
        # NOTE(review): this pattern contains an unbalanced ')' and looks
        # invalid (it will never match) — probably meant something like
        # r"procedure.*\(". Left unchanged pending confirmation.
        x0Highlighter.Rules.append((QRegExp(
            r"procedure.*)\("),
            "funcName"))

    @staticmethod
    def initializeFormats():
        """Populate the shared Formats table with one QTextCharFormat per rule."""
        baseFormat = QTextCharFormat()
        baseFormat.setFontFamily("Consolas")
        baseFormat.setFontPointSize(12)
        for name, fcolor, bcolor in (
                ("operator", QColor(103,166,228), None),
                ("keyword", QColor(249,35,112), None),
                ("buildin", QColor(104,216,235), None),
                ("normal", Qt.white, None),
                ("string", QColor(231,219,116), None),
                ("function", QColor(245,150,32), None),
                ("funcName", QColor(166,226,44), None),
                ("number", QColor(167,128,255), None),
                ("comment", QColor(90,88,85), None)):
            format = QTextCharFormat(baseFormat)
            format.setForeground(QColor(fcolor))
            if bcolor is not None:
                format.setBackground(QColor(bcolor))
            # BUG FIX: membership must test against a one-element tuple;
            # `name in ("buildin")` was a substring test on the string
            # "buildin" (it only behaved the same by accident).
            if name in ("buildin",):
                format.setFontWeight(QFont.Bold)
            if name == "comment":
                format.setFontItalic(True)
            x0Highlighter.Formats[name] = format

    def highlightBlock(self, text):
        """Apply every rule to *text*, then grey out '//' line comments by hand."""
        NORMAL, TRIPLESINGLE, TRIPLEDOUBLE = range(3)
        textLength = len(text)
        prevState = self.previousBlockState()
        self.setFormat(0, textLength, x0Highlighter.Formats["normal"])
        for regex, format in x0Highlighter.Rules:
            i = regex.indexIn(text)
            while i >= 0:
                length = regex.matchedLength()
                self.setFormat(i, length, x0Highlighter.Formats[format])
                i = regex.indexIn(text, i + length)
        if not text:
            pass
        else:
            # scan for two consecutive '/' chars and grey out the rest of
            # the line (the unused `stack` local was removed)
            pre = None
            for i, c in enumerate(text):
                if c == "/" and pre == c:
                    self.setFormat(i-1, len(text)-i+1, x0Highlighter.Formats["comment"])
                    break
                pre = c
        self.setCurrentBlockState(NORMAL)

    def rehighlight(self):
        # NOTE(review): QCursor is not imported in this module — this method
        # raises NameError if called; needs `from PyQt5.QtGui import QCursor`.
        QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
        QSyntaxHighlighter.rehighlight(self)
        QApplication.restoreOverrideCursor()
class x0Compiler(QMainWindow, UI_MainWindow):
inputWrite = False
def __init__(self):
    """Set up the designer-generated UI, attach the x0 syntax highlighter
    to the code editor, and initialize the window."""
    QMainWindow.__init__(self)
    UI_MainWindow.__init__(self)
    self.setupUi(self)
    self.runDlg = None
    self.highlighter = x0Highlighter(self.codeTextEdit.document())
    self.initUI()
def onclick(self):
    """Persist the input pane's text to input.txt for the interpreter,
    then clear the pane and flag that input was written."""
    self.inputWrite = True
    text = self.inputEdit.toPlainText()
    # BUG FIX: use a context manager so the file handle is closed even if
    # write() raises (the original left the handle open on error).
    with open("input.txt", "w") as f:
        f.write(text)
    self.inputEdit.clear()
    print("click")
def errTbInit(self):
    '''
    Initialize the error-message table: three read-only columns
    (errno, line, message) with row-wise selection.
    '''
    self.errorMsgTable.clear()
    self.errorMsgTable.setColumnCount(3)
    self.errorMsgTable.setRowCount(1)
    self.errorMsgTable.setHorizontalHeaderLabels(['errno', 'line', 'message'])
    self.errorMsgTable.verticalHeader().setVisible(False)
    self.errorMsgTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
    self.errorMsgTable.setSelectionBehavior(QAbstractItemView.SelectRows)
    self.errorMsgTable.setColumnWidth(0, 70)
    self.errorMsgTable.setColumnWidth(2, 595)
    # force black header text regardless of the applied stylesheet
    for idx in range(self.errorMsgTable.columnCount()):
        headItem = self.errorMsgTable.horizontalHeaderItem(idx)
        headItem.setForeground(QColor(0, 0, 0))
def fileInit(self):
    """Reset working-file state: no file open yet, cwd as the work dir."""
    self.filetag = False
    self.filepath = os.getcwd()
    self.filename = ""
    self.workPathLabel.setText("")
    # drop leftovers from a previous build
    cleanfiles()
def initUI(self):
    """Build the main window: wire menu actions, render gutter line numbers,
    apply fonts, configure the variable-watch table, and show the window."""
    self.fileInit()
    self.errTbInit()
    #self.scroll = QScrollArea()
    #self.scroll.setWidgrt(self.)
    self.actionNew.triggered.connect(self.newFile)
    self.actionOpen.triggered.connect(self.openFile)
    self.actionSave.triggered.connect(self.saveFile)
    self.actionBuildAndRun.triggered.connect(self.BuildAndRun)
    self.actionDebug.triggered.connect(self.DebugMod)
    self.linelabel.setText("")
    # build the "1\n2\n...\n22" line-number gutter text
    lines=""
    for num in range(1,23):
        lines=lines+str(num)
        if num < 22:
            lines=lines+'\n'
        # NOTE(review): this increment has no effect — the for loop rebinds num
        num=num+1
    self.linelabel.setText(lines)
    self.linelabel.setFixedWidth(30)
    font = QFont("Consolas",11,QFont.Normal)
    QFont.setLetterSpacing(font,QFont.AbsoluteSpacing,0.5)
    self.linelabel.setFont(font)
    self.outputLabel.setFont(font)
    self.tableWidget.setFont(font)
    self.label.setFont(font)
    self.codeTextEdit.setFont(font)
    self.label.setFixedWidth(280)
    self.label.setText(" pcode:\n")
    self.label.setAlignment(Qt.AlignTop)
    # configure the variable-watch table
    self.tableWidget.setObjectName("Variable Watches")
    self.tableWidget.setColumnCount(6)
    for i in range(6):
        self.tableWidget.setColumnWidth(i,60)
    self.infoTabs.tabBar().setAutoFillBackground(True)
    # install the table header
    # NOTE(review): `list` shadows the builtin of the same name
    list = ['idx','name','value','level','addr','size']
    for i in range(6):
        item = QTableWidgetItem(list[i])
        item.setBackground(QColor(13,13,13))
        self.tableWidget.setHorizontalHeaderItem(i, item)
    self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
    self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
    self.commitButton.clicked.connect(self.onclick)
    self.show()
    def setBuildEnabled(self, ok):
        """Enable or disable the Build&Run and Debug menu actions together."""
        self.actionBuildAndRun.setEnabled(ok)
        self.actionDebug.setEnabled(ok)
def startBuild(self):
'''
Preparation for build&run or debug a processing
'''
# clear output label and table contents
self.label.setText("")
self.outputLabel.setText("")
self.tableWidget.clear()
self.tableWidget.setRowCount(0);
#添加表头:
list = ['idx','name','value','level','addr','size']
for i in range(6):
item = QTableWidgetItem(list[i])
item.setBackground(QColor(13,13,13))
self.tableWidget.setHorizontalHeaderItem(i, item)
self.tableWidget.horizontalHeader().setStyleSheet("QHeaderView::section{background-color: #282923;}")
self.tableWidget.horizontalHeader().setDefaultAlignment(QtCore.Qt.AlignCenter)
text = self.codeTextEdit.toPlainText()
text.encode('utf-8')
if text == "":
text = u" "
# If the current working codefile is existed, use it directly
curfile = self.filepath+'\\'
if self.filetag == True:
curfile = curfile+self.filename
# If the current working codefile is new, used a temporary file
else:
curfile = curfile+"~.tmp"
codecs.open(curfile, 'w', 'utf-8').write(text)
#os.system(os.getcwd()+"\\x0cpl.exe "+curfile)
#os.system('./build.sh test0_simp.txt')
import subprocess
'''p = subprocess.Popen(['./demo','test0_simp.txt'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
print(out)
print(out)
#output=commands.getstatusoutput('./demo test0_simp.txt')
self.label.setText(out)'''
file_output = open("debug.txt","w")
if self.inputWrite == False :
subprocess.Popen(["./demo",'testfile/'+self.filename],stdout = file_output).wait() ######输出重定向,不卡住界面也不会因输出而在生成exe时有问题,如果不需要等待子进程结束,去掉.wait即可
elif self.inputWrite == True :
file_input = open("input.txt")
openfile = "./demo testfile/%s"%self.filename
print(self.filename)
subprocess.Popen([openfile], stdin = file_input, stdout = file_output, shell = True).wait()
file_input.close()
self.inputWrite = False
file_output.close()
f = open("debug.txt","r")
lines = f.readlines()
flag = 0
for line in lines:
if flag == 0 and line == "pcode:\n":
flag = 2
line = " " + line
elif flag == 2 and re.search('Start',line)!=None:
flag = 3
elif line == "table:\n" and flag == 3:
flag = 4
elif flag == 4:
flag = 1
if flag == 2:
pcodeText = self.label.text()
pcodeText = '%s%s'%(pcodeText,line)
#print(str)
self.label.setText(pcodeText)
elif flag == 1 and line.split != "":
dataList = line.split()
#插入一行
rowCount = self.tableWidget.rowCount()
self.tableWidget.insertRow(rowCount)
self.tableWidget.setRowHeight(rowCount, 22)
for i in range(0, 5):
item = QTableWidgetItem(str(dataList[i]))
item.setTextAlignment(Qt.AlignCenter)
self.tableWidget.setItem(rowCount, i, item)
text = ""
for i in range(5, len(dataList)):
if text != "" :
text = text + ' '
text = text + str(dataList[i])
item = QTableWidgetItem(str(text))
item.setTextAlignment(Qt.AlignCenter)
self.tableWidget.setItem(rowCount, 5, item)
elif flag == 3:
outputText = self.outputLabel.text()
outputText = '%s%s'%(outputText,line)
self.outputLabel.setText(outputText)
f.close()
    def runOver(self):
        """Callback fired when a run finishes: re-enable the build actions."""
        self.setBuildEnabled(True)
def errTbBuild(self):
'''
This function is to get error messages and fill the errorMsgTable
return: errNum
'''
errData = codecs.open(os.getcwd()+"\\ferr.json", 'r', 'utf-8').read()
errData = json.loads(errData)
idx = 0
self.errTbInit()
self.errorMsgTable.setItem(idx, 2, QTableWidgetItem(errData[u"total"]))
for err in errData[u'errors']:
self.errorMsgTable.insertRow(idx)
self.errorMsgTable.setItem(idx, 0, QTableWidgetItem(err[u'typeno']))
self.errorMsgTable.setItem(idx, 1, QTableWidgetItem(err[u'line']))
self.errorMsgTable.setItem(idx, 2, QTableWidgetItem(err[u'message']))
idx += 1
return errData[u'errNum']
    def BuildAndRun(self):
        """Menu action: compile and run the current buffer."""
        self.startBuild()
    def DebugMod(self):
        """Menu action: compile, then open the step-debug window if error-free."""
        self.startBuild()
        # judge() is defined elsewhere in this file — presumably True when the
        # compiler produced usable output; confirm against its definition.
        if judge() == True:
            errNum = self.errTbBuild()
            if errNum == 0:
                self.runDlg = RuntimeWin(1, self)
                # Run the interpreter loop on a worker thread so the GUI stays
                # responsive during stepping.
                self.IOthread = threading.Thread(target=procRun,\
                    args=(self.runDlg.codeList, self.runDlg))
                self.IOthread.start()
        else:
            QMessageBox.critical(self, "Critical", self.tr("Compiler processing error"))
    def newFile(self):
        """Menu action: discard the current file state and start an empty buffer."""
        self.fileInit()
        self.codeTextEdit.setPlainText("")
def openFile(self):
dirO = QFileDialog.getOpenFileName(self, "Open Exist File", self.filepath + '/testfile', \
"Text Files (*.txt)")
dirO = dirO[0]
if dirO != "":
print(dirO)
from os.path import isfile
if isfile(dirO):
text = codecs.open(dirO, 'r', 'utf-8').read()
lineNum = text.count("\n")
lines = ""
for i in range(1,lineNum + 3):
lines = lines + str(i) + '\n'
self.linelabel.setText(lines)
self.codeTextEdit.setPlainText(text)
#dirO = str(dirO.toUtf8(), 'utf-8', 'ignore')
self.filepath, self.filename = os.path.split(dirO)
self.filetag = True
self.workPathLabel.setText(self.filepath)
def saveFile(self):
text = self.codeTextEdit.toPlainText()
#text = str(text.toUtf8, 'utf-8', 'ignore')
print("save")
if self.filetag == True:
print(self.filepath+'\\'+self.filename)
codecs.open(self.filepath+'\\'+self.filename, 'w', 'utf-8').write(text)
else:
dirS = QFileDialog.getSaveFileName(self, "Save File", self.filepath + '/testfile', \
"x0 Files (*.x0);;Text Files (*.txt)")
print(dirS)
if dirS != "":
print(dirS)
codecs.open(dirS, 'w', 'utf-8').write(text)
dirS = unicode(dirS.toUtf8(), 'utf-8', 'ignore')
self.filepath, self.filename = os.path.split(dirS)
self.filetag = True
self.workPathLabel.setText(self.filepath)
    def closeEvent(self, event):
        """Qt close hook: close any open runtime dialog and clean temp files."""
        if self.runDlg:
            self.runDlg.close()
        cleanfiles()
if __name__ == "__main__":
    app = QApplication(sys.argv)
    qApp.setStyleSheet(themeQss)  # themeQss: stylesheet defined elsewhere in this file
    window = x0Compiler()  # the widget shows itself at the end of initUI()
    sys.exit(app.exec_())
23,650 | b6bd4de44b8a883499d4e733cd84aad43fd5f3bb | import os
import sumstats.api_v1.snp.loader as loader
from sumstats.api_v1.snp.search.access.service import Service
from tests.prep_tests import *
from sumstats.api_v1.errors.error_classes import *
import pytest
class TestUnitSearcher(object):
    """End-to-end tests for Service SNP queries over a temporary HDF5 store."""
    h5file = ".testfile.h5"  # scratch store, removed in teardown_method
    f = None
    def setup_method(self, method):
        # Load the same SNP data under three different study accessions.
        load = prepare_load_object_with_study(self.h5file, 'PM001', loader)
        load.load()
        load = prepare_load_object_with_study(self.h5file, 'PM002', loader)
        load.load()
        load = prepare_load_object_with_study(self.h5file, 'PM003', loader)
        load.load()
        self.start = 0
        self.size = 20
        self.query = Service(self.h5file)
        self.studies = ['PM001', 'PM002', 'PM003']
        self.existing_snp = 'rs7085086'
        self.non_existing_snp = 'rs1234567'
    def teardown_method(self, method):
        os.remove(self.h5file)
    def test_query_for_snp(self):
        # An existing SNP yields one row per study (3) and a single chromosome (2).
        self.query.query(snp=self.existing_snp, start=self.start, size=self.size)
        datasets = self.query.get_result()
        assert isinstance(datasets, dict)
        for dset_name in datasets:
            assert len(datasets[dset_name]) == 3
        assert len(set(datasets[CHR_DSET])) == 1
        assert set(datasets[CHR_DSET]).pop() == 2
    def test_query_for_non_existing_snp_raises_error(self):
        with pytest.raises(NotFoundError):
            self.query.query(snp=self.non_existing_snp, start=self.start, size=self.size)
    def test_get_snp_size_raises_error_for_not_existing_snp(self):
        with pytest.raises(NotFoundError):
            self.query.get_snp_size(self.non_existing_snp)
    def test_get_snp_size(self):
        # SNP size equals the number of studies it was loaded under.
        snp_size = self.query.get_snp_size(self.existing_snp)
        assert snp_size == len(self.studies)
23,651 | e193976db63d6c16953332a33a547fc096d4f2bc | # -*- coding: utf-8 -*-
"""
# Task 1
"""
# Commented out IPython magic to ensure Python compatibility.
#import all libraries we will use for task 1
# %pylab inline --no-import-all
import numpy as np
import matplotlib.pyplot as plt
from keras.models import load_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import accuracy_score
#import the dataset (x: flattened 28x28 letter images, y: labels 1..26)
with np.load('training-dataset.npz') as data:
    img = data['x']
    lbl = data['y']
del data
print(img.shape)
print(type(img))
print(lbl.shape)
#split into training (70%), validation (15%) and testing (15%) sets
X_train, X_val_test, y_train, y_val_test = train_test_split(img, lbl, test_size = 0.3, random_state = 666)
X_val, X_test, y_val, y_test = train_test_split(X_val_test, y_val_test, test_size = 0.5, random_state = 666)
print(y_train.shape)
print(y_val.shape)
print(y_test.shape)
#normalization: pixels are 0-255, scale to [0, 1]; the reshapes add the channel axis for the CNN
X_train = X_train/255
X_val = X_val/255
X_test = X_test/255
x_train = X_train.reshape((-1, 28, 28, 1))
x_val = X_val.reshape((-1, 28, 28, 1))
x_test = X_test.reshape((-1, 28, 28, 1))
print(x_train.shape)
#one-hot (dummy) encode the labels for the 26 letter classes
onehot = LabelBinarizer()
d_y_train = onehot.fit_transform(y_train)
d_y_val = onehot.transform(y_val)
d_y_test = onehot.transform(y_test)
print(d_y_train.shape)
"""## Model 1: KNN """
# Model 1: KNN.
# Tune k with 3-fold cross-validation on the training set.
k_range = range(3, 8)
cv_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k, n_jobs=-1)
    scores = cross_val_score(knn, X_train, d_y_train, cv=3)  # 3 folds
    cv_score = np.mean(scores)
    print('k = {}, accuracy on validation set = {:.3f}'.format(k, cv_score))
    cv_scores.append((k, cv_score))
# Pick the (k, accuracy) pair with the best cross-validation accuracy.
best_k = max(cv_scores, key=lambda pair: pair[-1])
# BUG FIX: the accuracy placeholder previously received the whole
# (k, accuracy) tuple instead of the accuracy alone.
print('best_k: {} with validation accuracy of {}'.format(best_k[0], best_k[1]))
# Refit on the full training set with the winning k.
knn_model = KNeighborsClassifier(n_neighbors=best_k[0], n_jobs=-1)
knn_model.fit(X_train, d_y_train)
# Evaluate the fitted KNN on the held-out test data.
knn_y_pred = knn_model.predict(X_test)
test_accuracy = accuracy_score(d_y_test, knn_y_pred)
print(test_accuracy)
"""## Model 2: CNN
"""
#tune hyperparameters, here we tune batch size, dropout rate and learning rate
settings = []
for batch in [64, 100, 128]:
for drop in [0, 0.5, 0.8]:
for lr in [0.1, 0.01, 0.001]:
print("batch :", batch)
print("drop:", drop)
print('learning rate:', lr)
model = Sequential()
#convolution 1
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
activation='relu'))
#pooling 1
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 2
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 2
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 3
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
activation='relu'
))
#pooling 4
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(drop))
#fully connected network 1
model.add(Dense(500, activation='relu'))
#fully connected network 2, 26 because 26 different letters in total
model.add(Dense(26, activation='softmax'))
#earlystopping to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
adam = Adam(lr = lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
model.fit(x_train,
d_y_train,
batch_size = batch,
epochs = 5,
verbose = 1,
validation_data = (x_val, d_y_val),
shuffle = True,
callbacks = callback_lists)
loss, acc = model.evaluate(x_val, d_y_val)
settings.append((batch, drop, lr, acc))
#print best accuracy
best_accuracy = max(settings, key = lambda x:x[-1])
print(best_accuracy) #lr = 0.001
best_batch, best_drop, best_lr = best_accuracy[:-1]
print(best_batch, best_drop, best_lr)
#using tuned parameters to train model
model = Sequential()
#convolution 1, activation
model.add(Convolution2D(input_shape=(28,28,1),
filters=32,
kernel_size=5,
strides=1,
padding='same',
activation='relu'))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1,padding='same'))
#convolution 2, activation
model.add(Convolution2D(filters=64,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 3, activation
model.add(Convolution2D(filters=128,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#convolution 4, activation
model.add(Convolution2D(filters=256,
kernel_size=5,
strides=1,
padding='same',
activation='relu'
))
#pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=1, padding='same'))
#Flatten, transfer to vectors
model.add(Flatten())
#Dropout
model.add(Dropout(best_drop))
#fully connected network 1
model.add(Dense(500,activation='relu'))
#fully connected network 2
model.add(Dense(26, activation='softmax'))
#early stopping, to prevent overfitting
early_stopping = EarlyStopping(monitor = 'val_loss', patience = 3, mode = 'min')
#reducing learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor = 0.1,
patience = 1,
verbose = 1,
mode = 'min',
min_delta =0.0001,
cooldown=0,
min_lr=0)
callback_lists = [early_stopping, reduce_lr]
#optimizer
adam = Adam(lr = best_lr)
model.compile(optimizer=adam,loss="categorical_crossentropy",metrics=['accuracy'])
#training
history = model.fit(x_train,d_y_train,
batch_size = best_batch,
epochs = 12,
validation_data = (x_val, d_y_val),
verbose = 1,
shuffle = True,
callbacks = callback_lists)
#plotting loss and accuracy of training and validation sets
#accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
#save our model
model.save('my_model1.h5')
#make predictions of testing sets and see how accurate those predictions are
loss,acc = model.evaluate(x_test, d_y_test)
print(loss,acc)
"""We have got around 85% accuray on testing set with KNN model and 95% accuracy on testing set with CNN model. So we decided to use CNN for our task 2.
# Task 2
"""
# Commented out IPython magic to ensure Python compatibility.
# import libraries used for task 2
# %pylab inline --no-import-all
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
import cv2
from keras.utils import plot_model
from skimage.util import random_noise
from skimage.filters import threshold_local
from skimage.morphology import remove_small_objects
from skimage.measure import label, regionprops
from skimage.color import label2rgb
from google.colab import drive
# load testing-dataset
test = np.load('test-dataset.npy')
print(test.shape)
# see what images are like before denoise
plt.imshow(test[-1])
plt.show()
# denoise all images and see what they are like now
from scipy import ndimage
import matplotlib.pyplot as plt
testing_filtered = []
for i in range(len(test)):
new_image = ndimage.median_filter(test[i], 2)
testing_filtered.append(ndimage.median_filter(new_image, 3))
plt.imshow(testing_filtered[-1])
plt.show()
#define a function to split the images
def image_crop(data):
    """Cut each filtered image into per-letter 28x28 crops.

    Per image: threshold, find contours, build bounding boxes, drop tiny
    (noise) boxes, split very wide boxes in two, sort left-to-right, and
    centre-pad every crop to 28x28. Returns a ragged array: one list of
    crops per input image.
    """
    testing_cropped = []
    for i in range(len(data)):
        #threshold each image and find contours
        img = (data[i]).astype('uint8')
        _, threshold = cv2.threshold(img.copy(), 10, 255, 0)
        contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        bboxes = []
        #creating (x, y, w, h) bounding boxes from contours
        for f in range(len(contours)):
            bboxes.append(cv2.boundingRect(contours[f]))
        split = []
        to_remove = []
        # Boxes smaller than 20x17 are treated as noise; boxes wider than
        # 30 px presumably contain two touching glyphs and get split.
        for j in range(len(bboxes)):
            if (bboxes[j][2] < 20) and (bboxes[j][3] < 17):
                to_remove.append(bboxes[j])
            if bboxes[j][2] >= 30:
                split.append(j)
        # Halve each wide box in place and append its right half.
        for g in split:
            bboxes[g] = (bboxes[g][0], bboxes[g][1], int(bboxes[g][2]/2), bboxes[g][3])
            modified_bboxes = bboxes[g]
            modified_bboxes = (int(bboxes[g][0]) + int(bboxes[g][2]), int(bboxes[g][1]),
            int(bboxes[g][2]), int(bboxes[g][3]))
            bboxes.append(modified_bboxes)
        #removing the noise boxes collected above
        for b in to_remove:
            bboxes.remove(b)
        #sorting boxes into left-to-right reading order
        bboxes = sorted(np.array(bboxes), key = lambda x: x[0])
        cut = []
        for h in range(len(bboxes)):
            images = img[bboxes[h][1]:bboxes[h][1]+bboxes[h][3],
            bboxes[h][0]:bboxes[h][0]+bboxes[h][2]]
            # NOTE(review): compares a row-shape tuple against numpy's 3 —
            # appears to keep only crops wider than 3 px via numpy
            # broadcasting; confirm this is the intended filter.
            if images[0].shape > np.max(3):
                cut.append(images)
        cropped = []
        #centre-pad every crop to 28x28 so it matches the CNN input shape
        for image_split in cut:
            crop = image_split.reshape((image_split.shape[0],image_split.shape[1],1))
            crop = np.array(tf.image.resize_with_crop_or_pad(crop, 28, 28))
            img_cropped = crop.reshape(28,28)
            cropped.append(img_cropped)
        testing_cropped.append(cropped)
    return np.array(testing_cropped)
testing_cropped = image_crop(testing_filtered)
print(len(testing_cropped)) #10000 images
# let's see an example letter from testing_cropped dataset
plt.imshow(testing_cropped[420][0])
plt.show()
#histogram of letters found per image: mostly 4, but some images yield 3 or 5
l=[]
for i in range(len(testing_cropped)):
    l.append(len(testing_cropped[i]))
plt.hist(l)
plt.show()
#make the 5 most probable label strings per image using the CNN model
block_size = 55  # threshold_local neighbourhood size
predictions = []  # NOTE(review): assigned but never used below
top1 = []
top2 = []
top3 = []
top4 = []
top5 = []
final = []  # per image: 5 candidate strings, ordered least -> most probable
for i in range(10000):
    crops_number = (len(testing_cropped[i]))
    for sample in testing_cropped[i]:
        # Binarise with a local mean threshold and remove small specks,
        # then centre-pad back to the 28x28 CNN input shape.
        imbw = sample > threshold_local(sample, block_size, method = 'mean')
        imbw1 = remove_small_objects(imbw, 10, connectivity=1)
        roi = imbw1
        roi = roi.reshape((roi.shape[0],roi.shape[1],1))
        roi = tf.image.resize_with_crop_or_pad(roi, 28, 28).numpy()
        image = roi.reshape(28, 28)
        pre = model.predict(image.reshape(-1,28,28,1))
        # NOTE(review): this inner `i` shadows the outer image index —
        # harmless in Python (the outer for iterates its own range object)
        # but confusing; consider renaming.
        for i in pre:
            '''i is the probability of each letter, in total 26 probability scores,
            we select the highest 5 and their index is the predicted label'''
            prob1 = np.argsort(i)[-5] + 1
            top5.append(prob1)
            prob2 = np.argsort(i)[-4] + 1
            top4.append(prob2)
            prob3 = np.argsort(i)[-3] + 1
            top3.append(prob3)
            prob4 = np.argsort(i)[-2] + 1
            top2.append(prob4)
            prob5 = np.argsort(i)[-1] + 1
            top1.append(prob5)
    # Rank-5 guesses for every crop come first, then rank-4 ... rank-1.
    pred = top5 + top4 + top3 + top2 + top1
    pred1 = []
    y_pred = []
    # Zero-pad each label to two digits so letters concatenate unambiguously.
    for i in pred:
        i = str(i)
        if len(i) == 1:
            s = i.zfill(2)
            pred1.append(s)
        else:
            pred1.append(i)
    # Each consecutive group of crops_number labels forms one candidate string.
    for step in range(0, len(pred1), crops_number):
        pred2 = pred1[step:step + crops_number]
        pred3 = ''.join(pred2)
        y_pred.append(pred3)
    final1 = y_pred.copy()
    final.append(final1)
    # Reset the per-image accumulators.
    top1.clear()
    top2.clear()
    top3.clear()
    top4.clear()
    top5.clear()
    y_pred.clear()
#print(final)
#take the last image as an example, to see the most probable labels, we got 100% accuracy for this image
print(final[-1][-1])
plt.imshow(testing_filtered[-1])
plt.show()
#take another image that includes 3 letters as an example, we got at least 2 of 3 letters correctly predicted
print(final[18][-1])
plt.imshow(testing_filtered[18])
plt.show()
# save the predictions (one row of 5 candidate strings per image) to CSV
import csv
with open('Predictions.csv', 'w', newline = '') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    wr.writerows(final)
|
23,652 | 0bcb683cd55323bc14c211b8bbdb0e2dabbabccb | import networkx as nx
from parse import read_input_file, write_output_file
from utils import is_valid_solution, calculate_score
import sys
from os.path import basename, normpath
import glob
import random
from itertools import islice
from collections import defaultdict
# def solve(G,a,b):
# """
# Args:
# G: networkx.Graph
# Returns:
# c: list of cities to remove
# k: list of edges to remove
# """
# num_nodes = nx.number_of_nodes(G)
# num_edges = nx.number_of_edges(G)
# k_constraint = min(a, num_edges)
# c_constraint = b
# nodes_removed = 0
# edges_removed = 0
# removed_edge = False
# last_edge = None
# H = G.copy()
# shortest_path = nx.dijkstra_path(H, 0, num_nodes - 1)
# #print(shortest_path)
# num_edges = len(shortest_path) - 1
# #print("Num edges:", num_edges)
# index = 0
# while not removed_edge:
# try:
# sorted_edges = sorted( [(shortest_path[i], shortest_path[i+1]) for i in range(0,len(shortest_path) - 1)], key = (lambda e : H.edges[e[0],e[1]]['weight']) , reverse=True)
# last_edge = sorted_edges[index]
# H.remove_edge(*last_edge)
# if nx.has_path(H, 0, num_nodes - 1):
# return [], [last_edge]
# else:
# index += 1
# except nx.NetworkXNoPath:
# print("No path from source to target node")
# except Exception as e:
# print("Last edge:", last_edge)
# print(e)
# break
# return [],[]
def min_cut_solve(G,a,b):
    """Greedy randomized heuristic: choose up to `b` nodes and `a` edges to
    delete so the 0 -> (n-1) shortest path lengthens, while keeping the graph
    connected and source/sink reachable.

    Returns (removed_nodes, removed_edges).
    """
    num_nodes = nx.number_of_nodes(G)
    num_edges = nx.number_of_edges(G)
    k_constraint = min(a, num_edges)  # edge-removal budget
    c_constraint = b                  # node-removal budget
    S = nx.Graph()                    # union of shortest paths seen so far
    rem_nodes = []
    H = G.copy()
    control = nx.dijkstra_path(G, 0, num_nodes - 1)  # NOTE(review): unused
    # Phase 1: repeatedly fold the current shortest path into S and try to
    # remove a minimum s-t node cut of S (within budget, keeping H connected).
    while True:
        try:
            shortest_path = nx.dijkstra_path(H, 0, num_nodes - 1)
        except nx.NetworkXNoPath:
            break
        S.add_weighted_edges_from([(x, y, G.edges[x,y]["weight"]) for x,y in nx.utils.pairwise(shortest_path)])
        nodes = nx.algorithms.connectivity.minimum_st_node_cut(S,0,num_nodes - 1)
        if len(nodes) > c_constraint:
            break
        elif len(nodes) == 0:
            break
        else:
            H.remove_nodes_from(nodes)
            # NOTE(review): rem_nodes keeps only the latest cut while H
            # accumulates removals across iterations — confirm intended.
            if nx.is_connected(H):
                rem_nodes = nodes
            else:
                break
    I = G.copy()
    I.remove_nodes_from(rem_nodes)
    # Phase 2: repeatedly pick a random edge of the current shortest path and
    # delete it if the graph stays connected (up to 5 attempts per round).
    rem_edges = []
    prev_path = []
    while len(rem_edges) < k_constraint:
        try:
            shortest_path = nx.dijkstra_path(I, 0, num_nodes - 1)
        except nx.NetworkXNoPath:
            if rem_edges:
                del rem_edges[-1]
            break
        if prev_path == shortest_path:
            break  # no progress last round; stop
        count = 5
        while count > 0:
            edge = random.choice(list(nx.utils.pairwise(shortest_path)))
            K = I.copy()
            K.remove_edge(*edge)
            if nx.is_connected(K) and nx.has_path(K,0,num_nodes-1):
                I.remove_edge(*edge)
                rem_edges.append(edge)
                break
            count -= 1
        prev_path = shortest_path
    return rem_nodes, rem_edges
# if not shortest_path:
# break
# S.add_weighted_edges_from([(x, y, G[x,y]["weight"]) for x,y in nx.utils.pairwise(shortest_path)])
# nodes = nx.algorithms.connectivity.minimum_st_edges_cut(S,0,num_nodes - 1)
# if len(nodes) > c_constraint:
# break
# # edit H
# else:
# H.remove_nodes_from(nodes)
# if nx.is_connected(H):
# rem_nodes = nodes
# else:
# break
# random step
# S.add_edge(a,b, weight = n)
# nx.utils.pairwise(shortest_path)
# S.edges[a,b]["weight"]
# process edges next
# keep track of a list of shortest paths + costs
# keep track of weighted out-degree of a node
# k shortest paths
def get_k_shortest_path(G,S,edge_limit,node_limit):
    """From subgraph S, select up to `node_limit` removable nodes adjacent to
    a minimum s-t edge cut of S.

    Returns (removed_nodes, cut_edges_touching_the_last_removed_node).
    NOTE(review): `edge_limit` is accepted but never used here.
    """
    num_nodes = nx.number_of_nodes(G)
    c_constraint = node_limit
    # Minimum s-t edge cut of the accumulated shortest-path subgraph.
    min_cut = nx.algorithms.connectivity.cuts.minimum_st_edge_cut(S, 0, num_nodes-1)
    nodes_in_cut = set()
    prev_edges = []
    # Collect the endpoints of every cut edge.
    for x,y in min_cut:
        nodes_in_cut.add(x)
        nodes_in_cut.add(y)
    # The source and sink themselves can never be removed.
    if 0 in nodes_in_cut:
        nodes_in_cut.remove(0)
    if num_nodes-1 in nodes_in_cut:
        nodes_in_cut.remove(num_nodes-1)
    # Consider highest-degree endpoints first.
    sorted_nodes = sorted(list(G.degree(nodes_in_cut)), key = (lambda x : x[1]), reverse=True)
    nodes_removed = 0
    removed_nodes = set()
    for node,_ in sorted_nodes:
        I = G.copy()
        I.remove_node(node)
        if nx.is_connected(I) and nx.has_path(I,0,num_nodes-1): # keep the graph usable
            removed_nodes.add(node)
            # NOTE(review): prev_edges is overwritten each iteration, so only
            # the last accepted node's cut edges are returned — confirm intent.
            prev_edges = [edge for edge in min_cut if node in edge]
            nodes_removed += 1
        else:
            continue
        if nodes_removed == c_constraint:
            break
    return list(removed_nodes), prev_edges
# def new_solve(G, edge_limit, node_limit):
# with open("results.txt", "w") as f:
# path_dict = defaultdict(int)
# num_nodes = nx.number_of_nodes(G)
# num_edges = nx.number_of_edges(G)
# k_constraint = min(edge_limit, num_edges)
# c_constraint = node_limit
# removed_nodes = []
# print("Beginning")
# path_generator = nx.shortest_simple_paths(G, 0, num_nodes-1, weight="weight")
# print("Between")
# path_list = list(path_generator)
# print(len(path_list))
# print("After")
# # path_generator = nx.shortest_simple_paths(G, 0, num_nodes-1, weight="weight")
# # S = nx.Graph()
# # print("Start")
# # print("---------------------------")
# # prev_edges = []
# # buffer = next(path_generator)
# # while True:
# # #update subgraph
# # next_path = buffer
# # try:
# # buffer = next(path_generator)
# # except StopIteration:
# # break
# # # if not buffer:
# # # break
# # print("next_path:", next_path)
# # S.add_weighted_edges_from([(x, y, G.edges[x,y]["weight"]) for x,y in nx.utils.pairwise(next_path)])
# # #get nodes for current subgraph
# # returned_nodes, prev_edges = get_k_shortest_path(G,S,k_constraint,c_constraint)
# # print("returned_nodes:", returned_nodes)
# # print("prev_edges:", prev_edges)
# # path_dict[str(prev_edges)] += 1
# # #check for stop condition
# # S_copy = S.copy()
# # S_copy.remove_nodes_from(returned_nodes)
# # if len(prev_edges) >= k_constraint:
# # break
# # else:
# # print("else")
# # if returned_nodes:
# # removed_nodes = returned_nodes
# # else:
# # print("Returned_nodes is empty")
# # f.write(str(path_dict))
# # # most recent S
# # G_minus_nodes = G.copy()
# # G_minus_nodes.remove_nodes_from(removed_nodes)
# # I = G_minus_nodes.copy()
# # removed_edges = []
# # for edge in prev_edges:
# # I.remove_edge(*edge)
# # if nx.is_connected(I) and nx.has_path(I,0,num_nodes-1):
# # removed_edges.append(edge)
# # else:
# # continue
# # return removed_nodes, removed_edges
# return [],[]
# def new_new_solve(G, edge_limit, node_limit):
# num_nodes = nx.number_of_nodes(G)
# num_edges = nx.number_of_edges(G)
# k_constraint = min(edge_limit, num_edges)
# c_constraint = node_limit
# removed_nodes = []
# # print("Beginning")
# # path_generator = nx.shortest_simple_paths(G, 0, num_nodes-1, weight="weight")
# # print("Between")
# # path_list = list(path_generator)
# # print(len(path_list))
# # print("After")
# # path_generator = nx.shortest_simple_paths(G, 0, num_nodes-1, weight="weight")
# length = dict(nx.single_source_bellman_ford_path_length(G,0))
# neighbor_shortest_paths = nx.single_source_bellman_ford_path(G,0)
# shortest_paths = sorted([ n for n in G.neighbors(num_nodes - 1)], key=(lambda x : length[x] + G.edges[x,num_nodes - 1]["weight"]))
# S = nx.Graph()
# print("Start")
# print("---------------------------")
# index = 0
# prev_edges = []
# # buffer = next(path_generator)
# while True:
# #update subgraph
# if index >= len(shortest_paths):
# break
# next_path = neighbor_shortest_paths[shortest_paths[index]]
# index += 1
# # try:
# # buffer = next(path_generator)
# # except StopIteration:
# # break
# # if not buffer:
# # break
# print("next_path:", next_path)
# S.add_weighted_edges_from([(x, y, G.edges[x,y]["weight"]) for x,y in nx.utils.pairwise(next_path)])
# #get nodes for current subgraph
# returned_nodes, prev_edges = get_k_shortest_path(G,S,k_constraint,c_constraint)
# print("returned_nodes:", returned_nodes)
# print("prev_edges:", prev_edges)
# # path_dict[str(prev_edges)] += 1
# #check for stop condition
# S_copy = S.copy()
# S_copy.remove_nodes_from(returned_nodes)
# if len(prev_edges) >= k_constraint:
# break
# else:
# print("else")
# if returned_nodes:
# removed_nodes = returned_nodes
# else:
# print("Returned_nodes is empty")
# # f.write(str(path_dict))
# # most recent S
# G_minus_nodes = G.copy()
# G_minus_nodes.remove_nodes_from(removed_nodes)
# I = G_minus_nodes.copy()
# removed_edges = []
# for edge in prev_edges:
# I.remove_edge(*edge)
# if nx.is_connected(I) and nx.has_path(I,0,num_nodes-1):
# removed_edges.append(edge)
# else:
# continue
# return removed_nodes, removed_edges
# remove from min cut nodes
def remove_min_cut(G,edge_limit, node_limit):
    """Variant of min_cut_solve: grow S from successive simple shortest paths
    and take node cuts via get_nodes(), then randomly prune edges from the
    remaining shortest path.

    Returns (removed_nodes, removed_edges).
    """
    num_nodes = nx.number_of_nodes(G)
    num_edges = nx.number_of_edges(G)
    k_constraint = min(edge_limit, num_edges)  # edge-removal budget
    c_constraint = node_limit                  # node-removal budget
    removed_nodes = []
    path_generator = nx.shortest_simple_paths(G, 0, num_nodes-1, weight="weight")
    S = nx.Graph()
    removed_nodes = []
    # Phase 1: keep enlarging S with the next simple shortest path until no
    # acceptable node cut remains.
    while True:
        try:
            next_path = next(path_generator)
        except StopIteration:
            break
        S.add_weighted_edges_from([(x, y, G.edges[x,y]["weight"]) for x,y in nx.utils.pairwise(next_path)])
        returned_nodes = get_nodes(G,S,c_constraint)
        if returned_nodes:
            removed_nodes = returned_nodes
        else:
            break
    I = G.copy()
    I.remove_nodes_from(removed_nodes)
    # Phase 2: random edge pruning along the current shortest path, keeping
    # the graph connected (up to 5 attempts per round).
    removed_edges = []
    prev_path = None
    while len(removed_edges) < k_constraint:
        try:
            shortest_path = nx.dijkstra_path(I, 0, num_nodes - 1)
        except nx.NetworkXNoPath:
            if removed_edges:
                del removed_edges[-1]
            break
        if prev_path == shortest_path:
            break  # no progress last round; stop
        count = 5
        while count > 0:
            edge = random.choice(list(nx.utils.pairwise(shortest_path)))
            K = I.copy()
            K.remove_edge(*edge)
            if nx.is_connected(K) and nx.has_path(K,0,num_nodes-1):
                I.remove_edge(*edge)
                removed_edges.append(edge)
                break
            count -= 1
        prev_path = shortest_path
    return removed_nodes, removed_edges
def get_nodes(G,S,node_limit):
    """Return a minimum 0->(n-1) node cut of S if it is usable, else None.

    Usable means: within the node budget, and removing the cut from G keeps
    G connected with the source and sink still joined.
    """
    sink = nx.number_of_nodes(G) - 1
    cut = nx.algorithms.connectivity.cuts.minimum_st_node_cut(S, 0, sink)
    if len(cut) > node_limit:
        return None
    candidate = G.copy()
    candidate.remove_nodes_from(cut)
    if not nx.is_connected(candidate):
        return None
    if not nx.has_path(candidate, 0, sink):
        return None
    return cut
# Here's an example of how to run your solver.
# Usage: python3 solver.py test.in
# if __name__ == '__main__':
# assert len(sys.argv) == 2
# path = sys.argv[1]
# file_name = basename(normpath(path))[:-3]
# G = read_input_file(path)
# # c,k = new_solve(G,3,50)
# # c,k = min_cut_solve(G,50,3)
# max_c,max_k = remove_min_cut(G,50,3)
# max_score = calculate_score(G, max_c,max_k)
# # for i in range(1):
# # c,k = remove_min_cut(G,50,3)
# # score = calculate_score(G, c, k)
# # print(f"Shortest Path Difference: {score}")
# # if score > max_score:
# # max_c, max_k = c, k
# # max_score = score
# # print(max_c,max_k)
# # assert is_valid_solution(G, c, k)
# print("Best score:", max_score)
# write_output_file(G, max_c,max_k, f'outputs/small/{file_name}.out')
# For testing a folder of inputs to create a folder of outputs, you can use glob (need to import it)
if __name__ == '__main__':
    # (folder, edge_budget, node_budget, random restarts) per input size class.
    size = [("small",15,1,200), ("medium",50,3,100), ("large",100,5,50)]
    for s in size:
        size_rep = s[3]
        inputs = glob.glob(f"inputs/{s[0]}/*")
        for input_path in inputs:
            file_name = basename(normpath(input_path))[:-3]
            print(file_name)
            output_path = f'outputs/{s[0]}/' + file_name + '.out'
            G = read_input_file(input_path)
            # Randomized solver: keep the best score over size_rep + 1 runs.
            max_c,max_k = min_cut_solve(G,s[1],s[2]) # Original Random Solver
            max_score = calculate_score(G, max_c, max_k)
            for i in range(size_rep): #Change the range for how many times u want to run it
                c,k = min_cut_solve(G,s[1],s[2]) # Original Random Solver
                score = calculate_score(G, c, k)
                if score > max_score:
                    max_c, max_k = c, k
                    max_score = score
            print("Score for '", file_name, "':", max_score)
            write_output_file(G, max_c, max_k, output_path)
|
# Module import for random-number generation
from random import randint

# BUG fix: a space was missing at the concatenation point below — the user
# previously saw "...получая наних ответы...".
print("Задайте Гадалке любое кол-во закрытых вопросов, по очереди получая на " +
      "них ответы. Чтобы прекратить задавать вопросы, напишите - \"стоп\"")
count = 0  # question number shown to the user
# 1. Read a question.
# 2. "стоп" ends the session; an empty line asks the user to try again.
# 3. Otherwise draw a random number 0..9; its parity picks "Да" or "Нет".
while True:
    count += 1
    print("\n{0}. Вопрос: ".format(count))
    question = input()
    if question == "стоп":
        print("Сеанс гадания завершен!")
        break
    if not question:
        print("Вопрос не задан. Задайте вопрос или напишите - \"стоп\"")
    else:
        random_number = randint(0, 9)
        random_number %= 2
        if random_number == 0:
            print("Ответ: Нет")
        else:
            print("Ответ: Да")
|
23,654 | a332cb11cc64c5182d46838e0a4b06f40cac28cd | import unittest
import requests
import os
import sys
import random
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from plugins.engines.freemarker import Freemarker
from core.channel import Channel
class FreemarkerTest(unittest.TestCase):
    """Integration tests for Freemarker template-injection detection.

    Requires the vulnerable test server listening on 127.0.0.1:15003.
    """

    # Channel data expected after a successful detection; the environment-
    # dependent 'os' key is removed before comparison.
    expected_data = {
        'language': 'java',
        'engine': 'freemarker',
        'exec': True,
        'trailer_tag': '${%(trailer)s}',
        'header_tag': '${%(header)s}',
        'render_tag': '${%(payload)s}',
    }

    def test_reflection(self):
        """Detect injection when the payload is reflected verbatim."""
        # Fixed: removed the unused local `template = '%s'` (dead code; the
        # template is configured server-side, not through this variable).
        channel = Channel({
            'url': 'http://127.0.0.1:15003/freemarker?inj=*'
        })
        Freemarker(channel).detect()
        del channel.data['os']
        self.assertEqual(channel.data, self.expected_data)

    def test_reflection_within_text(self):
        """Detect injection when the payload is embedded in surrounding text."""
        # Fixed: removed the unused local `template = 'AAAA%sAAAA'`.
        channel = Channel({
            'url': 'http://127.0.0.1:15003/freemarker?inj=*'
        })
        Freemarker(channel).detect()
        del channel.data['os']
        self.assertEqual(channel.data, self.expected_data)
|
23,655 | 398b0f0826fa04d922a7454ada6034bb51243e56 | x = "jack"
# Tutorial script: Python variables, lists, slicing, and numpy arrays.
# NOTE(review): `x` is assigned "jack" immediately above this chunk.
print(x)
# We can store only one value in a variable
#If we try to add one more value than last value will be overridden now value will be krish
x = "Krish"
print(x)
#If we want to store multiple values in single variable for that we use List Datatype
db = [1, "Nitesh", "Ok"] #indexing start from zero
print(db)
print(type(db)) #list
#To add item to list we use append()
db.append(1111)
print(db)
#Slicing Operartion -> it is done to manupulate the list
print(db[1:3]) #start from 1 index till second index as last index is excluded
#output-> ['Nitesh','Ok']
print(db[1:4])
#output->['Nitesh', 'Ok', 1111]
print(db[2:]) #starting from 2 to till last
#output->['Ok', 1111]
print(db[ : 3]) #from 0 to 3
#output->[1, 'Nitesh', 'Ok']
print(db[-1]) #last element, in tis case indexing starts from 1
#output-> 1111
print(db[-3:]) #last 3 and indexing starts from 1 as it is indexing from backward
#output->['Nitesh', 'Ok', 1111]
#List with multiple records
db = [ [1, "Nitesh", 1111, "good"], [2, "rahul",2222, "Ok" ], [3,"raj",3333, "Ok"] ]
print(db)
print(type(db))
print(db[1]) #record at 1 index will get printed
#output-> [2, 'rahul', 2222, 'Ok']
print(db[1:3]) #record at 1 index and 2nd index will get printed
#output-> [[2, 'rahul', 2222, 'Ok'], [3, 'raj', 3333, 'Ok']]
print(db[2][2]) #prints 2nd index records 2nd element
#output-> 3333
# Can we retrrive all columns in List?
# -> Not possible, Because list is note meant for columnwise operations
#If we want to work columnwise for that we use numpy array
import numpy
a = numpy.array(db) #converted list into numpy array
print(a)
#output-> [['1' 'Nitesh' '1111' 'good']
# ['2' 'rahul' '2222' 'Ok']
# ['3' 'raj' '3333' 'Ok']]
print(type(a))
#<class 'numpy.ndarray'>
print(a[0]) #prints first list
#output-> ['1' 'Nitesh' '1111' 'good']
#coloumn wise operation
print(a[ : , 1]) #prints column indexed at postion 1 ( indexing start from 0)
#output-> ['Nitesh' 'rahul' 'raj']
print(a[ : , 1:3]) #retreive column 1 and 2 as last number is excluded
#output->[['Nitesh' '1111']
# ['rahul' '2222']
# ['raj' '3333']]
#One Dimentional array
a1 = numpy.array( [ 1,2,3,4,5,6,7,8]) #if numpy array has only one row than only one sq bracket comes up
print(a1.shape) # it shows the total element
#output-> (8,) 1d array
print(a1[2:5])
#output-> [3 4 5]
print(a1.reshape(2,4)) #convert array into 2 rows and 4 columns
#output-> [[1 2 3 4]
# [5 6 7 8]]
print(a1.reshape(4,2).shape) #converts into 4 rows and 2 coloumns ans shows number of rows and col
#output-> (4, 2)
print(a1.reshape(1,8).shape)
#output-> (1, 8)
print(a1.reshape(-1,1).shape)
|
23,656 | 7dda8e2973be8677af5d3ef4b464fd37123552fa | import requests
import json
import logging
from _collections import defaultdict
logger = logging.getLogger(__name__)

# Emoji prefix shown in the Slack message per log level; unknown levels
# fall back to a clown face via the defaultdict factory.
emoji = defaultdict(lambda: ":clown_face:")
emoji["WARNING"] = ":thinking_face:"
emoji["ERROR"] = ":face_with_thermometer:"
emoji["INFO"] = ":male-teacher:"
emoji["CRITICAL"] = ":bomb:"
emoji["DEBUG"] = ":female-mechanic:"
def SlackMessageHandlerFactory(webhook_url):
    """Build and return a SlackMessageHandler bound to *webhook_url*."""
    handler = SlackMessageHandler(webhook_url)
    return handler
class SlackMessageHandler(logging.Handler):
    """Logging handler that posts each record to a Slack incoming webhook."""

    def __init__(self, webhook_url):
        # webhook_url may be empty/None, in which case emit() is a no-op.
        self.webhook_url = webhook_url
        super().__init__()

    def create_block(self, name, value):
        """Return a Slack mrkdwn field block titled *name* showing *value*."""
        return {"type": "mrkdwn", "text": "*{}*:\n{}".format(name, value)}

    # replacing default django emit (https://github.com/django/django/blob/master/django/utils/log.py)
    def emit(self, record: logging.LogRecord, *args, **kwargs):
        """Format *record* as Slack blocks and POST it to the webhook."""
        # Check if a logging url was set.  Fixed: `not self.webhook_url`
        # replaces the two `== None` / `== ""` equality comparisons.
        if not self.webhook_url:
            return
        if getattr(record, "logHandlerException", None) == self.__class__:
            return  # This error was caused in this handler, no sense in trying again

        # Request details are optional (Django attaches them for request logs).
        req = getattr(record, "request", None)
        request_fields = [
            self.create_block("Method", getattr(req, "method", "n/a")),
            self.create_block("Path", getattr(req, "path", "n/a")),
            self.create_block("Status Code", getattr(record, "status_code", "n/a")),
        ]
        message = {
            "blocks": [
                {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": "{} *{}*:\n[{}]: {}".format(
                            emoji[record.levelname],
                            record.levelname,
                            record.name,
                            record.getMessage(),
                        ),
                    },
                },
                {
                    "type": "section",
                    "fields": [
                        {"type": "mrkdwn", "text": "*Level*:\n{}".format(record.levelname)},
                        *request_fields,
                    ],
                },
            ]
        }
        try:
            # Fixed: timeout added so a slow Slack endpoint cannot block the
            # logging caller indefinitely; Timeout is a RequestException and
            # is handled below.
            requests.post(self.webhook_url, data=json.dumps(message), timeout=10)
        except requests.exceptions.RequestException as e:  # Catch all request related exceptions
            logger.exception(
                "Exception while trying to send a log message to Slack",
                exc_info=e,
                extra={"logHandlerException": self.__class__},
            )
|
class CoinJam(object):
    """Generator for "jamcoins": N-digit strings of 0/1 starting and ending
    in 1 that are composite when interpreted in every base 2..10."""

    def __init__(self, filename):
        """Read `N J` from the second line of *filename* and write up to J
        jamcoins (with one nontrivial divisor per base) to `<filename>_out`."""
        # Fixed: files are now closed via context managers (both handles
        # leaked in the original).
        with open(filename, "r") as file:
            lines = file.readlines()
        N = int(lines[1].split(" ")[0])
        J = int(lines[1].split(" ")[1])
        result = CoinJam.find_all(N, J)
        with open(filename + "_out", "w") as outfile:
            outfile.write("Case #1:\n" + result + "\n")

    @staticmethod
    def find_all(N, J):
        """Return up to J jamcoins of length N, one per line, each followed
        by a divisor for every base 2..10."""
        result_string = ""
        count = 0
        candidate = CoinJam.get_candidate(N)
        for i in range(1, len(candidate) - 1):
            # Fresh template per outer step; the inner loop grows a run of
            # 1s starting at position i.
            candidate = CoinJam.get_candidate(N)
            for j in range(i, len(candidate) - 1):
                candidate[j] = "1"
                candidate_string = "".join(candidate)
                result = CoinJam.calculate(candidate_string)
                if result is not None:
                    count += 1
                    result_string += candidate_string + " " + " ".join(result) + "\n"
                    print(candidate_string + " " + str(result))
                if count >= J:
                    return result_string
        # BUG fix: the original returned the int 0 here, which made the
        # string concatenation in __init__ raise TypeError whenever fewer
        # than J jamcoins were found.
        return result_string

    @staticmethod
    def get_candidate(N):
        """Return the base template ['1', '0', ..., '0', '1'] of length N."""
        candidate = ["0"] * N
        candidate[0] = "1"
        candidate[len(candidate) - 1] = "1"
        return candidate

    @staticmethod
    def calculate(input_str):
        """Return one divisor (as str) per base 2..10, or None if the value
        is prime (has no nontrivial divisor) in any base."""
        divisors = []
        for base in range(2, 10 + 1):
            number = int(input_str, base)
            divisor = CoinJam.find_divisor(number)
            if divisor is None:
                return None
            else:
                divisors.append(str(divisor))
        return divisors

    @staticmethod
    def find_divisor(n):
        """Return the smallest nontrivial divisor of n, or None if n is prime."""
        for i in range(2, int(n**0.5) + 1):
            if n % i == 0:
                return i
        return None


if __name__ == '__main__':
    count = CoinJam("input")
23,658 | 5b4d6ac40022751411e17dd59654fd9b4b0806a8 | # coding: utf-8
# Author:Brent
# Date :2020/8/5 10:31 PM
# Tool :PyCharm
# Describe :数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。
#
# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。
#
# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。
#
# 示例 1:
#
# 输入: cost = [10, 15, 20]
# 输出: 15
# 解释: 最低花费是从cost[1]开始,然后走两步即可到阶梯顶,一共花费15。
# 示例 2:
#
# 输入: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
# 输出: 6
# 解释: 最低花费方式是从cost[0]开始,逐个经过那些1,跳过cost[3],一共花费6。
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/min-cost-climbing-stairs
# 注意:
#
# cost 的长度将会在 [2, 1000]。
# 每一个 cost[i] 将会是一个Integer类型,范围为 [0, 999]。
class Solution(object):
    """LeetCode 746 "Min Cost Climbing Stairs": starting from index 0 or 1,
    each step i costs cost[i]; from a step you may climb 1 or 2 stairs."""

    def minCostClimbingStairs(self, cost):
        """Return the minimal total cost to reach the floor above the last step.

        dp[i] = cheapest total cost having just paid for step i; a virtual
        top step of cost 0 terminates the recurrence.
        Fixed: works on a local copy — the original did `cost.append(0)` and
        mutated the caller's list.
        """
        steps = cost + [0]  # local copy with a free "top" step appended
        dp = [0] * len(steps)
        dp[0] = steps[0]
        dp[1] = steps[1]
        for i in range(2, len(steps)):
            # Step i is reached in one move from i-1 or in two moves from i-2.
            dp[i] = steps[i] + min(dp[i - 1], dp[i - 2])
        return dp[-1]

    def minCostClimbingStairs_1(self, cost):
        """O(1)-space variant of minCostClimbingStairs (same result)."""
        steps = cost + [0]  # local copy; does not mutate the argument
        a, b = steps[0], steps[1]
        for i in range(2, len(steps)):
            c = steps[i] + min(a, b)
            a, b = b, c
        return b
if __name__ == '__main__':
    # Sample from the problem statement; expected output: 6.
    cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
    solution = Solution()
    print(solution.minCostClimbingStairs_1(cost))
|
23,659 | 2493fb5147662e5cb66faaafbdd5c364f6bfae6c | '''
CREDENTIALS FOR NEO4J DB CONNECTION
'''
AUTH_CREDENTIAL="neo4j"
AUTH_PASSWORD="WikipediaIsAwesome!" |
23,660 | 91d9ce41e2a946cd74f75ce082640b375cb4201b | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, IntegerField,FileField,SubmitField, TextAreaField,validators,SelectField,SelectMultipleField
from wtforms.widgets import TextArea, html5
from wtforms.fields.html5 import DateField
from flask_wtf.file import FileField
from wtforms.validators import DataRequired, Length
class UploadForm(FlaskForm):
    """Video-upload form: a required file field plus a submit button."""
    fileName = FileField("Upload your Video here", validators=[DataRequired()])
    submit = SubmitField('Upload')
|
23,661 | be5e807cd884e8d6fbb73a9b405d8c874d2c2652 | from django.conf.urls import url
from .views import QQAuthURLView, QQAuthUserView
urlpatterns = [
    # QQ login-page callback
    url(r'^qq/authorization/$', QQAuthURLView.as_view()),
    # Callback handling the user's QR-code login
    url(r'^qq/user/$', QQAuthUserView.as_view()),
]
|
# Letter counts of the English words for 1-20 and the round tens
# (helper table for the number-letter-counting script below).
dictionary = {
    1: 3, 2: 3, 3: 5, 4: 4, 5: 4, 6: 3, 7: 5, 8: 5, 9: 4, 10: 3,
    11: 6, 12: 6, 13: 8, 14: 8, 15: 7, 16: 7, 17: 9, 18: 8, 19: 8,
    20: 6, 30: 6, 40: 5, 50: 5, 60: 5, 70: 7, 80: 6, 90: 6,
}
numbers = range(1, 1001)
def main():
    # Project Euler 17 (Python 2 script): sum the letters of the English
    # words for every value in `numbers`, using the counts in `dictionary`.
    total = 0
    for each in numbers:
        num = each
        each = str(each)
        if len(each) < 2: # for all the ones numbers
            tmp = dictionary[int(each)]
            total += tmp
        elif len(each) == 2: # meaning the tens numbers
            if num in dictionary:
                tmp = dictionary[num]
                total += tmp
            else:
                # e.g. 42 -> "forty" + "two"
                tmp = (dictionary[int(each[0]+"0")] +
                       dictionary[int(each[-1])])
                total += tmp
        elif len(each) == 3:
            # hundreds: "<digit> hundred" ("hundred" is 7 letters)
            tmp = dictionary[int(each[0])] + 7
            total += tmp
            if not int(each[1:]) == 0:
                # "+ 3" accounts for the word "and" (British usage)
                if int(each[1:]) in dictionary:
                    tmp = dictionary[int(each[1:])]
                    total += tmp + 3
                else:
                    total += (dictionary[int(each[-2]+"0")] +
                              dictionary[int(each[-1])]) + 3
        else:
            # 1000 -> "one thousand" (11 letters)
            total += 11
    print total
main()

# Letter counts of the component words, used by the commented-out
# alternative computations further down.
hundred = 7   # "hundred"
andd = 3      # "and"
thousand = 8  # "thousand"
# presumably: "and" appears in the 9*99 numbers of 100-999 that are not
# exact hundreds — TODO confirm against the loop-based result above
total_and = 9 * 99 * andd
total_hundred = 900 * hundred
tens = 0
# for each in (10, 20, 30, 40, 50, 60, 70, 80, 90):
# tens += dictionary[each] * 100
# ones = 0
# for each in (1, 2, 3, 4, 5, 6, 7, 8, 9):
# ones += 91 * dictionary[each]
# total_eleven = 0
# for each in (11, 12, 13, 14, 15, 16, 17, 18, 19):
# total_eleven += dictionary[11] * 10
# grandtotal = (total_and + total_eleven + total_hundred
# + tens + ones + 11)
# print grandtotal
# def counter(number):
# length = len(str(number))
# if length < 3:
# if number in dictionary and number not in (100, 1000):
# return dictionary[number]
# else:
# return (dictionary[int((str(number)[0] + '0'))] +
# dictionary[int(str(number)[-1])])
# elif number == 1000:
# return 11
# else:
# try:
# last = counter(int(str(number)[1:]))
# return last + 10 + dictionary[int(str(number)[0])]
# except:
# return 7 + dictionary[int(str(number)[0])]
# sum = 0
# for each in range(1, 1001):
# print each, counter(each)
# sum += counter(each)
# print sum
|
23,663 | ddd21aa329c721078150e53ad15841f91d654674 | import abc
import collections
import copy
import json
from abc import ABC, abstractmethod
from importlib import import_module
from typing import Dict
import math
import numpy as np
import scipy.stats
from ConfigSpace import ConfigurationSpace
from ConfigSpace.conditions import InCondition
from ConfigSpace.hyperparameters import UniformIntegerHyperparameter, CategoricalHyperparameter, \
UniformFloatHyperparameter
from hyperopt import hp
from hyperopt.pyll import scope
from config import MetaConfig, MetaConfigCollection, ConfigInheritanceGraph, ConfigFeature, CATEGORICAL, UNI_INT, \
UNI_FLOAT, PARENT, VALUE
class BaseConverter(ABC):
    """Common interface for translating the tool-agnostic MetaConfig format
    into a specific optimizer's configuration-space representation."""

    @abstractmethod
    def convert(self, config: MetaConfigCollection) -> object:
        """Convert a whole collection (one entry per algorithm)."""
        pass

    @abstractmethod
    def convert_single(self, config: MetaConfig) -> object:
        """Convert the configuration of a single algorithm."""
        pass

    def inverse(self, config: Dict, config_space: MetaConfigCollection) -> Dict:
        """Map an optimizer-specific result back to the common dict format;
        the default is an unchanged deep copy."""
        return copy.deepcopy(config)
class NoopConverter(BaseConverter):
    """Pass-through converter: returns the configuration unchanged."""

    def convert(self, config: MetaConfigCollection) -> object:
        return config

    def convert_single(self, config: MetaConfig) -> object:
        return config
class ConfigSpaceConverter(BaseConverter):
    """Converts configurations into a SMAC/auto-sklearn ConfigurationSpace."""

    def convert(self, config: MetaConfigCollection) -> ConfigurationSpace:
        '''
        Converting input JSON to SMAC ConfigurationSpace
        :param config: JSON file with the configurations
        :return: ConfigurationSpace with a '__choice__' hyperparameter
                 selecting between the per-algorithm sub-spaces
        '''
        from .util import ConfigSpace as util
        configs = {}
        for key, estimator in config.items():
            estimator_cs = self.convert_single(estimator)
            component = util.sklearn_mapping(key)
            # BUG fix: bind the current search space through a default
            # argument.  The original `lambda dataset_properties=None:
            # estimator_cs` closed over the loop variable, so every patched
            # component returned the search space of the *last* estimator
            # (late-binding closure).
            component.get_hyperparameter_search_space = \
                lambda dataset_properties=None, space=estimator_cs: space
            configs[key] = estimator_cs
        cs = ConfigurationSpace()
        estimator = CategoricalHyperparameter('__choice__', list(configs.keys()), default_value='sklearn.svm.SVC')
        cs.add_hyperparameter(estimator)
        for name, search_space in configs.items():
            parent_hyperparameter = {'parent': estimator, 'value': name}
            cs.add_configuration_space(name, search_space, parent_hyperparameter=parent_hyperparameter)
        return cs

    def convert_single(self, estimator: MetaConfig) -> ConfigurationSpace:
        '''
        Builds a ConfigurationSpace for a single estimator
        :param estimator: A dict in form
            {parameter_name1: {Type:XY, Min: z1, Max: z2 condition: {parent: p, value: [v]}}, parameter_name2 ...}
        :return: ConfigurationSpace for input estimator
        '''
        cs = ConfigurationSpace()
        for name, entry in estimator.items():
            if entry.type == CATEGORICAL:
                cs.add_hyperparameter(
                    CategoricalHyperparameter(name, entry.choices, default_value=entry.default))
            elif entry.type == UNI_INT:
                cs.add_hyperparameter(
                    UniformIntegerHyperparameter(name, entry.lower, entry.upper, default_value=entry.default,
                                                 log=entry.log))
            elif entry.type == UNI_FLOAT:
                cs.add_hyperparameter(
                    UniformFloatHyperparameter(name, entry.lower, entry.upper, default_value=entry.default,
                                               log=entry.log))
            if entry.has_condition():
                # Activate the child only for the listed parent values.
                cs.add_condition(
                    InCondition(child=cs.get_hyperparameter(name),
                                parent=cs.get_hyperparameter(entry.condition[PARENT]), values=entry.condition[VALUE]))
        return cs

    def inverse(self, config: Dict, config_space: MetaConfigCollection) -> Dict:
        """Flatten a ConfigSpace result ('<algo>:<param>' keys) back into
        {'algorithm': ..., <param>: ...} form."""
        algorithm = config['__choice__']
        d = {'algorithm': algorithm}
        for key, value in config.items():
            if key == '__choice__':
                continue
            d[key.split(':')[1]] = value
        return d
class TpotConverter(BaseConverter):
    """Converts configurations into a TPOT `config_dict`."""

    def convert(self, config: MetaConfigCollection, points: int = 10) -> dict:
        '''
        Converting input JSON to TPOT config_dict
        :param points: Amount of points a uniform_float should be split into
        :param config: Collection with the configurations
        :return: config_dict for TPOTClassifier() or TPOTRegressor()
        '''
        config_dict = dict()
        for algorithm, conf in config.items():
            d = self.convert_single(conf, points)
            config_dict[algorithm] = d
        return config_dict

    # noinspection PyMethodOverriding
    def convert_single(self, config: MetaConfig, points: int) -> dict:
        """Map each feature to the finite candidate set TPOT expects."""
        d = dict()
        for key, value in config.items():
            if value.type == CATEGORICAL:
                d[key] = value.choices
            if value.type == UNI_INT:
                d[key] = range(value.lower, value.upper)
            if value.type == UNI_FLOAT:
                # Discretize the float range into `points` evenly spaced steps.
                steps = abs(value.lower - value.upper) / points
                d[key] = np.arange(value.lower, value.upper, steps)
        return d
class HyperoptConverter(BaseConverter):
    """Converts configurations into a hyperopt search space (hp.choice tree)."""

    def __init__(self, as_scope: bool = False):
        # as_scope: when True, leaves are wrapped in a pyll scope call that
        # instantiates the sklearn estimator directly.
        self.as_scope = as_scope

    def convert(self, config: MetaConfigCollection) -> hp.choice:
        '''
        Converting input JSON to a hyperopt configuration space
        :param config: Collection with the configurations
        :return: hp.choice over one sub-space per algorithm
        '''
        config_space = []
        for key, conf in config.items():
            d = self.convert_single(conf, key)
            config_space.append(d)
        return hp.choice('estimator_type', config_space)

    # noinspection PyMethodOverriding
    def convert_single(self, config: MetaConfig, algorithm: str = '') -> dict:
        """Convert one algorithm; conditional parameters become nested
        hp.choice branches (only a single parent is supported)."""
        parents = set()
        for key, param in config.items():
            if param.has_condition():
                parents.add(param.condition['parent'])
        if len(parents) > 1:
            raise ValueError('More than one parent is currently no supported')
        for parent in parents:
            label = 'custom_{}'.format(algorithm)
            c = config.dict[parent]
            if c.type != CATEGORICAL:
                raise ValueError('Non categorical parameter has children')
            # One sub-space per possible parent value.
            l = [self.__get_algo_config(config, algorithm, parent, choice) for choice in c.choices]
            return hp.choice(label, l)
        return self.__get_algo_config(config, algorithm)

    def __get_algo_config(self, config: MetaConfig, algorithm: str, parent: str = None, parent_value: str = None):
        # Build the flat parameter dict for one (parent, parent_value) branch.
        d = {}
        for parameter, value in config.items():
            label = 'custom_{}_{}_{}'.format(algorithm, parent_value if parent_value is not None else '', parameter)
            if parameter == parent:
                d[parameter] = parent_value
            else:
                # Skip parameters conditioned on a different parent value.
                if value.has_condition() and value.condition['parent'] == parent and parent_value not in \
                        value.condition['value']:
                    continue
                if value.type == UNI_INT:
                    # TODO check if difference between hyperopt and hyperopt-sklearn
                    # d[parameter] = hp.quniform(label, value.lower, value.upper, 1)
                    d[parameter] = scope.int(hp.quniform(label, value.lower, value.upper, 1))
                elif value.type == UNI_FLOAT:
                    d[parameter] = hp.uniform(label, value.lower, value.upper)
                elif value.type == CATEGORICAL:
                    d[parameter] = hp.choice(label, value.choices)
        if self.as_scope:
            return scope.generate_sklearn_estimator(algorithm, **d)
        else:
            if len(algorithm) > 0:
                d['algorithm'] = algorithm
            return d

    @staticmethod
    @scope.define
    def generate_sklearn_estimator(estimator_name, *args, **kwargs):
        # Resolve "package.module.Class" and instantiate it with the sampled
        # hyperparameters.
        module_name = estimator_name.rpartition('.')[0]
        class_name = estimator_name.split('.')[-1]
        module = import_module(module_name)
        class_ = getattr(module, class_name)
        return class_(*args, **kwargs)

    def inverse(self, config: Dict, config_space: MetaConfigCollection) -> Dict:
        """Translate hyperopt's flat, label-keyed result back into
        {'algorithm': ..., <param>: ...} form."""
        algorithm = list(config_space.algos.keys())[config['estimator_type']]
        d = {'algorithm': algorithm}
        definition = config_space.algos[algorithm]
        conditional = None
        for key, value in config.items():
            if key == 'estimator_type':
                continue
            # Strip the 'custom_<algorithm>' prefix ('custom_' is 7 chars).
            k = key[7 + len(algorithm):]
            if len(k) == 0:
                continue
            if k.startswith('__'):
                k = k[2:]
            else:
                # Label carries the conditional parent's value before the
                # parameter name; recover it once.
                if conditional is None:
                    ls = [hyper.condition['parent'] for hyper in definition.dict.values() if hyper.has_condition()]
                    if len(set(ls)) != 1:
                        raise ValueError('ConfigSpaces with multiple conditional hyperparameters are not supported')
                    conditional = k[1:].split('_')[0]
                    d[ls[0]] = conditional
                k = k[1 + len(conditional) + 1:]
            if definition.dict[k].type == CATEGORICAL:
                # hyperopt returns the choice index; map it back to the value.
                value = definition.dict[k].choices[value]
            d[k] = value
        return d
class BtbConverter(BaseConverter):
    """Converts configurations into BTB tuner definition dicts."""

    def convert(self, config: MetaConfigCollection) -> list:
        # One BTB definition per algorithm.
        ls = []
        for algorithm, conf in config.items():
            ls.append(self.convert_single(conf, algorithm))
        return ls

    # noinspection PyMethodOverriding
    def convert_single(self, config: MetaConfig, name: str = '') -> dict:
        """Build one BTB definition: hyperparameter specs plus the split into
        root and conditional hyperparameters."""
        hyperparamters = {}
        root = []
        conditional = {}
        for key, value in config.items():
            if value.type == CATEGORICAL:
                # BTB distinguishes bool from string categoricals.
                t = 'bool' if value.choices[0] in [True, False] else 'string'
                hyperparamters[key] = {'type': t, 'values': value.choices}
            if value.type == UNI_INT:
                hyperparamters[key] = {'type': 'int', 'range': [value.lower, value.upper]}
            if value.type == UNI_FLOAT:
                hyperparamters[key] = {'type': 'float', 'range': [value.lower, value.upper]}
            if value.condition is None:
                root.append(key)
            else:
                # Group conditional parameter names by parent value.
                d = conditional.setdefault(value.condition['parent'], {})
                for v in value.condition['value']:
                    d.setdefault(v, []).append(key)
        d = {
            'name': name,
            'class': name,
            'hyperparameters': hyperparamters,
            'root_hyperparameters': root,
            'conditional_hyperparameters': conditional
        }
        return d
class NaiveSearchConverter(BaseConverter, abc.ABC):
    """Shared base for grid/random search: walks each config and delegates
    per-feature conversion to `_get_algo_config`."""

    def __init__(self):
        # Features already emitted (subclasses may mark extra nodes done).
        self.processed_nodes = set()

    def convert(self, config: MetaConfigCollection):
        estimators = {}
        for name, conf in config.items():
            d = self.convert_single(conf)
            estimators.update({name: d})
        return estimators

    def convert_single(self, conf: MetaConfig) -> dict:
        d = {}
        self.processed_nodes = set()  # reset per algorithm
        graph = ConfigInheritanceGraph(conf)
        for key, value in conf.items():
            if key in self.processed_nodes:
                continue
            d.update({key: self._get_algo_config(key, value, graph)})
        return d

    @abc.abstractmethod
    def _get_algo_config(self, key, value: ConfigFeature, graph: ConfigInheritanceGraph):
        """Convert one feature; implemented by the concrete search converter."""
        pass
class RandomSearchConverter(NaiveSearchConverter):
    """Maps each feature to a sampling domain usable by sklearn-style
    random search (ranges, scipy distributions, or choice lists)."""

    def _get_algo_config(self, key, value: ConfigFeature, graph: ConfigInheritanceGraph):
        self.processed_nodes.add(key)
        if value.type == UNI_INT:
            return range(value.lower, value.upper)
        if value.type == UNI_FLOAT:
            # Uniform distribution over [lower, upper].
            return scipy.stats.uniform(loc=value.lower, scale=value.upper - value.lower)
        if value.type == CATEGORICAL:
            return [choice for choice in value.choices]
        raise ValueError('Unknown type {}'.format(value.type))
class GridSearchConverter(NaiveSearchConverter):
    """Maps each feature to a finite grid of at most ~n candidate values."""

    def __init__(self, n: int = 10):
        super().__init__()
        # Target number of grid points per numeric feature.
        self.n = n

    def _get_algo_config(self, key, value: ConfigFeature, graph: ConfigInheritanceGraph):
        self.processed_nodes.add(key)
        if value.type == UNI_INT:
            if value.lower == value.upper:
                return [value.lower]
            size = abs(value.lower - value.upper)
            if size <= self.n:
                # Small range: enumerate every integer.
                return np.arange(value.lower, value.upper, 1, dtype=int)
            else:
                # Large range: subsample with a stride of ceil(size / n).
                return np.arange(value.lower, value.upper, math.ceil(size / self.n), dtype=int)
        elif value.type == UNI_FLOAT:
            if value.lower == value.upper:
                return [value.lower]
            return np.linspace(value.lower, value.upper, self.n)
        elif value.type == CATEGORICAL:
            choices_list = []
            for choice in value.choices:
                choices_list.append(choice)
            return choices_list
        else:
            raise ValueError('Unknown type {}'.format(value.type))
class RoBoConverter(BaseConverter):
    """Converts configurations into RoBO's (lower, upper, names) bound arrays;
    categorical features are encoded as an index range."""

    def convert(self, config: MetaConfigCollection):
        estimators = {}
        for name, conf in config.items():
            d = self.convert_single(conf)
            estimators.update({name: d})
        return estimators

    def convert_single(self, config: MetaConfig) -> object:
        lower = []
        upper = []
        names = []
        for name, value in config.items():
            if value.type == UNI_FLOAT or value.type == UNI_INT:
                lower.append(value.lower)
                upper.append(value.upper)
                names.append(name)
            elif value.type == CATEGORICAL:
                # Encode a categorical as its choice index [0, n_choices-1].
                lower.append(0)
                upper.append(len(value.choices) - 1)
                names.append(name)
            else:
                raise ValueError('Unknown type {}'.format(value.type))
        return np.array(lower), np.array(upper), names

    def inverse(self, config: Dict, config_space: MetaConfigCollection) -> Dict:
        """Recover the algorithm by matching the result's key set against the
        known configs, then decode indices/rounded values back to choices."""
        d = {}
        for key, hyper in config_space.items():
            if hyper.dict.keys() == config.keys():
                d['algorithm'] = key
                for key2, meta in hyper.items():
                    if meta.type == CATEGORICAL:
                        # RoBO returns a float position; round to the index.
                        value = meta.choices[round(config[key2])]
                    elif meta.type == UNI_INT:
                        value = round(config[key2])
                    else:
                        value = config[key2]
                    d[key2] = value
                break
        else:
            # for/else: no config matched the key set.
            raise ValueError('Unable to determine algorithm: {}'.format(str(config)))
        return d
class GPyOptConverter(BaseConverter):
    """Converts a single-algorithm configuration into a GPyOpt domain list."""

    def convert(self, config: MetaConfigCollection) -> object:
        # CASH (joint algorithm + hyperparameter selection) cannot be
        # expressed as one flat GPyOpt domain.
        # Fixed: the message previously referred to RoBo (copy/paste error).
        raise NotImplementedError('GPyOpt is not suited for CASH solving')

    def convert_single(self, config: MetaConfig) -> object:
        """Return a list of GPyOpt variable descriptors for one estimator."""
        ls = []
        for name, value in config.items():
            if value.type == UNI_INT:
                ls.append(
                    {
                        'name': name,
                        'type': 'discrete',
                        'domain': (value.lower, value.upper)
                    }
                )
            elif value.type == UNI_FLOAT:
                ls.append(
                    {
                        'name': name,
                        'type': 'continuous',
                        'domain': (value.lower, value.upper)
                    }
                )
            elif value.type == CATEGORICAL:
                # BUG fix: this branch was copy-pasted from the continuous
                # case and read `value.lower`/`value.upper`, which categorical
                # features do not define.  GPyOpt encodes categoricals as a
                # 'categorical' variable over choice indices.
                ls.append(
                    {
                        'name': name,
                        'type': 'categorical',
                        'domain': tuple(range(len(value.choices)))
                    }
                )
            else:
                raise ValueError('Unknown type {}'.format(value.type))
        return ls
class OptunityConverter(BaseConverter):
    """Converts configurations into Optunity's nested search-space dicts."""

    def convert(self, config: MetaConfigCollection) -> object:
        # TODO generated configuration dict contains empty string key
        d = {}
        for key, conf in config.items():
            d[key] = self.convert_single(conf)
        return {'algorithm': d}

    def convert_single(self, config: MetaConfig) -> object:
        """Convert one algorithm; a conditional parent becomes a nested dict
        with one branch per parent choice (single parent only)."""
        parents = set()
        for key, param in config.items():
            if param.has_condition():
                parents.add(param.condition['parent'])
        if len(parents) > 1:
            raise ValueError('More than one parent is currently no supported')
        for parent in parents:
            c = config.dict[parent]
            if c.type != CATEGORICAL:
                raise ValueError('Non categorical parameter has children')
            d = {}
            for choice in c.choices:
                d[choice] = self.__get_algo_config(config, parent, choice)
            return {parent: d}
        return self.__get_algo_config(config)

    @staticmethod
    def __get_algo_config(config: MetaConfig, parent: str = None, parent_value: str = None):
        # Build the parameter dict for one (parent, parent_value) branch.
        # Optunity models discrete values as {str(value): None} dicts and
        # continuous values as [lower, upper] lists.
        d = {}
        for parameter, value in config.items():
            if parameter == parent:
                continue
            # Skip parameters conditioned on a different parent value.
            if value.has_condition() and value.condition['parent'] == parent and parent_value not in \
                    value.condition['value']:
                continue
            if value.type == UNI_INT:
                tmp = {}
                for i in range(value.lower, value.upper + 1):
                    tmp[str(i)] = None
                d[parameter] = tmp
            elif value.type == UNI_FLOAT:
                d[parameter] = [value.lower, value.upper]
            elif value.type == CATEGORICAL:
                tmp = {}
                for c in value.choices:
                    tmp[str(c)] = None
                d[parameter] = tmp
        return d
# Registry mapping an optimizer's display name to the converter instance
# that produces its search-space format.
CONVERTER_MAPPING = {
    'Random Search': RandomSearchConverter(),
    'Grid Search': GridSearchConverter(),
    'SMAC': ConfigSpaceConverter(),
    'hyperopt': HyperoptConverter(),
    'BOHB': ConfigSpaceConverter(),
    'RoBo gp': RoBoConverter(),
    'RoBO': RoBoConverter(),
    'Optunity': OptunityConverter(),
    'BTB': BtbConverter()
}
|
23,664 | 4161d61f0269f619e2a7b552313e1a19d5c4e9f2 | # encoding=utf-8
import os

# Python 2 script: for each raw video file, create a per-video directory and
# extract one frame per second with ffmpeg.
root_dir = '../data/iqiyi/'
for video_path in os.listdir(root_dir):
    video_dir = root_dir + video_path
    if os.path.isfile(video_dir):
        # presumably file names look like <prefix>_<video_id>.<ext> — confirm
        video_id = video_path.strip().split('.')[0].split('_')[-1]
        print 'loading video: '+ video_id
        out_dir = '../data/iqiyi/%s/' % (video_id)
        os.mkdir(out_dir)
        # -r 1: one frame per second; '%5d' numbers the output jpgs
        decode_cmd = '../bin/ffmpeg -v 0 -i %s -r 1 ../data/iqiyi/%s/%s_%s.jpg' % (video_dir, video_id, video_id, '%5d')
        os.system(decode_cmd)
|
23,665 | 91f14cfce4270574eaeb0dcddc68b3541532da94 | # Generated by Django 3.0.6 on 2020-08-02 08:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `ward` foreign key (defaulting to
    # pk=1) to UserProfile.

    dependencies = [
        ('home', '0036_auto_20200802_1437'),
        ('user', '0003_auto_20200727_1521'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='ward',
            field=models.ForeignKey(blank=True, default=1, on_delete=django.db.models.deletion.CASCADE, to='home.ward'),
        ),
    ]
|
23,666 | 0e6b1da7c30d349ab9e36a8525fc7669020259ac | import numpy as np
#import matplotlib.pylab as pl
import regression as reg
from trajectories.Trajectory import Trajectory, Pattern
from kernels import CurlFreeKernel as cfk, DivFreeKernel as dfk
def run_models(samples):
    # Build a grid-pattern trajectory, fit a regression with a combined
    # divergence-free + curl-free kernel on `samples` drifters, and return
    # the "ge_av_raw" error (a list — callers read element [0]).
    trajectory = Trajectory(nsamples=samples, integration_time=30, n_timesteps=15, pattern=Pattern.grid)
    div_k = dfk.DivFreeK(3)
    curl_k = cfk.CurlFreeK(3)
    k = div_k + curl_k
    regression = reg.Regression(dim=3)
    regression.initialize_samples(ndrifters=samples, trajectory=trajectory)
    regression.run_model(kernel=k)
    cdk_e = regression.compute_error(errors=["ge_av_raw"])
    return cdk_e
def main():
    # Sweep the drifter count 1, 21, 41, ... 481, saving the accumulated
    # errors to CSV after every successful run.
    cdk_errors = []
    sample = 1
    while sample <= 500:
        print(sample)
        try:
            cdk_e = run_models(samples=sample)
            cdk_errors.append(cdk_e[0])
            sample = sample + 20
            np.savetxt("cdf_errors.csv", cdk_errors)
        except Exception as e:
            # NOTE(review): `sample` is only advanced on success, so a
            # persistently failing size is retried forever (intentional
            # retry-on-transient-failure? — confirm).
            print("An error ocurred")
            print(str(e))
            pass
    #np.savetxt("rbf_errors.csv", rbf_errors)
    #np.savetxt("cdf_errors.csv", cdk_errors)


if __name__ == "__main__":
    main()
|
sayilar = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
           11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
teksayilar = []   # odd numbers
ciftsayilar = []  # even numbers
# Split the values of `sayilar` into odd and even lists, then report how
# many elements each list holds (as the original requirement comment asked).
# BUG fixes: the original `while` loop never incremented `i` (infinite loop)
# and appended the loop index instead of the list value.
for sayi in sayilar:
    if sayi % 2 == 0:
        ciftsayilar.append(sayi)
    else:
        teksayilar.append(sayi)
print(teksayilar)
print("Tek sayi adedi: %d, cift sayi adedi: %d" % (len(teksayilar), len(ciftsayilar)))
23,668 | 1d80f571a1409e0300556485a4a4baa765ce6fbd | '''
Created on 04/04/2009
@author: Pc
'''
from framework.base.base import Component
from framework.base.base import ComponentFamily
class StrValueComponent(Component):
    """Component holding arbitrary string-valued attributes set at runtime."""

    family = ComponentFamily.strValue

    def __init__(self, parent):
        Component.__init__(self, parent)

    def addAttr(self, attrName, attrValue):
        """Create or overwrite attribute *attrName* with *attrValue*."""
        setattr(self, attrName, attrValue)

    def removeAttr(self, attrName):
        """Delete attribute *attrName*.

        BUG fix: the original called `remattr`, which does not exist
        (NameError at runtime); `delattr` is the builtin that removes an
        attribute.
        """
        delattr(self, attrName)

    def getAtrValue(self, attrName):
        """Return the value of attribute *attrName*.

        BUG fix: the original called `getattr` but discarded the result,
        always returning None.
        """
        return getattr(self, attrName)
23,669 | d7bf16832050df75f96edd67fac6a4c242586d6a | from db_connect import cursor, conn
from flask import render_template, request, redirect
from flask import Blueprint
account = Blueprint('account', __name__)
def error_info():
    # Redirect used when a request arrives without a valid login session.
    return redirect('/login&no_login')
def error_login():
    # Redirect used when authentication fails.
    return redirect('/login&fail_login')
# User identity check
def is_user(account):
    """Return True when *account* exists, is marked online, and the request
    originates from the IP recorded at login time."""
    # Parameterized query — the original interpolated `account` into the SQL
    # string (injection risk, since the value is caller-supplied).
    cursor.execute('select isOnline, ip from userinfo where account = %s',
                   (account,))
    user = cursor.fetchone()
    ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    # Missing account, offline flag, or IP mismatch all count as not logged in.
    if not user or user[0] != 1 or user[1] != ip:
        return False
    return True
@account.route('/', methods=['GET'])  # redirect to login.html, GET request
@account.route('/login', methods=['GET'])
def show():
    # Bare entry points funnel into the stateful login URL.
    return redirect('/login&normal')
@account.route('/login&<state>', methods=['POST', 'GET'])
def login(state):
    """Render the login page (GET) or authenticate posted credentials (POST).

    On success, marks the account online, records the client IP, and
    redirects to the index page matching the account type; any failure
    redirects back with a failure flag.
    """
    if request.method == 'GET':
        print(state)
        return render_template('/account/login.html', state=state)

    account_name = request.form['account']
    password = request.form['password']
    # BUG fix: the original test was `len(account) == 0 | len(password) == 0`.
    # `|` binds tighter than `==`, so it evaluated as
    # `len(account) == len(password)` — rejecting any login whose username
    # and password happened to have equal length, and never catching empty
    # fields.  `or` expresses the intended check.
    if len(account_name) == 0 or len(password) == 0:
        return redirect('/login&fail_login')

    # Direct parameterized lookup replaces the original full-table scan and
    # string-built SQL (injection risk on user-controlled form input).
    cursor.execute('select password, usertype from userinfo where account = %s',
                   (account_name,))
    user = cursor.fetchone()
    if user is None or password != user[0]:
        return error_login()

    # Mark the account online and remember the client IP for is_user().
    ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
    cursor.execute('update userinfo set isOnline = true, ip = %s where account = %s',
                   (ip, account_name))
    conn.commit()

    account_type = request.form['type']
    if account_type == 'admin' and user[1] == 'admin':
        return redirect('/index_admin/account=%s/index/' % account_name)
    elif account_type == 'student' and user[1] == 'student':
        cursor.execute('select stu_number from student where account = %s',
                       (account_name,))
        stu_number = cursor.fetchone()[0]
        return redirect('/index_student/account=%s&user_no=%s/index/' %
                        (account_name, stu_number))
    elif account_type == 'teacher' and user[1] == 'teacher':
        cursor.execute('select teac_number from teacher where account = %s',
                       (account_name,))
        teac_number = cursor.fetchone()[0]
        return redirect('/index_teacher/account=%s&user_no=%s/index/' %
                        (account_name, teac_number))
    # Removed the original's unreachable cursor.close()/conn.close(): the
    # connection is module-wide and shared, so it must stay open.
    return error_login()
@account.route('/logout/account=<account>', methods=['GET'])
def logout(account):
    """Mark *account* offline and return to the login page."""
    # Parameterized update — `account` comes from the URL (untrusted input),
    # and the original interpolated it into the SQL string.
    cursor.execute('update userinfo set isOnline = false where account = %s',
                   (account,))
    conn.commit()
    return redirect('/login')
@account.route('/regist', methods=['POST', 'GET'])
def regist():
    """Render the registration form (GET) or create the account (POST).

    Inserts the base record into `userinfo`, then a type-specific record
    into `teacher` or `student`, and redirects back to the login page.
    """
    if request.method == 'GET':
        return render_template('/account/regist.html')

    account_name = request.form.get('account')
    pw = request.form.get('password')
    username = request.form.get('username')
    usertype = request.form.get('usertype')
    # Parameterized statements throughout — the original interpolated raw
    # form input into the SQL strings (injection risk).
    cursor.execute(
        'insert into userinfo(account, password, username, usertype) '
        'values (%s, %s, %s, %s)',
        (account_name, pw, username, usertype))

    # Type-specific profile record.
    if usertype == 'teacher':
        cursor.execute(
            'insert into teacher(account, teac_name, teac_number, col_number, teac_mail, teac_office) '
            'values (%s, %s, %s, %s, %s, %s)',
            (account_name,
             request.form.get('teac_name'),
             request.form.get('teac_number'),
             request.form.get('col_number'),
             request.form.get('teac_mail'),
             request.form.get('teac_office')))
    else:
        cursor.execute(
            'insert into student(account, stu_name, stu_number, major_number, stu_phone, stu_birth) '
            'values (%s, %s, %s, %s, %s, %s)',
            (account_name,
             request.form.get('stu_name'),
             request.form.get('stu_number'),
             request.form.get('major_number'),
             request.form.get('stu_phone'),
             request.form.get('stu_birth')))
    conn.commit()
    # BUG fix: the redirect target had been mangled to '/login®_ok' ("&reg"
    # decoded as the ® HTML entity); restored the '/login&<state>' form that
    # the login route expects.
    return redirect('/login&reg_ok')
|
23,670 | 224ff76b51b1321f8a153037802d18a4d06d6516 | from selenium.webdriver.common.by import By
from base.base_action import BaseAction
class GoodsSearchPage(BaseAction):
    """Page object for the goods search-result screen."""

    # "Add to cart" button locator (used here to navigate to the goods detail page)
    add_to_cart_btn = (By.CSS_SELECTOR, ".p-btn > a:nth-child(1)")

    def click_add_to_cart_btn(self):
        """Click the add-to-cart button and return the click result."""
        return self.click(self.add_to_cart_btn)
23,671 | bf6b7b57c8d62ad547007578fc76401833cc012c | # Copyright (c) 2021 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import os
from PIL import Image
import tensorflow as tf
from tensorflow import keras
from tensorflow.python import ipu
from time import perf_counter
from model import model_fn
from model_utils import set_pipeline_options
from utils import configure_ipu, PerfCallback
from losses import dice_coef_accuracy_fn, dice_ce_loss, ce_loss
logger = logging.getLogger(__name__)
def get_optimizer(args):
    """Build the training optimizer described by *args*.

    Gradients are divided by (replicas * gradient accumulation count) via a
    gradient transformer, so the applied update corresponds to the mean over
    the effective global batch.  ``args.optimizer == "adam"`` selects Adam
    with a fixed learning rate; anything else selects SGD with an
    exponentially decaying schedule.
    """

    def gradient_normalizer(grads_and_vars):
        # Average each gradient over all replicas and accumulation steps.
        return [(grad / args.replicas / args.gradient_accumulation_count, var) for grad, var in grads_and_vars]

    if args.optimizer == "adam":
        optimizer_instance = keras.optimizers.Adam(
            learning_rate=args.learning_rate, epsilon=1e-4, gradient_transformers=[gradient_normalizer]
        )
    else:
        # Create learning rate schedule
        learning_rate_fn = tf.keras.optimizers.schedules.ExponentialDecay(
            args.learning_rate, decay_steps=args.num_epochs, decay_rate=args.decay_rate, staircase=False
        )
        optimizer_instance = keras.optimizers.SGD(
            learning_rate=learning_rate_fn, momentum=args.momentum, gradient_transformers=[gradient_normalizer]
        )

    # Use loss scaling for FP16
    # (positional args presumably map to dynamic=False, initial_scale=args.loss_scale,
    #  i.e. a fixed loss scale — TODO confirm against the pinned TF version)
    if args.dtype == "float16":
        optimizer_instance = tf.keras.mixed_precision.LossScaleOptimizer(optimizer_instance, False, args.loss_scale)
    return optimizer_instance
def create_model(args):
    """Construct and compile the Keras model for the requested device layout.

    - more than one IPU per replica: pipeline the model across IPUs;
    - exactly one IPU: use on-device gradient accumulation instead;
    - zero IPUs (CPU run): no steps_per_execution override is applied.
    """
    model = keras.Model(*model_fn(args))
    if args.nb_ipus_per_replica > 1:
        set_pipeline_options(model, args)
        model.print_pipeline_stage_assignment_summary()
    elif args.nb_ipus_per_replica == 1:
        model.set_gradient_accumulation_options(
            gradient_accumulation_steps_per_replica=args.gradient_accumulation_count,
            offload_weight_update_variables=False,
        )
    model.compile(
        optimizer=get_optimizer(args),
        loss=dice_ce_loss,
        # Number of micro batches to process sequentially in a single execution
        steps_per_execution=args.steps_per_execution if args.nb_ipus_per_replica > 0 else None,
        metrics=[dice_coef_accuracy_fn, ce_loss],
    )
    return model
def train_model(args, model, ds_train, ds_eval):
    """Train *model* and optionally validate it.

    Returns ``(eval_accuracy, eval_loss)`` history lists when ``args.eval``
    is set, otherwise ``(None, None)``.

    On IPU, one Keras "epoch" here is one device execution of
    ``args.steps_per_execution`` steps, so the number of fit epochs is
    derived from the total step count.  NOTE: this function mutates
    ``args.steps_per_execution`` and ``args.eval_freq`` when they are
    inconsistent with the requested run length.
    """
    callbacks = []
    # Record throughput
    callbacks.append(PerfCallback(steps_per_execution=args.steps_per_execution, batch_size=args.micro_batch_size))
    eval_accuracy = None
    eval_loss = None
    if args.nb_ipus_per_replica <= 1:
        executions = args.num_epochs
    else:
        # Total optimizer steps requested across the whole run.
        total_num_steps = args.gradient_accumulation_count * args.num_epochs
        if total_num_steps < args.steps_per_execution:
            logger.warning(
                f"The steps per execution is reduced to the total number of steps ({total_num_steps})."
                f"To keep the user-defined steps per execution, gradient accumulation count"
                f" * nb of epochs ({args.gradient_accumulation_count * args.num_epochs}) "
                f"needs to be at least the nb of steps per execution ({args.steps_per_execution})"
            )
            executions = 1
            args.steps_per_execution = total_num_steps
        else:
            executions = int(total_num_steps / args.steps_per_execution)
    additional_args = {}
    if args.eval:
        # Checkpoint on the *validation* dice metric when evaluating.
        callbacks.append(
            keras.callbacks.ModelCheckpoint(
                filepath=os.path.join(args.model_dir, "checkpoints"),
                monitor="val_dice_coef_accuracy_fn",
                save_best_only=True,
                save_weights_only=True,
            )
        )
        if args.eval_freq > executions:
            logger.warning(
                f"The number of executions in model.fit ({executions}) needs to be at least the validation frequency ({args.eval_freq})."
            )
        args.eval_freq = min(args.eval_freq, executions)
        additional_args = {
            "validation_data": ds_eval,
            "validation_steps": args.gradient_accumulation_count,
            "validation_freq": args.eval_freq,
        }
    elif not args.benchmark:
        # No validation set: checkpoint on the training dice metric instead.
        callbacks.append(
            keras.callbacks.ModelCheckpoint(
                filepath=os.path.join(args.model_dir, "checkpoints"),
                monitor="dice_coef_accuracy_fn",
                save_best_only=True,
                save_weights_only=True,
            )
        )
    train_result = model.fit(
        ds_train, steps_per_epoch=args.steps_per_execution, epochs=executions, callbacks=callbacks, **additional_args
    )
    if args.eval:
        eval_accuracy = train_result.history["val_dice_coef_accuracy_fn"]
        eval_loss = train_result.history["val_loss"]
    return eval_accuracy, eval_loss
def infer_model(args, model, ds_infer):
    """Run inference with *model*.

    In benchmark mode: runs one warm-up predict, times a second predict and
    logs the throughput.  Otherwise: loads the best checkpoint (when
    ``args.model_dir`` is set), predicts segmentation masks, and writes them
    as a single multi-page TIFF under ``<model_dir>/predictions``.
    """
    if args.benchmark:
        # Warmup
        model.predict(ds_infer, steps=args.steps_per_execution)
        t0 = perf_counter()
        model.predict(ds_infer, steps=args.steps_per_execution)
        t1 = perf_counter()
        duration = t1 - t0
        total_nb_samples = args.steps_per_execution * args.micro_batch_size
        tput = f"{total_nb_samples / duration:0.15f}"
        logger.info(f"Inference\t Time: {duration} seconds\t throughput: {tput} samples/sec.")
    else:
        if args.model_dir:
            model.load_weights(os.path.join(args.model_dir, "checkpoints")).expect_partial()
        predictions = model.predict(ds_infer, steps=args.steps_per_execution)
        # Per-pixel argmax over class logits -> 0/255 binary mask image.
        binary_masks = [np.argmax(p, axis=-1).astype(np.uint8) * 255 for p in predictions]
        prediction_tif = [
            Image.fromarray(mask).resize(size=(512, 512), resample=Image.BILINEAR) for mask in binary_masks
        ]
        output_dir = os.path.join(args.model_dir, "predictions")
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # All masks are appended into one multi-page TIFF file.
        prediction_tif[0].save(
            os.path.join(output_dir, "test-masks.tif"),
            compression="tiff_deflate",
            save_all=True,
            append_images=prediction_tif[1:],
        )
        logger.info(f"Predictions saved at {output_dir}.")
def get_strategy(args):
    """Return the distribution strategy matching the target device:
    an IPUStrategy when IPUs are requested, otherwise a single-CPU strategy.
    """
    on_ipu = args.nb_ipus_per_replica > 0
    if on_ipu:
        logger.info("On IPU...")
        return ipu.ipu_strategy.IPUStrategy()
    logger.info("On CPU...")
    return tf.distribute.OneDeviceStrategy(device="/cpu:0")
def unet(args, ds_train, ds_eval, ds_infer):
    """Top-level entry point: build, (optionally) train and (optionally) run
    inference on the U-Net model under the appropriate distribution strategy.

    Returns ``(eval_accuracy, eval_loss)`` from training when ``args.train``
    and ``args.eval`` are set, otherwise ``(None, None)``.
    """
    tf.keras.backend.clear_session()
    eval_accuracy = None
    eval_loss = None
    if args.nb_ipus_per_replica > 0:
        configure_ipu(args)
    strategy = get_strategy(args)
    # Model construction and execution must happen inside the strategy scope.
    with strategy.scope():
        model = create_model(args)
        model.summary()
        if args.train:
            logger.info("Training model...")
            eval_accuracy, eval_loss = train_model(args, model, ds_train, ds_eval)
            logger.info("Training complete")
        if args.infer:
            logger.info("Start inference...")
            infer_model(args, model, ds_infer)
            logger.info("Inference complete")
    return eval_accuracy, eval_loss
|
23,672 | d33fe3f9390758be2ca587f738bfdb95a6f8ab8f | from django.shortcuts import render
from django.http.response import HttpResponse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
# Create your views here.
class Index(View):
    """Minimal class-based view that echoes the HTTP method name."""

    def get(self, request):
        """Answer GET with the literal body 'get'."""
        response = HttpResponse("get")
        return response

    def post(self, request):
        """Answer POST with the literal body 'post'."""
        response = HttpResponse('post')
        return response
class isLogin(LoginRequiredMixin, View):
    """Same echo view as Index, but gated behind Django's login requirement."""

    def get(self, request):
        """Answer GET with the literal body 'get' (authenticated users only)."""
        response = HttpResponse('get')
        return response

    def post(self, request):
        """Answer POST with the literal body 'post' (authenticated users only)."""
        response = HttpResponse('post')
        return response
|
23,673 | 6094a8a205a56d98fab26662916cadc8649e3c41 | from operator import gt, le
from typing import Any, List, Optional, Union
import numpy as np
import pandas as pd
from bartpy.bartpy.errors import NoSplittableVariableException
from bartpy.bartpy.splitcondition import SplitCondition
def is_not_constant(series: np.ndarray) -> bool:
    """
    Quickly identify whether a series contains more than 1 distinct value

    Parameters
    ----------
    series: np.ndarray
        The series to assess

    Returns
    -------
    bool
        True if more than one distinct value found
    """
    if len(series) <= 1:
        return False
    # Compare every later element against the FIRST element.  The previous
    # implementation seeded the reference with None and started at index 1,
    # so element 0 was never compared: e.g. [5, 3, 3] was wrongly reported
    # as constant.  It also mixed `series[i]` with `series.data[i]`.
    first_value = series[0]
    for i in range(1, len(series)):
        if series[i] != first_value:
            return True
    return False
def ensure_numpy_array(X: Union[np.ndarray, pd.DataFrame]) -> np.ndarray:
    """Return the underlying ndarray of *X*, unwrapping a DataFrame if given."""
    return X.values if isinstance(X, pd.DataFrame) else X
def ensure_float_array(X: np.ndarray) -> np.ndarray:
    """Return a float-dtype copy of *X* (astype always copies)."""
    as_float = X.astype(float)
    return as_float
def format_covariate_matrix(X: Union[np.ndarray, pd.DataFrame]) -> np.ndarray:
    """Coerce *X* (ndarray or DataFrame) into a fresh float-dtype ndarray."""
    raw = X.values if isinstance(X, pd.DataFrame) else X
    return raw.astype(float)
def make_bartpy_data(X: Union[np.ndarray, pd.DataFrame],
                     y: np.ndarray,
                     normalize: bool=True) -> 'Data':
    """Build a Data container from raw covariates and targets.

    Covariates are coerced to a float ndarray; targets are cast to float and
    optionally normalized inside Data.
    """
    covariates = format_covariate_matrix(X)
    targets = y.astype(float)
    return Data(covariates, targets, normalize=normalize)
class CovariateMatrix(object):
    """Covariate matrix restricted to one node of a tree.

    Wraps the full design matrix ``X`` plus a boolean row ``mask`` (True =
    row excluded from this node), and lazily memoizes per-column facts the
    samplers request repeatedly: splittability, uniqueness, and max values.
    """

    def __init__(self,
                 X: np.ndarray,
                 mask: np.ndarray,
                 n_obsv: int,
                 unique_columns: List[int],
                 splittable_variables: List[int]):
        if type(X) == pd.DataFrame:
            X: pd.DataFrame = X
            X = X.values
        self._X = X
        self._n_obsv = n_obsv
        self._n_features = X.shape[1]
        self._mask = mask

        # Cache initialization.  Caches passed down from a parent node are kept
        # only where they stay valid under further row masking:
        #  - a column unique in the parent is unique in any subset (True kept),
        #  - a column constant in the parent is constant in any subset (False kept);
        # all other entries reset to None (= unknown, recomputed lazily).
        if unique_columns is not None:
            self._unique_columns = [x if x is True else None for x in unique_columns]
        else:
            self._unique_columns = [None for _ in range(self._n_features)]
        if splittable_variables is not None:
            self._splittable_variables = [x if x is False else None for x in splittable_variables]
        else:
            self._splittable_variables = [None for _ in range(self._n_features)]
        self._max_values = [None] * self._n_features
        self._X_column_cache = [None] * self._n_features
        self._max_value_cache = [None] * self._n_features
        # Lazily-built view of only the unmasked rows (see get_column).
        self._X_cache = None

    @property
    def mask(self) -> np.ndarray:
        # Boolean row filter; True marks rows excluded from this node.
        return self._mask

    @property
    def values(self) -> np.ndarray:
        # The full, unfiltered underlying matrix.
        return self._X

    def get_column(self, i: int) -> np.ndarray:
        """Return column *i* restricted to the unmasked rows (cached)."""
        if self._X_cache is None:
            self._X_cache = self.values[~self.mask, :]
        return self._X_cache[:, i]

    def splittable_variables(self) -> List[int]:
        """
        List of columns that can be split on, i.e. that have more than one unique value

        Returns
        -------
        List[int]
            List of column numbers that can be split on
        """
        # Fill in any unknown cache entries, then report the True ones.
        for i in range(0, self._n_features):
            if self._splittable_variables[i] is None:
                self._splittable_variables[i] = is_not_constant(self.get_column(i))

        output = [i for (i, x) in enumerate(self._splittable_variables) if x is True]
        return output

    @property
    def n_splittable_variables(self) -> int:
        # Count of splittable columns (forces the cache to be filled).
        output = len(self.splittable_variables())
        return output

    def is_at_least_one_splittable_variable(self) -> bool:
        """True when at least one column is splittable in this node."""
        # Fast path: any cached True short-circuits the full recomputation.
        if any(self._splittable_variables):
            return True
        else:
            output = len(self.splittable_variables()) > 0
            return output

    def random_splittable_variable(self) -> str:
        """
        Choose a variable at random from the set of splittable variables

        Returns
        -------
            str - a variable name that can be split on
        """
        if self.is_at_least_one_splittable_variable():
            output = np.random.choice(np.array(self.splittable_variables()), 1)[0]
            return output
        else:
            raise NoSplittableVariableException()

    def is_column_unique(self, i: int) -> bool:
        """
        Identify whether feature contains only unique values, i.e. it has no duplicated values
        Useful to provide a faster way to calculate the probability of a value being selected in a variable

        Returns
        -------
        List[int]
        """
        if self._unique_columns[i] is None:
            self._unique_columns[i] = len(np.unique(self.get_column(i))) == self._n_obsv
        output = self._unique_columns[i]
        return output

    def max_value_of_column(self, i: int):
        # Max of the unmasked values in column i, memoized per column.
        if self._max_value_cache[i] is None:
            self._max_value_cache[i] = self.get_column(i).max()
        output = self._max_value_cache[i]
        return output

    def random_splittable_value(self, variable: int) -> Any:
        """
        Return a random value of a variable
        Useful for choosing a variable to split on

        Parameters
        ----------
        variable - str
            Name of the variable to split on

        Returns
        -------
        Any

        Notes
        -----
          - Won't create degenerate splits, all splits will have at least one row on both sides of the split
        """
        if variable not in self.splittable_variables():
            raise NoSplittableVariableException()
        max_value = self.max_value_of_column(variable)
        # Resample until we draw something below the max, so a "<= value"
        # split always leaves at least one row on each side.
        candidate = np.random.choice(self.get_column(variable))
        while candidate == max_value:
            candidate = np.random.choice(self.get_column(variable))
        return candidate

    def proportion_of_value_in_variable(self, variable: int, value: float) -> float:
        """Fraction of this node's rows whose *variable* equals *value*."""
        # Unique columns let us skip the scan: every value occurs exactly once.
        if self.is_column_unique(variable):
            output = 1. / self.n_obsv
            return output
        else:
            output = float(np.mean(self.get_column(variable) == value))
            return output

    def update_mask(self, other: SplitCondition) -> np.ndarray:
        """Return the row mask of the child produced by split condition *other*.

        Rows failing the condition are OR-ed into the existing mask.
        """
        if other.operator == gt:
            column_mask = self.values[:, other.splitting_variable] <= other.splitting_value
        elif other.operator == le:
            column_mask = self.values[:, other.splitting_variable] > other.splitting_value
        else:
            raise TypeError("Operator type not matched, only {} and {} supported".format(gt, le))

        output = self.mask | column_mask
        return output

    @property
    def variables(self) -> List[int]:
        # All column indices, splittable or not.
        output = list(range(self._n_features))
        return output

    @property
    def n_obsv(self) -> int:
        # Number of unmasked rows in this node.
        return self._n_obsv
class Target(object):
    """Target vector for one node, with optional (-0.5, 0.5) normalization
    and a lazily cached masked sum.
    """

    def __init__(self, y, mask, n_obsv, normalize, y_sum=None):
        if normalize:
            # Remember the original range so predictions can be mapped back.
            self.original_y_min, self.original_y_max = y.min(), y.max()
            self._y = self.normalize_y(y)
        else:
            self._y = y

        self._mask = mask
        # 0/1 weights: 1 for unmasked rows, 0 for masked rows.
        self._inverse_mask_int = (~self._mask).astype(int)
        self._n_obsv = n_obsv
        self.normalize = normalize
        if y_sum is None:
            self.y_sum_cache_up_to_date = False
            self._summed_y = None
        else:
            self.y_sum_cache_up_to_date = True
            self._summed_y = y_sum

    @staticmethod
    def normalize_y(y: np.ndarray) -> np.ndarray:
        """
        Normalize y into the range (-0.5, 0.5)
        Useful for allowing the leaf parameter prior to be 0, and to standardize the sigma prior

        Parameters
        ----------
        y - np.ndarray

        Returns
        -------
        np.ndarray

        Examples
        --------
        >>> Target.normalize_y([1, 2, 3])
        array([-0.5,  0. ,  0.5])
        """
        y_min, y_max = np.min(y), np.max(y)
        output = -0.5 + ((y - y_min) / (y_max - y_min))
        return output

    def unnormalize_y(self, y: np.ndarray) -> np.ndarray:
        """Map normalized values back to the original target scale
        (identity when normalization is disabled)."""
        if self.normalize == True:
            distance_from_min = y - (-0.5)
            total_distance = (self.original_y_max - self.original_y_min)
            output = self.original_y_min + (distance_from_min * total_distance)
        else:
            output=y
        return output

    @property
    def unnormalized_y(self) -> np.ndarray:
        # Stored values mapped back to the original scale.
        if self.normalize == True:
            output = self.unnormalize_y(self.values)
        else:
            output = self.values
        return output

    @property
    def normalizing_scale(self) -> float:
        # Width of the original target range (1.0 when not normalizing).
        if self.normalize == True:
            output = self.original_y_max - self.original_y_min
        else:
            output = 1.0
        return output

    def summed_y(self) -> float:
        """Sum of y over unmasked rows, memoized until update_y is called."""
        if self.y_sum_cache_up_to_date:
            return self._summed_y
        else:
            # Masked rows contribute 0 via the precomputed 0/1 weights.
            self._summed_y = np.sum(self._y * self._inverse_mask_int)
            self.y_sum_cache_up_to_date = True
            return self._summed_y

    def update_y(self, y) -> None:
        """Replace the target vector and invalidate the cached sum."""
        self._y = y
        self.y_sum_cache_up_to_date = False

    @property
    def values(self):
        # The (possibly normalized) stored target vector.
        return self._y
class PropensityScore(object):
    """Per-observation propensity scores with a row mask.

    Note: unlike the sibling Target/TreatmentAssignment classes, summed_p
    always recomputes — the cache flags are maintained but not consulted.
    """

    def __init__(self, p, mask, n_obsv, p_sum=None):
        self._p = p
        self._mask = mask
        # 0/1 weights: 1 for unmasked rows, 0 for masked rows.
        self._inverse_mask_int = (~self._mask).astype(int)
        self._n_obsv = n_obsv
        if p_sum is None:
            self.p_sum_cache_up_to_date = False
            self._summed_p = None
        else:
            self.p_sum_cache_up_to_date = True
            self._summed_p = p_sum

    def summed_p(self) -> float:
        """Sum of p over the unmasked rows (recomputed on every call)."""
        return np.sum(self._p * self._inverse_mask_int)

    def update_p(self, p) -> None:
        """Replace the score vector and mark the (unused) cache stale."""
        self._p = p
        self.p_sum_cache_up_to_date = False

    @property
    def values(self):
        """The stored propensity-score vector."""
        return self._p
class TreatmentAssignment(object):
    """Treatment-indicator vector with a row mask and a memoized masked sum."""

    def __init__(self, W, mask, n_obsv, W_sum=None):
        self._W = W
        self._mask = mask
        # 0/1 weights: 1 for unmasked rows, 0 for masked rows.
        self._inverse_mask_int = (~self._mask).astype(int)
        self._n_obsv = n_obsv
        if W_sum is None:
            self.W_sum_cache_up_to_date = False
            self._summed_W = None
        else:
            self.W_sum_cache_up_to_date = True
            self._summed_W = W_sum

    def summed_W(self) -> float:
        """Sum of W over unmasked rows, memoized until update_W is called."""
        if not self.W_sum_cache_up_to_date:
            self._summed_W = np.sum(self._W * self._inverse_mask_int)
            self.W_sum_cache_up_to_date = True
        return self._summed_W

    def update_W(self, W) -> None:
        """Replace the treatment vector and invalidate the cached sum."""
        self._W = W
        self.W_sum_cache_up_to_date = False

    @property
    def values(self):
        """The stored treatment-assignment vector."""
        return self._W
class Data(object):
    """
    Encapsulates the data within a split of feature space.
    Primarily used to cache computations on the data for better performance

    Parameters
    ----------
    X: np.ndarray
        The subset of the covariate matrix that falls into the split
    y: np.ndarray
        The subset of the target array that falls into the split
    normalize: bool
        Whether to map the target into -0.5, 0.5
    cache: bool
        Whether to cache common values.
        You really only want to turn this off if you're not going to the resulting object for anything (e.g. when testing)
    """

    def __init__(self,
                 X: np.ndarray,
                 y: np.ndarray,
                 mask: Optional[np.ndarray]=None,
                 normalize: bool=False,
                 unique_columns: List[int]=None,
                 splittable_variables: Optional[List[Optional[bool]]]=None,
                 y_sum: float=None,
                 n_obsv: int=None,
                 W: np.ndarray=None,
                 p: np.ndarray=None,
                 ):
        # No mask supplied means every row belongs to this node.
        if mask is None:
            mask = np.zeros_like(y).astype(bool)
        self._mask: np.ndarray = mask

        if n_obsv is None:
            n_obsv = (~self.mask).astype(int).sum()

        self._n_obsv = n_obsv

        self._X = CovariateMatrix(X, mask, n_obsv, unique_columns, splittable_variables)
        self._y = Target(y, mask, n_obsv, normalize, y_sum)
        condition_1 = W is not None
        condition_2 = p is not None
        # Treatment assignment and propensity scores are optional and must be
        # supplied together (causal variant of the model).
        if condition_1 and condition_2:
            # NOTE(review): the sums are seeded with 0 (not None), which marks
            # TreatmentAssignment's cache "up to date" at value 0 until the
            # first update_W call — confirm this is intended.
            self._W = TreatmentAssignment(W, mask, n_obsv, W_sum=0)
            self._p = PropensityScore(p, mask, n_obsv, p_sum=0)
        else:
            self._W=None
            self._p=None

    @property
    def W(self) -> np.ndarray:
        # TreatmentAssignment wrapper, or None when not in the causal setting.
        return self._W

    @property
    def p(self) -> np.ndarray:
        # PropensityScore wrapper, or None when not in the causal setting.
        return self._p

    @property
    def y(self) -> Target:
        # Target wrapper for this node.
        return self._y

    @property
    def X(self) -> CovariateMatrix:
        # Covariate-matrix wrapper for this node.
        return self._X

    @property
    def mask(self) -> np.ndarray:
        # Boolean row filter; True marks rows excluded from this node.
        return self._mask

    def update_y(self, y: np.ndarray) -> None:
        # Delegate to Target (invalidates its cached sum).
        self._y.update_y(y)

    def update_p(self, p: np.ndarray) -> None:
        # Delegate to PropensityScore; assumes the causal fields were set.
        self._p.update_p(p)

    def update_W(self, W: np.ndarray) -> None:
        # Delegate to TreatmentAssignment; assumes the causal fields were set.
        self._W.update_W(W)

    def __add__(self, other: SplitCondition) -> 'Data':
        """Apply split condition *other* and return the child node's Data.

        The child shares the same underlying arrays; only the mask grows,
        and the parent's monotone caches are carried over.
        """
        updated_mask = self.X.update_mask(other)
        hasattr(self, 'W')  # NOTE(review): no-op leftover; result unused
        if (self.W is not None) and (self.p.values is not None):
            output = Data(self.X.values,
                          self.y.values,
                          updated_mask,
                          normalize=False,
                          unique_columns=self._X._unique_columns,
                          splittable_variables=self._X._splittable_variables,
                          y_sum=other.carry_y_sum,
                          n_obsv=other.carry_n_obsv,
                          W=self.W.values,
                          p=self.p.values,
                          )
        else:
            output = Data(self.X.values,
                          self.y.values,
                          updated_mask,
                          normalize=False,
                          unique_columns=self._X._unique_columns,
                          splittable_variables=self._X._splittable_variables,
                          y_sum=other.carry_y_sum,
                          n_obsv=other.carry_n_obsv)
        return output
|
23,674 | e00f8046f1c1eac5838b6c54870f0cda7a55f935 | from peewee import *
from database import *
from cryptolens.exchanges.exchange import ExchangeInfo
class Asset(Model):
    """A tradable asset (crypto or fiat currency)."""
    name = CharField()
    # True for fiat currencies (USD, EUR, ...), False for crypto assets.
    fiat = BooleanField(default=False)
    # Row creation timestamp, assigned by the database.
    creation = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])

    class Meta:
        database = db
class Symbol(Model):
    """A ticker symbol string (e.g. "BTC"); linked to assets via AssetSymbol."""
    name = CharField()

    class Meta:
        database = db
class AssetSymbol(Model):
    """Many-to-many join table between Asset and Symbol."""
    asset = ForeignKeyField(Asset)
    symbol = ForeignKeyField(Symbol)

    class Meta:
        database = db
class Market(Model):
    """A trading pair on one exchange: marketAsset quoted in baseAsset."""
    exchange = ForeignKeyField(ExchangeInfo)
    # NOTE(review): `related_name` is peewee 2 syntax (peewee 3 renamed it to
    # `backref`) — confirm against the pinned peewee version.
    marketAsset = ForeignKeyField(Asset, related_name='market_asset')
    baseAsset = ForeignKeyField(Asset, related_name='base_asset')
    # Row creation timestamp, assigned by the database.
    creation = DateTimeField(constraints=[SQL('DEFAULT CURRENT_TIMESTAMP')])

    class Meta:
        database = db
|
23,675 | c9befe390e769e3a2b7c8e1dcc93d3d5f8e69238 | from rest_framework import serializers
from .models import *
class VisitorInfoSerializer(serializers.ModelSerializer):
    """Serializes a visitor's contact details."""
    class Meta:
        model = VisitorsInfo
        fields = ('id', 'name', 'phoneNumber', 'email',)
class MessageSerializer(serializers.ModelSerializer):
    """Serializes a message; exposes the related visitor's id read-only."""
    # Flattened from the `visitor` relation; not writable through the API.
    visitor_id = serializers.ReadOnlyField(source='visitor.id')

    class Meta:
        model = Message
        fields = ('id', 'date', 'messageContent', 'visitor_id',)
class BlogInfoSerializer(serializers.ModelSerializer):
    """Serializes a blog post's metadata and content."""
    class Meta:
        model = BlogInfo
        fields = ('id','title', 'intro', 'date', 'content',)
class AccessControlSerializer(serializers.ModelSerializer):
    """Serializes an access-log entry (timestamp plus requesting IP)."""
    class Meta:
        model = AccessControl
        fields = ('date','ip_address',)
|
23,676 | 4942e42ed34847bf53038ab31a0b2b747964aa79 | class Solution:
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
count = 0
for n in nums:
if count < 2 or n > nums[count-2]:
nums[count] = n
count += 1
return count
def removeDeplicates2(self,nums):
if not nums:
return 0
count = 0
for i in range(1,len(nums)):
if nums[count] != nums[i] or (count>0 and nums[count] != nums[count-1]) or count==0:
count += 1
nums[count] = nums[i]
return count+1 |
23,677 | bcb6ac7e861e511874f772d289f8773a46f41a4e | import re
# Python 2 script: scan a SogouT dump and print each document's DOCNO and URL.
# Matches one record: group 1 = DOCNO id, group 2 = text between the tags,
# group 3 = URL.  re.S lets '.' span newlines since a record covers lines.
pattern = re.compile(r'<DOCNO>(.*?)</DOCNO>(.*?)<URL>(.*?)</URL>',re.S)
datacontent = open('../data/SogouT.mini.txt')
l = datacontent.readline()
# Assumes each record starts on a line containing '<DOC>' and that DOCNO and
# URL fit within the next two physical lines -- TODO confirm dump layout.
while l !='':
    if '<DOC>' in l:
        content = l
        content += datacontent.readline()
        content += datacontent.readline()
        m = pattern.search(content)
        if m :
            print m.group(1)
            print m.group(3)
        else:
            # Record did not match the expected 3-line shape; dump it for debugging.
            print 'error'
            print content
    l = datacontent.readline()
"""
JCPDSTools version

Todo:

"""
# Current package version; keep in sync with the changelog below.
__version__ = "0.0.8"
"""
0.0.8 Strengthen help, disable dioptas JCPDS button.
0.0.7 Block dioptas JCPDS output function due to crash of peakpo.
0.0.6 Add Dioptas JCPDS output.  However, it is less tested than other functions.
0.0.5 Default comment writings and filenames change
0.0.4 Clean up UI.
0.0.3 Add some error checking functions for JCPDS
"""
|
23,679 | cede09c4128797dfdc61f3976aa663c767f4ae00 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-15 20:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds TagVariant, mapping alternative
    spellings to canonical taggit tags (jobs app)."""
    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
        ('jobs', '0008_job_tags'),
    ]
    operations = [
        migrations.CreateModel(
            name='TagVariant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # The variant spelling that should resolve to `tag`.
                ('variant', models.CharField(max_length=255)),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='taggit.Tag')),
            ],
        ),
    ]
|
23,680 | 89af435d6d2b22c430a51d2e6079c114b2976e93 | #coding:utf8
#英文单词练习模式
from ui_ch_wordwindow import Ui_ch_wordwindow
from PyQt5 import QtWidgets,QtCore
from PyQt5.QtWidgets import QMessageBox
from class_thread_show_time_speed import thread_show_time_speed
import datetime
import os
import re
#继承QWidget类,以及基本ui类
class ui_ch_wordwindow(QtWidgets.QWidget,Ui_ch_wordwindow):
    """Word-typing practice window (QWidget + generated UI mixin)."""
    degree1_signal = QtCore.pyqtSignal(str) # emitted when 'easy' difficulty is chosen
    degree2_signal = QtCore.pyqtSignal(str) # emitted when 'medium' difficulty is chosen
    degree3_signal = QtCore.pyqtSignal(str) # emitted when 'hard' difficulty is chosen
    def __init__(self,mainwindow):
        super(ui_ch_wordwindow,self).__init__()
        self.setupUi(self) # initialise the generated UI
        self.mainwindow = mainwindow
        # Re-validate the input box on every keystroke.
        self.ch_word_input.textChanged.connect(self.word_judge)
        self.number = 15 # cap on the number of words loaded (default 15)
        self.restart = True
        self.thread = thread_show_time_speed(self) # time/speed display thread
        self.load_data() # preload the practice data
        # Wire the difficulty menu actions to the signal emitters.
        self.mainwindow.action_ch_word_low.triggered.connect(self.d1_emit)
        self.mainwindow.action_ch_word_mid.triggered.connect(self.d2_emit)
        self.mainwindow.action_ch_word_high.triggered.connect(self.d3_emit)
        # Each difficulty signal triggers a reload of the word list.
        self.degree1_signal.connect(self.load_data)
        self.degree2_signal.connect(self.load_data)
        self.degree3_signal.connect(self.load_data)
    # Emitters for the difficulty signals: emit the level tag and untick the
    # other two menu entries so only one difficulty appears selected.
    def d1_emit(self):
        self.degree1_signal.emit('low')
        self.mainwindow.action_ch_word_mid.setChecked(False)
        self.mainwindow.action_ch_word_high.setChecked(False)
    def d2_emit(self):
        self.degree2_signal.emit('mid')
        self.mainwindow.action_ch_word_low.setChecked(False)
        self.mainwindow.action_ch_word_high.setChecked(False)
    def d3_emit(self):
        self.degree3_signal.emit('high')
        self.mainwindow.action_ch_word_low.setChecked(False)
        self.mainwindow.action_ch_word_mid.setChecked(False)
    # Load the practice data for the given difficulty (default 'low').
    def load_data(self,degree='low'):
        self.data = [] # reset the word list
        try:
            word_txt = open('./source/ch_word/ch_word_'+degree+'.txt','r')
            count = 0
            for line in word_txt:
                # Each line is "<word> <hint>" separated by a single space.
                line = line.replace('\n','')
                line = line.split(' ')
                self.data+=[line]
                count+=1
                if count == self.number: break
            word_txt.close()
        except:
            # NOTE(review): bare except silently hides missing/corrupt files.
            print('error')
        self.prepare_work()
    # Validate the input box: on a mistake, block further typing and show a
    # warning until the user corrects the text.
    def word_judge(self):
        input_text = self.ch_word_input.text()
        model_text = self.ch_word_set.text()
        if self.restart:
            if input_text == '': return
            self.restart = False
            # Start the thread that displays elapsed time and typing speed.
            self.thread = thread_show_time_speed(self)
            self.thread.start()
        if model_text[:len(input_text)] != input_text:
            self.warning_set.setText('*输入错误, 注意修改')
            stylesheet = '#ch_word_input{border:1px solid red}'
            self.setStyleSheet(stylesheet)
            # Freeze the max length so no further characters can be typed.
            self.ch_word_input.setMaxLength(len(input_text))
        else:
            stylesheet = '#ch_word_input{border:1px}'
            self.setStyleSheet(stylesheet)
            self.warning_set.setText('')
            self.ch_word_input.setMaxLength(len(model_text))
        if model_text == input_text:
            self.word_count+=1
            if self.word_count == len(self.data):
                # Whole list finished: persist the score, then offer a replay.
                self.save_rank()
                self.thread.wait()
                r = QMessageBox.information(self,('Congradulations!'),('\n\n\t恭喜你通关了一次单词练习!\t\t\t\n\n\t\t再接再励吧!\t\n\n\t(Ok:再来一次 | Cancel:选择其他模式)\t\n'),QMessageBox.StandardButtons(QMessageBox.Ok|QMessageBox.Cancel))
                self.thread.terminate()
                if r == QMessageBox.Cancel:
                    self.hide()
                    self.mainwindow.radio_blank_menu.setChecked(True)
                    self.restart = True
                    return
                else:
                    self.time_used_set.setText('0时0分0秒')
                    self.type_speed_set.setText('0字/秒')
                self.word_count = 0
                self.restart = True
            self.thread.add_type_number(len(model_text)+2)
            self.ch_word_input.setText('')
            self.ch_word_set.setText(self.data[self.word_count][0])
            self.ch_pinyin_set.setText(self.data[self.word_count][1])
    # Save the record after completing a run.
    def save_rank(self):
        # Record for this run: [0] elapsed-time text, [1] speed text,
        # [2] save timestamp, [3] total seconds (used for ranking).
        temp_data = []
        marktime = datetime.datetime.now().strftime("%Y.%m.%d_%H:%M:%S")
        speed = self.type_speed_set.text()
        time = self.time_used_set.text()
        pat = '([0-9]{,2})时([0-9]{,2})分([0-9]{,2})秒'
        r = re.compile(pat).findall(time)[0]
        whole_seconds = int(r[0])*3600+int(r[1])*60+int(r[2])
        temp_data.append(time)
        temp_data.append(speed)
        temp_data.append(marktime)
        temp_data.append(str(whole_seconds))
        file_path = './source/rank/ch_word_'
        if self.mainwindow.action_ch_word_low.isChecked():
            rank_degree = 'low'
        elif self.mainwindow.action_ch_word_mid.isChecked():
            rank_degree = 'mid'
        elif self.mainwindow.action_ch_word_high.isChecked():
            rank_degree = 'high'
        file_path = file_path + rank_degree + '.txt'
        data = [] # all saved records as a list of lists (2D array)
        if os.path.exists(file_path):
            file = open(file_path,'r')
            for line in file:
                line = line.replace('\n','')
                line = line.split(' ')
                data.append(line)
            file.close()
        # Insert the new record before the first slower entry (sorted file).
        insert = False
        for i in range(len(data)):
            if int(data[i][3]) >= int(temp_data[3]):
                data.insert(i , temp_data)
                insert = True
                break
        if len(data) == 0:
            data.append(temp_data)
            insert = True
        if insert == False and len(data) < 5:
            data.append(temp_data)
        # Rewrite the file, keeping only the best five records.
        file = open(file_path,'w')
        count = 0
        for i in range(len(data)):
            line = ' '.join(data[i])
            line+='\n'
            file.write(line)
            count+=1
            if count == 5: break
        file.close()
    # Preparation performed on every (re)start of the exercise.
    def prepare_work(self):
        self.ch_word_input.setText('')
        if self.thread:
            self.thread.terminate()
        self.word_count = 0 # reset the word counter
        self.ch_word_set.setText(self.data[self.word_count][0])
        self.ch_pinyin_set.setText(self.data[self.word_count][1])
        self.ch_word_input.setText('')
        self.ch_word_input.setFocus()
        self.time_used_set.setText('0时0分0秒')
        self.type_speed_set.setText('0字/秒')
    # show() override: reset the page every time it becomes visible.
    def show(self):
        super().show()
        self.prepare_work()
    # hide() override: terminate the timer thread whenever the page hides.
    def hide(self):
        super().hide()
        self.ch_word_input.setText('')
        if self.thread.running:
            self.thread.terminate()
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    # NOTE(review): __init__ requires a `mainwindow` argument; this direct
    # launch passes none and raises TypeError as written — confirm whether
    # standalone launching is still supported.
    window = ui_ch_wordwindow()
    window.show()
    sys.exit(app.exec_())
|
23,681 | ffd4a2bbd01b5b743764ccf6eea1c1cfc9e0109f | from .serializers import UserSerializer
from django.contrib.auth import get_user_model
from rest_framework.views import APIView
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
User = get_user_model()
class UserViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for users: anyone may register, admins do the rest."""
    serializer_class = UserSerializer
    queryset = User.objects.all().order_by('-id')

    def get_permissions(self):
        """Allow unauthenticated account creation; require admin otherwise."""
        # 'create' (POST) stays open so new users can register themselves.
        # Removed the leftover debug print of self.action.
        permission = AllowAny() if self.action in ('create',) else IsAdminUser()
        return [permission]
class UserView(APIView):
    """Function-per-verb user CRUD built on a plain APIView."""

    def get(self, request):
        """Return every user, serialized, under the "user" key."""
        user = User.objects.all()
        serializer = UserSerializer(user, many=True)
        return Response({"user": serializer.data})

    def post(self, request):
        """Create a user from the "user" key of the request body."""
        user = request.data.get("user")
        # Create an article from the above data
        serializer = UserSerializer(data=user)
        if serializer.is_valid(raise_exception=True):
            user_saved = serializer.save()
        # BUG FIX: the auth User model has no `.title` attribute (copied from
        # an article view); use `.username` instead.
        return Response({"success": "User '{}' created successfully".format(user_saved.username)})

    def put(self, request, id):
        """Partially update the user identified by *id*."""
        saved_user = get_object_or_404(User.objects.all(), id=id)
        data = request.data.get('user')
        serializer = UserSerializer(instance=saved_user, data=data, partial=True)
        if serializer.is_valid(raise_exception=True):
            user_saved = serializer.save()
        # BUG FIX: `.title` -> `.username` (same defect as in post()).
        return Response({
            "success": "User '{}' updated successfully".format(user_saved.username)
        })

    def delete(self, request, id):
        """Delete the user identified by *id*; 404 when absent."""
        # Get object with this pk
        user = get_object_or_404(User.objects.all(), id=id)
        user.delete()
        return Response({
            "message": "User with id `{}` has been deleted.".format(id)
        }, status=204)
|
23,682 | 220544097c96590e6545d37b6eb07cd2cd7c719c | def medias_por_inicial (x):
d={}
for k,v in x.items():
if k[1:2]==k[1:2]:
d[v]=v
|
23,683 | 28f1007fb7137de01ed474b37e12e06a21b773b3 |
#class header
class _ACCOMPLISHING():
def __init__(self,):
self.name = "ACCOMPLISHING"
self.definitions = accomplish
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['accomplish']
|
23,684 | 20bb574a15ffa00887a74971cd9011a75c6e6aa0 | import MySQLdb
import ObelixTestSpace
import sys
from ObelixTestSpace import ObelixTestSpace
class ObelixServer(object):
    """Obelix test server whose topography is looked up from Asterix."""

    def __init__(self, asterixIp, obelixIp):
        """Remember the Asterix DB host and this server's own IP address."""
        self.AsterixLocation = asterixIp
        self.IpAddress = obelixIp

    def LoadTopography(self):
        ''' Loads Topography from Asterix
        '''
        try:
            db = MySQLdb.connect(
                self.AsterixLocation,
                'scripterIn',
                'scripter',
                'asterixtestnetwork')
            cursor = db.cursor()
            # SECURITY FIX: parameterized query instead of string
            # concatenation, which was open to SQL injection via the IP value.
            SelectSql = ("SELECT ServerID, ServerName, SlotCount "
                         "FROM obelixservertable WHERE ServerIPAddress=%s")
            cursor.execute(SelectSql, (self.IpAddress,))
            ServerDetails = cursor.fetchall()
            for Field in ServerDetails:
                self.PrimaryKey = Field[0]
                self.ServerName = Field[1]
                self.SlotCount = Field[2]
                print(self.ServerName)
            db.close()
            self.TestDeviceList = []
            # NOTE(review): range(1, SlotCount) yields slots 1..SlotCount-1;
            # confirm whether the last slot is intentionally excluded.
            for Slot in range(1, self.SlotCount):
                TestSpace = ObelixTestSpace(self.PrimaryKey, self.IpAddress, Slot)
                self.TestDeviceList.append(TestSpace)
        except MySQLdb.Error as err:
            print(err)
|
23,685 | f1242107011f7a05312cb27456c8dc054e85c7ea | #--coding:utf-8--
import cv2
import numpy as np
import os
def gamma_trans(img,gamma):  # gamma correction via a lookup table
    """Return *img* gamma-corrected with exponent *gamma* (uint8 in/out)."""
    # Build the 256-entry mapping once, round to integer intensities, then
    # let OpenCV apply it in a single pass.
    table = np.array([((i / 255.0) ** gamma) * 255.0 for i in range(256)])
    table = np.round(table).astype(np.uint8)
    return cv2.LUT(img, table)
def nothing(x):
    # Do-nothing callback: cv2.createTrackbar requires one even when the
    # value is polled with getTrackbarPos instead.
    pass
# Interactive gamma (exposure) tuning: a trackbar selects gamma; pressing
# Enter accepts the current image and moves on to the next file.
wndName='Exposure'
cv2.namedWindow(wndName,0) # resizable window, adapts to display resolution
cv2.createTrackbar('Value of Gamma',wndName,100,300,nothing) # slider tunes gamma dynamically
data_base_dir="images" # input folder path
outfile_dir="out" # output folder path
processed_number=0 # count of pictures processed so far
print ("press enter to make sure your operation and process the next picture")
for file in os.listdir(data_base_dir): # iterate over images in the folder
    read_img_name=data_base_dir+'//'+file.strip() # full path of the picture
    image=cv2.imread(read_img_name, 1) # load as colour image
    while(1):
        # read the current gamma value from the trackbar
        value_of_gamma=cv2.getTrackbarPos('Value of Gamma',wndName)
        # compress the range (0..300 -> 0..3) for fine-grained adjustment
        value_of_gamma=value_of_gamma*0.01
        # gamma > 1 lowers exposure; 0 < gamma < 1 raises it
        #image_gamma_correct=gamma_trans(image,value_of_gamma)
        # Work on the luma channel only so colours are preserved.
        yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
        yuv[:,:,0]=gamma_trans(yuv[:,:,0],value_of_gamma)
        image_gamma_correct = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        cv2.imshow(wndName,image_gamma_correct)
        k=cv2.waitKey(1)
        if k==13:  # Enter accepts this image
            processed_number+=1
            #out_img_name=outfile_dir+'//'+file.strip()
            #cv2.imwrite(out_img_name,image_gamma_correct)
            print ("The number of photos which were processed is ",processed_number)
            break
23,686 | f1e4668f5e00e682dc441715b5aa0a11704fa843 | #!/usr/bin/python3
import base64
import codecs
import math
import socket
import sys
import time
import zlib
server = 'irc.root-me.org'
port = 6667
nickname = 'Py_Botv2'
channel = '#root-me_challenge'
bot = 'Candy'
user = "USER %s %s %s %s\n" % (nickname, nickname, nickname, nickname)
nick = "NICK %s \n" % nickname
ircsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def valid():
    """Abort the process unless a task argument was given on the command line."""
    if len(sys.argv) != 1:
        return
    print("No arguments passed. I'm lost and don't know where I am.")
    exit()
def send(command):
    """Transmit *command*, newline-terminated and UTF-8 encoded, to the server."""
    payload = command + "\n"
    ircsock.send(payload.encode("utf-8"))
def receive():
    """Read up to 2048 bytes from the server and return them as UTF-8 text."""
    return ircsock.recv(2048).decode("utf-8")
def buildConnection():
    """Connect to the IRC server, register, wait for the host banner, join."""
    # Connect
    ircsock.connect((server, port))
    print("Connected to: %s:%s" % (server, port))
    # Login
    send(user)
    send(nick)
    while 1:
        text = receive()
        text = str(text)
        # This banner line signals that registration has completed.
        if "is now your displayed host" in text:
            print(text)
            break
        else:
            print(text)
    print("Logged in...")
    # Join
    send("JOIN " + channel)
    print("Joined " + channel)
def main():
    """Connect, then loop: answer PINGs and run the episode from argv[1]."""
    buildConnection()
    while 1:
        text = receive()
        text = str(text)
        if "PRIVMSG " + nickname in text:
            print(text)
        elif "PING :" in text:
            # Keep-alive: the server drops clients that do not PONG.
            print(text)
            send("PONG :pingis\n")
        else:
            valid()
            todo = sys.argv[1]
            if todo == 'ep1':
                send("PRIVMSG Candy :!ep1")
                datarecv = receive()
                print(datarecv)
                # Do work bay bay
                # Redacted, no spoilers/answers here.
                returned = receive()
                print(returned)
                break
            elif todo == 'ep2':
                send("PRIVMSG Candy :!ep2")
                datarecv = receive()
                print(datarecv)
                # Redacted, no spoilers/answers here.
                returned = receive()
                print(returned)
                break
            elif todo == 'ep3':
                # NOTE(review): ep3/ep4 have no break — the loop keeps
                # listening after answering; confirm this is intentional.
                send("PRIVMSG Candy :!ep3")
                datarecv = receive()
                print(datarecv)
                # Redacted, no spoilers/answers here.
                returned = receive()
                print(returned)
            elif todo == 'ep4':
                send("PRIVMSG Candy :!ep4")
                datarecv = receive()
                print(datarecv)
                # Redacted, no spoilers/answers here.
                returned = receive()
                print(returned)
            else:
                print("I honestly don't know what you want me to do. ¯\_(ツ)_/¯ ")
                break
    ircsock.close()
# Run the bot immediately when the script is executed.
main()
|
23,687 | 61073831291aa2a7d8bd84a4fbcfc10e5dc17d5f | import numpy as np
def values_to_distribution_memory(values):
    """
    Transform a list of values into a 2D numpy array (aka memory)
    of probability distributions.

    Int  -> 1.0 at int index, 0.0 everywhere else
    None -> Uniformly distributed 1.0/len(values)

    E.g. [2, 1, None]
    ->
    nparray
    [
      [0.0, 0.0, 1.0],
      [0.0, 1.0, 0.0],
      [0.333, 0.333, 0.333]
    ]
    """
    length = len(values)
    memory = np.zeros([length, length], dtype=np.float32)
    uniform_density = 1.0 / float(length)
    for i, val in enumerate(values):
        # FIX: identity test (`is not None`) instead of `!= None` — the
        # PEP 8-correct sentinel check, immune to custom __eq__ overloads.
        if val is not None:
            memory[i][val] = 1.0
        else:
            memory[i].fill(uniform_density)
    return memory
def batch_to_distribution_memories(batch):
    """
    Transform a batch of value lists into a batch
    of 2D numpy arrays of probability distributions.
    """
    return [values_to_distribution_memory(values) for values in batch]
def next_batch(memory_dim, batch_length, task):
    """
    Generate a batch of input/target memory pairs,
    where the task specifies the target transformation.

    :param memory_dim: number of values per example (memory size)
    :param batch_length: number of examples to generate
    :param task: object providing generate_random_input(difficulty, dim)
        and run(values) -> target values
    :return: (batch_inputs, batch_targets), lists of MxM numpy arrays
    """
    batch_inputs = []
    batch_targets = []
    difficulty = 0.0
    # FIX: `xrange` does not exist on Python 3 (NameError); `range` behaves
    # identically for this use on both Python 2 and 3.
    for _ in range(batch_length):
        # Values as integers
        random_input = task.generate_random_input(difficulty, memory_dim)
        target_output = task.run(random_input)
        # Values as MxM memories
        batch_inputs.append(values_to_distribution_memory(random_input))
        batch_targets.append(values_to_distribution_memory(target_output))
    return batch_inputs, batch_targets
|
23,688 | 4a024a64de956225b40d1a5f9e90fe5beeaf8de4 | # Copyright (C) 2020 Klika Tech, Inc. or its affiliates. All Rights Reserved.
# Use of this source code is governed by an MIT-style license that can be found in the LICENSE file
# or at https://opensource.org/licenses/MIT.
import os
import tempfile
from datetime import datetime
from tm4j_reporter_api.tm4j_api import tm4j_api
from tm4j_reporter_api.tm4j_exceptions import tm4j_response_exceptions
from tm4j_reporter_robot.tm4j_robot_helpers import tm4j_config_helpers, tm4j_test_cycle_helpers
class TM4JRobotListener(object):
    """Robot Framework v2 listener that reports test executions to TM4J."""
    ROBOT_LISTENER_API_VERSION = 2
    def __init__(
        self,
        tm4j_access_key=None,
        tm4j_project_key=None,
        tm4j_parallel_execution_support=False,
        # NOTE(review): these two f-string defaults are evaluated once, at
        # class-definition time — every instance created without explicit
        # values shares the same timestamped cycle name.
        tm4j_shared_test_cycle_key_file_path=f"{tempfile.gettempdir()}/TEST_CYCLE_KEY",
        tm4j_test_cycle_name=f"Robot run {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
    ):
        # Environment variables take precedence over constructor arguments.
        self.tm4j_access_key = os.environ.get("TM4J_ACCESS_KEY", tm4j_access_key)
        self.tm4j_project_key = os.environ.get("TM4J_PROJECT_KEY", tm4j_project_key)
        self.tm4j_parallel_execution_support = os.environ.get(
            "TM4J_PARALLEL_EXECUTION_SUPPORT", tm4j_parallel_execution_support
        )
        self.tm4j_shared_test_cycle_key_file_path = os.environ.get(
            "TM4J_SHARED_TEST_CYCLE_KEY_FILE_PATH", tm4j_shared_test_cycle_key_file_path
        )
        self.tm4j_test_cycle_name = os.environ.get("TM4J_TEST_CYCLE_NAME", tm4j_test_cycle_name)
        # Resolved lazily on the first end_test call.
        self.tm4j_test_cycle_key = None
    def end_test(self, name: str, attributes: dict) -> None:
        """
        Report test execution based on robot test attributes using TM4J Reporter API library.

        :param name: name of Robot Framework event
        :type name: str
        :param attributes: dictionary contains test case details
        :type attributes: dict
        :return: None
        :rtype: None
        """
        test_case_key = tm4j_config_helpers.get_tm4j_test_case_key(attributes)
        tm4j_api.configure_tm4j_api(api_access_key=self.tm4j_access_key, project_key=self.tm4j_project_key)
        try:
            # Create/look up the test cycle only once per listener instance.
            if not self.tm4j_test_cycle_key:
                self.tm4j_test_cycle_key = tm4j_test_cycle_helpers.get_tm4j_test_cycle_key(
                    parallel_execution=self.tm4j_parallel_execution_support,
                    test_cycle_key_file_path=self.tm4j_shared_test_cycle_key_file_path,
                    tm4j_test_cycle_name=self.tm4j_test_cycle_name
                )
            tm4j_api.create_test_execution_result(
                test_cycle_key=self.tm4j_test_cycle_key,
                test_case_key=test_case_key,
                execution_status=attributes["status"],
                actual_end_date=datetime.strptime(attributes["endtime"], "%Y%m%d %H:%M:%S.%f").strftime(
                    "%Y-%m-%dT%H:%M:%SZ"
                ),
                execution_time=attributes["elapsedtime"],
                comment=attributes["message"],
            )
        except tm4j_response_exceptions.TM4JResponseException as e:
            # Reporting failures must not fail the Robot run itself.
            print(f"Sorry, test execution reporting for {test_case_key} didn't go well because of: {e.message}")
        return None
|
23,689 | f320c028c1758e5d28a686d3f86de6e8409fa69c | # Author: Kevin Waters
# Date: 17 June 2020
# Description: This program implements a Selection Sort algorithm.
import random
import time
def swap(theList, i, j):
    """
    Swaps the position of parameter i with parameter j.
    :param theList: Python list
    :param i: Position in list.
    :param j: Position in list.
    :return: None
    """
    # Tuple assignment exchanges the two slots without a temporary.
    theList[i], theList[j] = theList[j], theList[i]
def selectionSort(theList):
    """
    Sorts the Python list in place using the selection sort algorithm.
    :param theList: Python list
    :return: None
    """
    # Removed dead code: `i = N - 1` and `stopEarly = False` were never used.
    N = len(theList)
    for i in range(N - 1):
        # Find the smallest element in the unsorted tail.
        smallestPosition = i
        for j in range(i + 1, N):
            if theList[j] < theList[smallestPosition]:
                smallestPosition = j
        # Swap it into place (skip the no-op self-swap).
        if smallestPosition != i:
            theList[i], theList[smallestPosition] = theList[smallestPosition], theList[i]
def main():
    """Benchmark selection sort on 300 random integers and print timings."""
    data = [random.randint(0, 1000) for _ in range(300)]
    print("The sorted list = " + str(sorted(data)))
    print("The unsorted list = " + str(data))
    print("")
    began = time.time()
    selectionSort(data)
    ended = time.time()
    print("After selection sort = " + str(data))
    print("Time to selection sort = " + format(ended - began, "6.4f") + "seconds")
|
23,690 | bd16f41a7d2cbb9d78fbd3fee1ee542508404739 | class ConversionUtils:
#将Byte(B)转换为Megabyte(M)
def bytes2Megabytes(bytesNum):
return bytesNum / 1024 / 1024
#将Megabyte(M)转换为Byte(B)
def megabytes2Bytes(megaNum):
return megaNum * 1024 * 1024 |
23,691 | 578c2814543cfb0a74308bfcc89ba7d5132a5d8c | from django.conf.urls.static import static
from farmeryy import settings
from django_otp.admin import OTPAdminSite
from django_otp.plugins.otp_totp.models import TOTPDevice
from django.contrib.auth.models import User
from django.urls import path
from farmeryyapp import views
from post.views import AddPostView
from farmeryyapp.views import OTPAdmin
from django.urls import path,include
from .apiviews import user_list,product_list,user_create,category_list,producttype_list,product2_list,rating_list
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from farmeryyapp import apiviews,adminviews
from farmeryyapp.apiviews import InfoViewSet,LoginViewSet,ProductSerializer,Category1ViewSet,ProductTypeViewSet,Product2ViewSet,RatingViewSet
# DRF router exposing the viewset-based API endpoints under /api/.
router=routers.DefaultRouter()
router.register('info',apiviews.InfoViewSet,basename="info")
#router.register('users',apiviews.UserViewSet,basename="user")
router.register('login',apiviews.LoginViewSet,basename="login")
router.register('product',apiviews.ProductViewSet,basename="product")
router.register('category',apiviews.Category1ViewSet,basename="category")
router.register('producttype',apiviews.ProductTypeViewSet,basename="producttype")
router.register('product2',apiviews.Product2ViewSet,basename="product2")
router.register('rating',apiviews.RatingViewSet,basename="rating")
# OTP-protected admin site; User and TOTPDevice are managed through it.
admin_site=OTPAdmin(name='OTPAdmin')
admin_site.register(User)
admin_site.register(TOTPDevice)
urlpatterns=[
    path('home',views.index, name='home'),
    path('home1',views.home1, name='home1'),
    path('',views.home,name='homedesign'),
    # path('',include('rest_framework.urls')),
    path("homeadmin_template",views.homeadmin_template, name="homeadmin_template"),
    path('about',views.about, name="about"),
    path('admine/', admin_site.urls),
    path('how-it-works',views.work, name="how-it-works"),
    path('catalog/',views.catalog, name="catalog"),
    path('shop',views.shop, name="shop"),
    path('trial',views.trial, name="trial"),
    path('trialform',views.trialform, name="trialform"),
    path('info/cre',user_create ,name="info/cre"),
    #path('info/',views.user_list, name="info"),
    #path('register',views.register, name="register"),
    path('login',views.loginn, name="login"),
    # path('indexsign',views.indexsign, name="indexsign"),
    path('loogout', views.loogout,name="loogout"),
    path('mylogin',views.mylogin, name="mylogin"),
    path('register',views.register, name="register"),
    # path('loginform',views.loginform, name="loginform"),
    path('loginform',views.loginform, name="loginform"),
    #product producr product
    path('product',views.product, name="product"),
    path('viewproduct',views.viewproduct, name="viewproduct"),
    path('delete/<str:product_id>/', views.delete_product, name="delete"),
    # path('edit_product/<str:product_id>/', views.edit_product, name="edit_product"),
    #-----------------------------------------------------------------------------------------------------------------------------------------
    # api url
    path('api/',include(router.urls)),
    # path('api/auth/',obtain_auth_token),
    path('api/auth/',obtain_auth_token),
    path('api/product/',product_list , name="product"),
    # path('api/catalog/',apiviews.catlogsave, name="api/catalog"),
    path('api/category/',category_list, name="category"),
    path('api/producttype/',producttype_list, name="api/producttype"),
    path('api/product2/',product2_list, name="api/product2"),
    path('api/rating/',rating_list, name="api/rating"),
    path('info/',user_list , name="info"),
    #-------------------------------adminviews.py.....................................................
    path('product',adminviews.product, name="product"),
    path('product_save',adminviews.product_save, name="product_save"),
    path('product_details/<str:myid>/',adminviews.product_details, name="product_details"),
    path('edit_product/<str:product_id>/', adminviews.edit_product,name="edit_product"),
    path('product_edit_save', adminviews.product_edit_save),
    path('delete_product/<str:product_id>/', adminviews.delete_product),
    path('team',adminviews.team, name="team"),
    path('blog/',views.blog, name="post-list"),
    path('search/',views.search, name="search"),
    path('post/<pk>/',views.post, name="post-detail"),
    path('add_post/',AddPostView.as_view() ,name="add_post"),
    path('team_save', adminviews.team_save),
    path('viewteam',adminviews.viewteam,name="viewteam"),
    path('teamHome',views.teamHome, name="team"),
    path('category',adminviews.category, name="category"),
    path('category_save', adminviews.category_save),
    path('viewcategory',adminviews.viewcategory, name="viewcategory"),
    path('viewFruitGrocery',views.viewFruitGrocery, name="viewFruitGrocery"),
    path('edit_category/<str:category_id>/', adminviews.edit_category,name="edit_category"),
    path('edit_category_save', adminviews.edit_category_save),
    path('delete_category/<str:category_id>/', adminviews.delete_category),
    path('subcategory',adminviews.subcategory, name="subcategory"),
    path('subcategory_save',adminviews.subcategory_save, name="subcategory_save"),
    path('viewsubcategory',adminviews.viewsubcategory, name="viewsubcategory"),
    path('fruitGrocery',adminviews.fruitGrocery, name="fruitGrocery"),
    path('fruitGrocery_save', adminviews.fruitGrocery_save),
    path('fetch_api/<category_id>', adminviews.fetch_api, name="fetch_api"),
    path('edit_subcategory/<str:subcategory_id>/',adminviews.edit_subcategory, name="edit_subcategory"),
    path('delete_subcategory/<str:subcategory_id>/',adminviews.delete_subcategory, name="delete_subcategory"),
    path('loogout', views.loogout,name="loogout"),
    path('howit_work',adminviews.howit_work, name="howit_work"),
    path('howit_work_save',adminviews.howit_work_save, name="howit_work_save"),
    path('viewworkdata',adminviews.viewworkdata, name="viewworkdata"),
    path('delete_viewworkdata/<str:work_id>/', adminviews.delete_viewworkdata,name="delete_viewworkdata"),
    path('sliderhome',adminviews.sliderhome, name="sliderhome"),
    path('sliderhome_save',adminviews.sliderhome_save, name="sliderhome_save"),
    path('delete_slider/<str:sliderhome_id>/', adminviews.delete_slider,name="delete_slider"),
    path('viewslider',adminviews.viewslider, name="viewslider"),
    path('content',adminviews.content, name="content"),
    path('content_save',adminviews.content_save, name="content_save"),
    path('contentview',adminviews.contentview, name="contentview"),
    # BUG FIX: the route converter was unterminated ('<str:circl_id/'),
    # which makes Django raise ImproperlyConfigured at import; close '<...>'.
    path('delete_content/<str:circl_id>/', adminviews.delete_content),
    path('homeabout',adminviews.homeabout, name="homeabout"),
    path('homeabout_save',adminviews.homeabout_save, name="homeabout_save"),
    path('offerhomeproduct', adminviews.offerhomeproduct),
    path('offerhomeproduct_save', adminviews.offerhomeproduct_save),
]
23,692 | 2f089646f796d3d5afaaf5b0226f5c20477502b4 | from vbench.benchmark import Benchmark
from datetime import datetime
common_setup = """from pandas_vb_common import *
"""
#----------------------------------------------------------------------
# Series constructors
setup = common_setup + """
data = np.random.randn(100)
index = Index(np.arange(100))
"""
# vbench benchmark: time Series construction from an ndarray and an Index.
series_constructor_ndarray = \
    Benchmark("Series(data, index=index)", setup=setup,
              name='series_constructor_ndarray')
23,693 | f644629e4a20cec894eaac4ac53e877f5220c593 | from typing import List
import psycopg2
from entities.user import User
from daos.user_dao import UserDao
from exceptions.exceptions import ResourceNotFoundError
from utils.connection_util import connection
class UserDaoPostgres(UserDao):
    """Postgres DAO for users, built on the shared module-level connection."""

    def add_user(self, user: User) -> User:
        """Insert *user* and fill in its generated user_id."""
        sql = """insert into users (username, user_password, first_name, last_name, emp_or_mgr) values (%s, %s, %s, %s, %s) returning user_id"""
        cursor = connection.cursor()
        try:
            cursor.execute(sql, (user.username, user.password, user.first_name, user.last_name, user.emp_or_mgr))
            user_id = cursor.fetchone()[0]
            user.user_id = user_id
            return user
        except psycopg2.Error:
            # NOTE(review): any psycopg2 error is reported as a duplicate
            # username; connection and other constraint failures are masked.
            raise ValueError("A user with that username already exists")
        finally:
            # NOTE(review): commit runs on both success AND failure paths.
            connection.commit()

    def get_single_user(self, user_id: int) -> User:
        """Fetch one user by id; raises ResourceNotFoundError when absent."""
        sql = """select * from users where user_id = %s"""
        cursor = connection.cursor()
        cursor.execute(sql, [user_id])
        record = cursor.fetchone()
        try:
            return User(*record)
        except TypeError:
            # fetchone() returned None -> User(*None) raises TypeError.
            raise ResourceNotFoundError(f'Resource with given ID {user_id} not found')

    def get_all_users(self) -> List[User]:
        """Return every user row as a User entity."""
        sql = """select * from users"""
        cursor = connection.cursor()
        cursor.execute(sql)
        records = cursor.fetchall()
        return [User(*record) for record in records]

    def update_user(self, user: User) -> User:
        """Overwrite all mutable fields of an existing user."""
        self.get_single_user(user.user_id) #Check the account exists
        sql = """update users set username=%s, user_password=%s, first_name=%s, last_name=%s, emp_or_mgr=%s where user_id = %s"""
        cursor = connection.cursor()
        cursor.execute(sql, (user.username, user.password, user.first_name, user.last_name, user.emp_or_mgr, user.user_id))
        connection.commit()
        return user

    def delete_user(self, user_id: int) -> bool:
        """Delete the user with *user_id*; raises if it does not exist."""
        self.get_single_user(user_id) #Check the account exits
        sql = '''delete from users where user_id = %s'''
        cursor = connection.cursor()
        cursor.execute(sql, [user_id])
        connection.commit()
        return True
23,694 | 11a237fe8edef6df8807303302d251869827d6f7 | # Copyright 2010-present Basho Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import six
from erlastic import encode, decode
from erlastic.types import Atom
from riak import RiakError
from riak.codecs import Codec, Msg
from riak.pb.messages import MSG_CODE_TS_TTB_MSG
from riak.ts_object import TsColumns
from riak.util import bytes_to_str, unix_time_millis, \
datetime_from_unix_time_millis
udef_a = Atom('undefined')
rpberrorresp_a = Atom('rpberrorresp')
tsgetreq_a = Atom('tsgetreq')
tsgetresp_a = Atom('tsgetresp')
tsqueryreq_a = Atom('tsqueryreq')
tsqueryresp_a = Atom('tsqueryresp')
tsinterpolation_a = Atom('tsinterpolation')
tsputreq_a = Atom('tsputreq')
tsputresp_a = Atom('tsputresp')
tsdelreq_a = Atom('tsdelreq')
timestamp_a = Atom('timestamp')
class TtbCodec(Codec):
'''
Erlang term-to-binary Encoding and decoding methods for TcpTransport
'''
    def __init__(self, **unused_args):
        # Accepts and ignores extra kwargs so all codecs share one signature.
        super(TtbCodec, self).__init__(**unused_args)
    def parse_msg(self, msg_code, data):
        """Decode a TTB payload; return the decoded term, or None when empty.

        Raises RiakError for non-TTB message codes or an embedded error tuple.
        """
        if msg_code != MSG_CODE_TS_TTB_MSG:
            raise RiakError("TTB can't parse code: {}".format(msg_code))
        if len(data) > 0:
            decoded = decode(data)
            # Surface a server-side rpberrorresp tuple as an exception.
            self.maybe_err_ttb(decoded)
            return decoded
        else:
            return None
def maybe_err_ttb(self, err_ttb):
resp_a = err_ttb[0]
if resp_a == rpberrorresp_a:
errmsg = err_ttb[1]
# errcode = err_ttb[2]
raise RiakError(bytes_to_str(errmsg))
    def encode_to_ts_cell(self, cell):
        """Convert one Python cell value to its TTB wire representation.

        None -> [] (empty list = null cell); datetime -> epoch milliseconds;
        bool/str/bytes/int/float pass through unchanged; anything else raises.
        """
        if cell is None:
            return []
        else:
            if isinstance(cell, datetime.datetime):
                ts = unix_time_millis(cell)
                # logging.debug('encoded datetime %s as %s', cell, ts)
                return ts
            elif isinstance(cell, bool):
                # bool is tested before the integer types: Python bools are
                # ints, and this ordering keeps them encoded as booleans.
                return cell
            elif isinstance(cell, six.text_type) or \
                    isinstance(cell, six.binary_type) or \
                    isinstance(cell, six.string_types):
                return cell
            elif (isinstance(cell, six.integer_types)):
                return cell
            elif isinstance(cell, float):
                return cell
            else:
                t = type(cell)
                raise RiakError("can't serialize type '{}', value '{}'"
                                .format(t, cell))
    def encode_timeseries_keyreq(self, table, key, is_delete=False):
        """Build a TTB get (or delete) request for the row identified by *key*.

        :param table: table object exposing a ``name`` attribute
        :param key: list of key cell values (anything else raises ValueError)
        :param is_delete: encode a tsdelreq instead of a tsgetreq
        :rtype: Msg
        """
        key_vals = None
        if isinstance(key, list):
            key_vals = key
        else:
            raise ValueError("key must be a list")
        mc = MSG_CODE_TS_TTB_MSG
        rc = MSG_CODE_TS_TTB_MSG
        req_atom = tsgetreq_a
        if is_delete:
            req_atom = tsdelreq_a
        # TODO FUTURE add timeout as last param
        req = req_atom, table.name, \
            [self.encode_to_ts_cell(k) for k in key_vals], udef_a
        return Msg(mc, encode(req), rc)
def validate_timeseries_put_resp(self, resp_code, resp):
if resp is None and resp_code == MSG_CODE_TS_TTB_MSG:
return True
if resp is not None:
return True
else:
raise RiakError("missing response object")
    def encode_timeseries_put(self, tsobj):
        '''
        Returns an Erlang-TTB encoded tuple with the appropriate data and
        metadata from a TsObject.

        :param tsobj: a TsObject
        :type tsobj: TsObject
        :rtype: term-to-binary encoded object
        '''
        if tsobj.columns:
            raise NotImplementedError('columns are not used')
        if tsobj.rows and isinstance(tsobj.rows, list):
            req_rows = []
            for row in tsobj.rows:
                # Each row must be encoded as an Erlang tuple, not a list.
                req_r = []
                for cell in row:
                    req_r.append(self.encode_to_ts_cell(cell))
                req_rows.append(tuple(req_r))
            req = tsputreq_a, tsobj.table.name, [], req_rows
            mc = MSG_CODE_TS_TTB_MSG
            rc = MSG_CODE_TS_TTB_MSG
            return Msg(mc, encode(req), rc)
        else:
            raise RiakError("TsObject requires a list of rows")
    def encode_timeseries_query(self, table, query, interpolations=None):
        """Build a TTB query request; a '{table}' placeholder in *query* is
        substituted with the table name.

        ``interpolations`` is accepted for interface parity but unused here.
        """
        q = query
        if '{table}' in q:
            q = q.format(table=table.name)
        tsi = tsinterpolation_a, q, []
        req = tsqueryreq_a, tsi, False, udef_a
        mc = MSG_CODE_TS_TTB_MSG
        rc = MSG_CODE_TS_TTB_MSG
        return Msg(mc, encode(req), rc)
def decode_timeseries(self, resp_ttb, tsobj,
convert_timestamp=False):
"""
Fills an TsObject with the appropriate data and
metadata from a TTB-encoded TsGetResp / TsQueryResp.
:param resp_ttb: the decoded TTB data
:type resp_ttb: TTB-encoded tsqueryrsp or tsgetresp
:param tsobj: a TsObject
:type tsobj: TsObject
:param convert_timestamp: Convert timestamps to datetime objects
:type tsobj: boolean
"""
if resp_ttb is None:
return tsobj
self.maybe_err_ttb(resp_ttb)
# NB: some queries return a BARE 'tsqueryresp' atom
# catch that here:
if resp_ttb == tsqueryresp_a:
return tsobj
# The response atom is the first element in the response tuple
resp_a = resp_ttb[0]
if resp_a == tsputresp_a:
return
elif resp_a == tsgetresp_a or resp_a == tsqueryresp_a:
resp_data = resp_ttb[1]
if len(resp_data) == 0:
return
elif len(resp_data) == 3:
resp_colnames = resp_data[0]
resp_coltypes = resp_data[1]
tsobj.columns = self.decode_timeseries_cols(
resp_colnames, resp_coltypes)
resp_rows = resp_data[2]
tsobj.rows = []
for resp_row in resp_rows:
tsobj.rows.append(
self.decode_timeseries_row(resp_row, resp_coltypes,
convert_timestamp))
else:
raise RiakError(
"Expected 3-tuple in response, got: {}".format(resp_data))
else:
raise RiakError("Unknown TTB response type: {}".format(resp_a))
def decode_timeseries_cols(self, cnames, ctypes):
cnames = [bytes_to_str(cname) for cname in cnames]
ctypes = [str(ctype) for ctype in ctypes]
return TsColumns(cnames, ctypes)
def decode_timeseries_row(self, tsrow, tsct, convert_timestamp=False):
"""
Decodes a TTB-encoded TsRow into a list
:param tsrow: the TTB decoded TsRow to decode.
:type tsrow: TTB dncoded row
:param tsct: the TTB decoded column types (atoms).
:type tsct: list
:param convert_timestamp: Convert timestamps to datetime objects
:type tsobj: boolean
:rtype list
"""
row = []
for i, cell in enumerate(tsrow):
if cell is None:
row.append(None)
elif isinstance(cell, list) and len(cell) == 0:
row.append(None)
else:
if convert_timestamp and tsct[i] == timestamp_a:
row.append(datetime_from_unix_time_millis(cell))
else:
row.append(cell)
return row
|
23,695 | cbf31786712197b255d7f204509eb617206f1d33 | import os
import sublime
import sublime_plugin
class InsertFilePathCommand(sublime_plugin.TextCommand):
    """Insert the current file's full path at every selection."""

    def run(self, edit):
        replace_text_in_selections(self.view, edit, self.view.file_name())

    def is_enabled(self):
        # Only meaningful for views backed by a saved file.
        current = self.view.file_name()
        return bool(current and len(current) > 0)
class InsertRelativePathCommand(sublime_plugin.TextCommand):
    """Insert the file's path relative to the first matching project folder."""

    def run(self, edit):
        self.path = self.view.file_name()
        for folder in self.view.window().folders():
            if folder in self.view.file_name():
                # Strip the project-folder prefix (leading separator kept).
                self.path = self.path.replace(folder, '')
                break
        replace_text_in_selections(self.view, edit, self.path)

    def is_enabled(self):
        if not self.view.window().folders():
            return False
        return bool(self.view.file_name())
class InsertFileNameCommand(sublime_plugin.TextCommand):
    """Insert just the file's base name at every selection."""

    def run(self, edit):
        base_name = os.path.basename(self.view.file_name())
        replace_text_in_selections(self.view, edit, base_name)

    def is_enabled(self):
        current = self.view.file_name()
        return bool(current and len(current) > 0)
class InsertFileDirectoryCommand(sublime_plugin.TextCommand):
    """Insert the current file's containing directory at every selection."""

    def run(self, edit):
        directory = os.path.dirname(self.view.file_name())
        replace_text_in_selections(self.view, edit, directory)

    def is_enabled(self):
        current = self.view.file_name()
        return bool(current and len(current) > 0)
class InsertRelativeDirectoryCommand(sublime_plugin.TextCommand):
    """Insert the file's directory relative to its project folder."""

    def run(self, edit):
        self.directory = os.path.dirname(self.view.file_name())
        for folder in self.view.window().folders():
            if folder in self.view.file_name():
                # Strip the project-folder prefix from the directory.
                self.directory = self.directory.replace(folder, '')
                break
        replace_text_in_selections(self.view, edit, self.directory)

    def is_enabled(self):
        if not self.view.window().folders():
            return False
        return bool(self.view.file_name())
class CopyFileNameCommand(sublime_plugin.TextCommand):
    """Copy the current file's base name to the clipboard."""

    def run(self, edit):
        file_name = os.path.basename(self.view.file_name())
        sublime.set_clipboard(file_name)
        # Fixed garbled status text ("Copieddddd" -> "Copied").
        sublime.status_message("Copied file name: %s" % file_name)

    def is_enabled(self):
        current = self.view.file_name()
        return bool(current and len(current) > 0)
class CopyFileDirectoryCommand(sublime_plugin.TextCommand):
    """Copy the current file's containing directory to the clipboard."""

    def run(self, edit):
        directory = os.path.dirname(self.view.file_name())
        sublime.set_clipboard(directory)
        sublime.status_message("Copied file directory: %s" % directory)

    def is_enabled(self):
        current = self.view.file_name()
        return bool(current and len(current) > 0)
class CopyRelativePathCommand(sublime_plugin.TextCommand):
    """Copy the file's project-relative path to the clipboard."""

    def run(self, edit):
        self.path = self.view.file_name()
        for folder in self.view.window().folders():
            if folder in self.view.file_name():
                # Strip the folder prefix plus its trailing separator.
                self.path = self.path.replace(folder, '')[1:]
                break
        sublime.set_clipboard(self.path)
        # Fixed copy/paste error: the message said "file directory"
        # although this command copies the relative path.
        sublime.status_message("Copied relative path: %s" % self.path)

    def is_enabled(self):
        current = self.view.file_name()
        return bool(current and len(current) > 0)
def replace_text_in_selections(view, edit, text):
    """Replace every selection region in *view* with *text*.

    :param view: the sublime.View to modify
    :param edit: the edit token of the invoking command
    :param text: replacement string
    """
    for selection in view.sel():
        view.replace(edit, selection, text)
|
23,696 | fe8caf74d74c5252893708ebd7a79511f17721d5 | import numpy as np
class Activation:
    """Interface for scalar activation functions and their derivatives."""

    def func(self, x: float) -> float:
        """Return the activation value at ``x``."""
        raise NotImplementedError

    def grad(self, x: float) -> float:
        """Return the derivative of the activation at ``x``."""
        raise NotImplementedError
class RELU(Activation):
    """Rectified linear unit: max(0, x)."""

    def func(self, x: float) -> float:
        # Builtin max gives the same scalar value as np.max([0, x])
        # without allocating a throwaway numpy array per call.
        return max(0.0, x)

    def grad(self, x: float) -> float:
        # Subgradient 0 is used at x == 0.
        return 0 if x <= 0 else 1
class LINEAR(Activation):
    """Identity activation: f(x) = x."""

    def func(self, x: float) -> float:
        return x

    def grad(self, x: float) -> float:
        # The identity has constant slope 1.
        return 1.
class TANH(Activation):
    """Hyperbolic tangent activation."""

    def func(self, x: float) -> float:
        return np.tanh(x)

    def grad(self, x: float) -> float:
        # d/dx tanh(x) = 1 - tanh(x)^2; evaluate tanh once instead of twice.
        t = np.tanh(x)
        return 1. - t * t
class SIGMOID(Activation):
    """Logistic sigmoid activation."""

    def func(self, x: float) -> float:
        return 1. / (1 + np.exp(-x))

    def grad(self, x: float) -> float:
        # sigma'(x) = sigma(x) * (1 - sigma(x)); evaluate exp(-x) once
        # instead of three times.
        s = 1. / (1 + np.exp(-x))
        return s * (1 - s)
|
23,697 | cf8d8f8e87b97ef9e795213eaf2574ad4c7cab17 | from gmpy2 import divm, mpz, mul, powmod
from pwn import remote
import sys
import time
# Signing-oracle endpoint; the pwntools connection is opened at import time.
host = "127.0.0.1"
port = 8779
server = remote(host, port)
#key material
# RSA public modulus and exponent of the oracle's signing key.
n = 104525132490556452593202847360958867443850727021139374664119771884926217842051539965479047872905144890766357397753662519890618428457072902974515214064289896674717388849969373481670774897894594962128470900125169816586277785525675183392237296768481956391496477386266086799764706674035243519651786099303959008271
e = 65537
def byte_to_int(data):
    """Interpret *data* (bytes) as a big-endian unsigned integer.

    The parameter was renamed from ``str``, which shadowed the builtin.
    """
    return int(data.hex(), 16)
def hex_to_byte(hex_str):
    """Convert a hex string to bytes, zero-padding odd-length input.

    The parameter was renamed from ``hex``, which shadowed the builtin.
    """
    return bytes.fromhex(("0" if len(hex_str) % 2 else "") + hex_str)
def encrypt(data):
    """RSA-encrypt *data* (bytes) under the oracle's public key (n, e)."""
    message = byte_to_int(data)
    return pow(message, e, n)
def try_sign(spell):
    """Ask the oracle to sign *spell* (bytes).

    Returns the hex signature as bytes, or None if the oracle refused.
    """
    server.send("sign " + spell.hex() + "\n")
    response = server.recvuntil("\n").decode("utf-8")
    if response.startswith("Incorrect"):
        # Drain the follow-up line before reporting failure.
        server.recvuntil("\n")
        return None
    # Strip the trailing "\r\n" before re-encoding.
    return response[:-2].encode("utf-8")
def try_cast(spell, sig):
    """Submit signature *sig* for *spell*.

    Returns False if rejected, True on a plain success line, otherwise
    the payload of the following response line.
    """
    command = " ".join(["cast", sig.hex(), spell.hex()]) + "\n"
    server.send(command)
    response = server.recvuntil("\n").decode("utf-8")
    if response.startswith("Incorrect"):
        server.recvuntil("\n")
        return False
    if response.startswith("You"):
        return True
    # The next line carries the payload; strip the trailing "\r\n".
    return server.recvuntil("\n")[:-2]
class UE(BaseException):
    """Marker exception: candidate blinded message contains a byte the
    service cannot accept (retry with a different blinding factor).

    The redundant no-op ``__init__`` was removed; the inherited
    constructor behaves the same for argument-less raises.
    """
    pass
def main():
    """Obtain a signature on the spell via RSA blinding.

    Have the oracle sign the blinded message c * r^e mod n, then divide
    the returned signature by r (mod n) to recover a valid signature on
    c itself.
    """
    spell = b"hocus pocus"
    c = int(spell.hex(), 16)
    r = 1
    sig_c_prime = None
    while sig_c_prime is None:
        try:
            # Blind the spell: c' = c * r^e mod n.
            c_prime = mul(c, powmod(r, e, n)) % n
            msg = hex_to_byte(hex(c_prime)[2:])
            # Reject candidates containing NUL or whitespace bytes,
            # which would break the line-based protocol; try the next r.
            if any([x in list(map(ord, [y for y in "\0 \t\r\n"])) for x in msg]):
                raise UE()
            resp = try_sign(msg)
            if resp is not None:
                sig_c_prime = int(resp, 16)
                break
        except KeyboardInterrupt:
            raise
        except UE:
            pass
        r += 1
    # Unblind: sig = sig(c') / r mod n (divm is modular division).
    sig = hex( divm(sig_c_prime, r, n) )[2:]
    print("signature:", hex_to_byte(sig))
    flag = try_cast(spell, hex_to_byte(sig))
    print("FLAG:", flag)
if __name__ == "__main__":
    # Consume the service banner up to the closing ASCII-art line.
    print(server.recvuntil("\\\\\r\n"))
    try_str = b"hocus pocus"
    # Smoke-test the sign/cast round trip before running the attack.
    # NOTE(review): relies on `assert`, which is stripped under -O.
    assert(try_cast(try_str, hex_to_byte(try_sign(try_str).decode("utf-8"))))
    main()
|
23,698 | 1cd2c40012eb3693c8609f82a68145c0c3c008c0 | """This module contains tests for bucket list methods
"""
import unittest
from bucket_app.buckets import BucketList
class TestBucket(unittest.TestCase):
    """Unit tests for the BucketList class."""

    def setUp(self):
        # Fresh fixture for every test method.
        self.my_bucket = BucketList("My resolutions", "In Progress")

    def test_create_bucket_list_return(self):
        """create_bucket_list returns a BucketList instance."""
        bucket = BucketList("", "")
        created = bucket.create_bucket_list("Name", "Completed")
        self.assertIsInstance(created, BucketList)

    def test_create_bucket_list_name(self):
        """create_bucket_list rejects a missing name."""
        bucket = BucketList("", "")
        result = bucket.create_bucket_list("")
        self.assertEqual(result, "Please provide a name for your bucket list")
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
23,699 | 28d3be052b3987147cb1a71ebbd383a22a628991 | from lib.quickbooks.entity import Entity
class Customer(Entity):
    """QuickBooks Customer entity.

    Maps rows from the QODBC ``Customer`` table onto the local MySQL
    ``customer`` table using the declarative tables below.
    """

    # Source (QODBC) and destination (MySQL) table names.
    qodbc_table = 'Customer'
    mysql_table = 'customer'

    # (QODBC column, MySQL column) pairs used when copying rows across.
    field_map = (
        ('ListID', 'qb_id'),
        ('TimeCreated', 'time_created'),
        ('TimeModified', 'time_modified'),
        ('Name', 'name'),
        ('FullName', 'full_name'),
        ('IsActive', 'is_active'),
        ('ClassRefFullName', 'class'),
        ('CompanyName', 'company_name'),
        ('FirstName', 'first_name'),
        ('MiddleName', 'middle_name'),
        ('LastName', 'last_name'),
        ('Phone', 'phone'),
        ('AltPhone', 'phone_alternate'),
        ('Email', 'email'),
        ('Cc', 'email_alternate'),
        ('CustomerTypeRefFullName', 'customer_type'),
        ('TermsRefFullName', 'terms'),
        ('Balance', 'balance'),
        ('TotalBalance', 'total_balance'),
        ('OpenBalance', 'open_balance'),
        ('OpenBalanceDate', 'open_balance_date'),
        ('SalesTaxCodeRefFullName', 'sales_tax_code'),
        ('TaxCodeRefFullName', 'tax_code'),
        ('ItemSalesTaxRefFullName', 'item_sales_tax'),
        ('Notes', 'notes'),
        ('BillAddressAddr1', 'billing_street_1'),
        ('BillAddressAddr2', 'billing_street_2'),
        ('BillAddressAddr3', 'billing_street_3'),
        ('BillAddressAddr4', 'billing_street_4'),
        ('BillAddressAddr5', 'billing_street_5'),
        ('BillAddressCity', 'billing_city'),
        ('BillAddressState', 'billing_state'),
        ('BillAddressProvince', 'billing_province'),
        ('BillAddressCounty', 'billing_county'),
        ('BillAddressPostalCode', 'billing_postcode'),
        ('BillAddressCountry', 'billing_country'),
        ('BillAddressNote', 'billing_note'),
        ('BillAddressBlockAddr1', 'billing_block_1'),
        ('BillAddressBlockAddr2', 'billing_block_2'),
        ('BillAddressBlockAddr3', 'billing_block_3'),
        ('BillAddressBlockAddr4', 'billing_block_4'),
        ('BillAddressBlockAddr5', 'billing_block_5')
    )

    # MySQL columns that are refreshed when an existing row is re-synced
    # (everything except the immutable id/created fields).
    update_fields = (
        'time_modified',
        'name',
        'full_name',
        'is_active',
        'class',
        'company_name',
        'first_name',
        'middle_name',
        'last_name',
        'phone',
        'phone_alternate',
        'email',
        'email_alternate',
        'customer_type',
        'terms',
        'balance',
        'total_balance',
        'open_balance',
        'open_balance_date',
        'sales_tax_code',
        'tax_code',
        'item_sales_tax',
        'notes',
        'billing_street_1',
        'billing_street_2',
        'billing_street_3',
        'billing_street_4',
        'billing_street_5',
        'billing_city',
        'billing_state',
        'billing_province',
        'billing_county',
        'billing_postcode',
        'billing_country',
        'billing_note',
        'billing_block_1',
        'billing_block_2',
        'billing_block_3',
        'billing_block_4',
        'billing_block_5',
    )

    # Extra MySQL-only columns populated by append_custom_data().
    custom_mysql_fields = ('company_file', )

    def append_custom_data(self, raw):
        # Tag every source row with the company file it came from.
        # NOTE(review): assumes self.company_file is provided by the
        # Entity base class — confirm against lib.quickbooks.entity.
        return [each + (self.company_file,) for each in raw]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.