text stringlengths 38 1.54M |
|---|
import csv
def read_csv_file(file_name):
    """Read *file_name* as CSV and return all rows as a list of lists."""
    with open(file_name, 'r') as handle:
        return list(csv.reader(handle))
def filter_by_one_agrument(file, column, content):
    """Return the rows of *file* whose value at *column* equals *content*.

    (Name typo kept: ``filter`` calls this function by this exact name.)
    """
    return [row for row in file if row[column] == content]
def filter_by_one_argument_that_startswith(file, column, startswith):
    """Return the rows whose *column* value starts with *startswith*.

    Uses ``str.startswith`` instead of the original manual slice compare
    (``row[column][0:len(startswith)] == startswith``) — same semantics,
    including the empty prefix matching every row.
    """
    return [row for row in file if row[column].startswith(startswith)]
def filter_by_one_argument_that_contains(file, column, content):
    """Return the rows whose *column* value contains *content* as a substring."""
    return [row for row in file if content in row[column]]
def filter_by_one_argument_greater_than(file, column, gt):
    """Keep rows whose *column* value, parsed as float, is strictly greater than *gt*."""
    return [row for row in file if float(row[column]) > gt]
def filter_by_one_argument_less_than(file, column, lt):
    """Keep rows whose *column* value, parsed as float, is strictly less than *lt*."""
    return [row for row in file if float(row[column]) < lt]
def filter_by_with_order_by(file, column):
    """Sort *file* in place by *column* and return it.

    Numeric (float) order when every value in the column is a digit string,
    case-insensitive lexicographic order otherwise.

    Fixes two defects in the original O(n^2) hand-rolled sort: the
    comparison key was chosen per pair from the *pivot* row's digit-ness
    (inconsistent on mixed columns, and float() could raise on the other
    row), and the whole thing is now a single keyed ``list.sort``.
    """
    numeric = all(row[column].isdigit() for row in file)
    if numeric:
        file.sort(key=lambda row: float(row[column]))
    else:
        file.sort(key=lambda row: row[column].lower())
    return file
# NOTE: shadows the builtin ``filter`` within this module.
def filter(file_name, **kwargs):
    """Read *file_name* as CSV and apply every keyword as a filter/sort.

    Supported kwargs:
      <column>=value           exact match on that header column
      <column>__startswith=s   prefix match
      <column>__contains=s     substring match
      <column>__gt=x / __lt=x  numeric comparison
      order_by=<column>        sort rows by that column

    Returns the matching rows (header row excluded).  Because of the bare
    ``finally: return file`` below, any exception raised while filtering is
    swallowed and whatever has been filtered so far is returned —
    NOTE(review): confirm this best-effort behavior is intentional.
    """
    file = read_csv_file(file_name)
    keys = kwargs.keys()
    try:
        # An empty CSV makes file[0] raise IndexError; it is printed and
        # the empty list is returned via the finally block.
        header = file[0]
        file = file[1:]
    except Exception as e:
        print(e)
    else:
        for key in keys:
            if key in header:
                file = filter_by_one_agrument(
                    file, header.index(key), kwargs[key])
            else:
                if key == 'order_by':
                    file = filter_by_with_order_by(
                        file, header.index(kwargs[key]))
                else:
                    # Keys like "age__gt" split into column + operator.
                    # A key with no '__' that is not in the header raises
                    # IndexError here (swallowed by the finally-return).
                    keyword = key.split('__')[1]
                    k = key.split('__')[0]
                    if keyword == 'startswith':
                        file = filter_by_one_argument_that_startswith(
                            file, header.index(k), kwargs[key])
                    if keyword == 'contains':
                        file = filter_by_one_argument_that_contains(
                            file, header.index(k), kwargs[key])
                    if keyword == 'gt':
                        file = filter_by_one_argument_greater_than(
                            file, header.index(k), kwargs[key])
                    if keyword == 'lt':
                        file = filter_by_one_argument_less_than(
                            file, header.index(k), kwargs[key])
    finally:
        return file
def count(file_name, **kwargs):
    """Number of rows in *file_name* matching the query kwargs."""
    matched = filter(file_name, **kwargs)
    return len(matched)
def first(file_name, **kwargs):
    """First row of *file_name* matching the query kwargs (IndexError if none)."""
    rows = filter(file_name, **kwargs)
    return rows[0]
def last(file_name, **kwargs):
    """Last row of *file_name* matching the query kwargs (IndexError if none).

    The original called ``filter`` twice — reading and filtering the whole
    CSV a second time just to compute the length; a single call with a
    negative index is equivalent.
    """
    return filter(file_name, **kwargs)[-1]
def print_file_rows(file):
    """Print rows comma-joined, one per line.

    Accepts either a single row (a list of strings, printed as one line) or
    a list of rows.  An empty input prints nothing — the original raised
    IndexError on ``file[0]``.  Also uses ``isinstance`` instead of
    ``type(...) is str``.
    """
    if not file:
        return
    if isinstance(file[0], str):
        # A single row: its elements are the column values.
        print(", ".join(file))
    else:
        for row in file:
            print(", ".join(row))
def main():
    """Demo queries against the sample CSV files.

    NOTE(review): the first call reads 'empty_file.csv' — apparently to
    exercise the empty-file branch of filter(); confirm the file exists.
    """
    print_file_rows(filter("empty_file.csv",
                           full_name="Diana Harris", favourite_color="lime"))
    print(count("example_data.csv",
                full_name="Diana Harris", favourite_color="lime"))
    print_file_rows(first("example_data.csv",
                          full_name="Diana Harris", favourite_color="lime"))
    print_file_rows(last("example_data.csv",
                         full_name="Diana Harris", favourite_color="lime"))


if __name__ == '__main__':
    main()
|
# Runtime: O(n)
# We maintain two helper arrays: ``left`` holds the product of all
# elements before each index, and ``right`` holds the product of all
# elements after each index; the answer at i is left[i-1] * right[i+1].
def solution(array):
    """Return, for each index, the product of all *other* elements.

    O(n) time using a prefix pass then a suffix pass; no division, so
    zeros in the input are handled naturally.

    The original indexed ``right[1]`` / ``left[-2]`` unconditionally and
    crashed for inputs shorter than 2 elements; this version handles
    n == 0 and n == 1 (the empty product is 1).
    """
    n = len(array)
    if n == 0:
        return []
    if n == 1:
        # No "other" elements: the empty product.
        return [1]
    ans = [1] * n
    # Forward pass: ans[i] = product of array[0..i-1].
    prefix = 1
    for i in range(n):
        ans[i] = prefix
        prefix *= array[i]
    # Backward pass: multiply in the product of array[i+1..n-1].
    suffix = 1
    for i in range(n - 1, -1, -1):
        ans[i] *= suffix
        suffix *= array[i]
    return ans
# Demo: each output element is the product of all the other inputs
# -> [144, 72, 72, 48, 48, 36]
array = [1,2,2,3,3,4]
print(solution(array))
"""
Given two binary trees, write a function to check if they are the same or not.
Two binary trees are considered the same if they are structurally identical and the nodes have the same value.
Input: 1 1
/ \ / \
2 3 2 3
[1,2,3], [1,2,3]
Output: true
Input: 1 1
/ \
2 2
[1,2], [1,null,2]
Output: false
@author a.k
"""
from data_structures.trees.BinarySearchTree import BST, TreeNode
def is_same_tree(t1: TreeNode, t2: TreeNode) -> bool:
    """
    Return whether t1 and t2 are structurally identical with equal values.
    :param t1: root of tree 1
    :param t2: root of tree 2
    :Time: O(N)
    :Space: O(N) worst case — recursion depth equals the tree height
    :return: True iff the two trees are the same
    """
    if not t1 and not t2:
        # Two empty trees are the same.
        return True
    if not t1 or not t2:
        # Exactly one is empty: shapes differ.
        return False
    if t1.val != t2.val:
        # Value mismatch at this node.
        return False
    # Root matches; both subtrees must match too.
    return is_same_tree(t1.left, t2.left) and is_same_tree(t1.right, t2.right)
if __name__ == '__main__':
    # Identical insert sequences must produce equal trees.
    t1 = BST()
    t2 = BST()
    for i in range(11):
        t1.insert(i)
        t2.insert(i)
    assert is_same_tree(t1.root, t2.root)
    # One extra node breaks equality.
    t1.insert(66)
    assert not is_same_tree(t1.root, t2.root)
    # Empty vs non-empty.
    t1 = BST()
    t2 = BST()
    t2.insert(44)
    assert not is_same_tree(t1.root, t2.root)
    # Two empty trees are equal.
    t1 = BST()
    t2 = BST()
    assert is_same_tree(t1.root, t2.root)
    print('PASSED')
|
__author__ = 'Oleksandr Shapran'
'''
Задание 6_2
Набор 1
fu, tofu, snafu
Набор 2
futz, fusillade, functional, discombobulated
Задача: напишите регулярное выражение, которое будет соответствовать всем словам из
первого набора и ни одному из второго.
'''
import re
def main():
    """Print every word ending in 'fu' found in the sample text.

    The regex matches set 1 (fu, tofu, snafu) and none of set 2, because
    ``\\b`` after "fu" rejects words that merely contain it.
    """
    sample = """fu, tofu, snafu
futz, fusillade, functional, discombobulated"""
    for match in re.findall(r"\b\w*fu\b", sample):
        print(match)


if __name__ == "__main__":
    main()
|
'''
@author: frank
'''
import unittest
import time
from sftpbackupstorage import sftpbackupstorage
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import uuidhelper
class Test(unittest.TestCase):
    """Manual integration test for the SFTP backup-storage agent (Python 2)."""

    # Callback endpoint served by the local HTTP server started below.
    CALLBACK_URL = 'http://localhost:%s/testcallback' % sftpbackupstorage.SftpBackupStorageAgent.PORT

    def callback(self, req):
        # Invoked by the agent when the download task completes.
        rsp = jsonobject.loads(req[http.REQUEST_BODY])
        print "install_url: %s" % rsp.installUrl

    def testName(self):
        """End-to-end: connect the storage agent, then download an image."""
        server = sftpbackupstorage.SftpBackupStorageAgent()
        server.http_server.register_sync_uri('/testcallback', self.callback)
        server.http_server.start_in_thread()
        time.sleep(2)  # give the HTTP server time to come up
        # Step 1: connect the backup storage.
        cmd = sftpbackupstorage.ConnectCmd()
        cmd.storagePath = "/tmp"
        #url = sftpbackupstorage._build_url_for_test([sftpbackupstorage.SftpBackupStorageAgent.CONNECT_PATH])
        url = 'http://localhost:7171%s' % sftpbackupstorage.SftpBackupStorageAgent.CONNECT_PATH
        print url
        rsp = http.json_dump_post(url, cmd)
        # Step 2: download a test image; completion arrives via the callback.
        cmd = sftpbackupstorage.DownloadCmd()
        cmd.accountUuid = uuidhelper.uuid()
        cmd.bits = 64
        cmd.description = "Test"
        cmd.format = sftpbackupstorage.SftpBackupStorageAgent.IMAGE_TEMPLATE
        cmd.guestOsType = "rpm"
        cmd.hypervisorType = "KVM"
        cmd.imageUuid = uuidhelper.uuid()
        cmd.name = "test"
        cmd.timeout = 60
        cmd.url = "http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-6.noarch.rpm"
        cmd.urlScheme = "http"
        url = 'http://localhost:7171%s' % sftpbackupstorage.SftpBackupStorageAgent.DOWNLOAD_IMAGE_PATH
        print url
        rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL})
        print "post back"
        time.sleep(20)  # wait for the async download callback to fire
        server.http_server.stop()
if __name__ == "__main__":
    # Uncomment to run a single test by name:
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
# -*- coding: UTF-8 -*-
import sys
from jobControl import runner
from util import project_dir_manager, conf_parser, assert_message, hdfs_util, option_util
def run(args):
    """Launch the stable-point (de-oscillation) Spark job (Python 2).

    args: [script_name, conf_file, input_path, output_path]
    """
    conf_file = args[1]
    conf = conf_parser.ConfParser(conf_file)
    conf.load('DeOscillation')  # load the default de-oscillation config module
    # Stable Point
    print 'stable point start'
    stable_input = args[2]  # os.path.join(input_dir,'%sTotal%s.csv' % (month, user_type))
    print stable_input
    stable_output = args[3]  # os.path.join(output_dir, '%sStable%s.csv' % (month, user_type))
    stable_params = list()
    # Single job parameter: the stable-point time threshold (default '15').
    stable_params.append(conf.load_to_dict('StablePoint').get('oscillation.stable.point.time.threshold', '15'))
    cluster = conf.load_to_dict('cluster')
    cluster['input_path'] = stable_input
    cluster['output_path'] = stable_output
    cluster['params'] = stable_params
    cluster['main_class'] = conf.load_to_dict('StablePoint').get('main_class')
    cluster['driver'] = conf.load_to_dict('StablePoint').get('driver')
    stable_task = runner.SparkJob(**cluster)
    stable_task.run()
    print 'stable point end'


if __name__ == '__main__':
    run(sys.argv)
|
# From: https://gist.github.com/mrluanma/1480728
flatten = lambda lst: reduce(lambda l, i: l + flatten(i) if isinstance(i, (list, tuple)) else l + [i], lst, [])
import matplotlib.pyplot as plt
from itertools import *
import operator
from toolz import *
from toolz.curried import *
from efprob.efprob_qu import *
import sympy as sym
from sympy.interactive.printing import init_printing
# !pip install more-itertools
from more_itertools import *
def R(theta, dim=2):
    """
    Rotation array.
    ----
    theta: a scalar, or a list/tuple of scalars.
    dim: 2 (default) or 3
    return: rotation array(s) of shape (dim, dim); a list when several
        angles are given, a single array otherwise.

    Uses ``isinstance`` instead of ``type(theta) != list``; as a
    backward-compatible generalization, tuples of angles now work too
    (previously a tuple fell through the scalar path and broke).
    """
    if not isinstance(theta, (list, tuple)):
        theta = [theta]
    mats = []
    for t in theta:
        rot = np.array([[np.cos(t), -np.sin(t)],
                        [np.sin(t), np.cos(t)]])
        if dim == 3:
            # Embed the 2D rotation in a 3x3 matrix acting on the first
            # two axes: zero-pad, then set the (2, 2) entry to 1.
            rot = np.pad(rot, ((0, 1), (0, 1)), 'constant')
            rot[-1, -1] = 1
        mats.append(rot)
    return mats[0] if len(mats) == 1 else mats
def sqrt_diag(E):
    """Matrix square root(s) via SVD: U diag(sqrt(S)) Vh.

    Accepts one matrix or a list of matrices (np.linalg.svd then runs in
    batched mode over the leading axis); returns a single array or a list
    accordingly.
    """
    if type(E) != list:
        E = [E]
    U, S, Vh = np.linalg.svd(E)
    roots = [u.dot(np.diag(np.sqrt(s)).dot(vh)) for u, s, vh in zip(U, S, Vh)]
    return roots[0] if len(roots) == 1 else roots
# def prints(iterator, func= lambda arg: arg):
# for x in iterator:
# print(func(x))
def prints(*iterator, func=lambda arg: arg):
    """Print ``func(x)`` for every positional argument, one per line."""
    for item in iterator:
        print(func(item))
|
from .submodule_reference_interface import SubmoduleReferenceInterface
from .process_interface import ProcessInterface
class SubmoduleInterface(SubmoduleReferenceInterface, ProcessInterface):
    """SubmoduleInterface

    Represents a submodule
    """

    def __init__(self):
        # Back-reference to the owning module; None until set externally.
        self._module_reference = None  # ModuleReferenceInterface

    def __str__(self):
        # get_name() is presumably provided by SubmoduleReferenceInterface
        # — not visible in this file.
        return "----> SUBMODULE: " + self.get_name()

    def clean(self):
        # No-op here; presumably overridden by concrete submodules.
        pass
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Account(models.Model):
    """Minimal user-account record backed by plain CharFields."""
    # NOTE(review): the password is stored in a plain CharField (no hashing
    # visible here) and the email is a CharField rather than EmailField —
    # confirm whether this model is meant to bypass Django auth.
    userName = models.CharField(max_length = 120)
    userPassword = models.CharField(max_length = 120)
    userFirstName = models.CharField(max_length = 120)
    userLastName = models.CharField(max_length = 120)
    userEmail = models.CharField(max_length = 120)
from sequ import sequenceacq
import sys
#mp = open("x,")
# Parse whitespace-separated records: fields 0+1 form a key, fields 8/9 a
# start/end coordinate pair — assumes this column layout; TODO confirm
# against the producer of the input file.
gogo = sys.argv[1]    # input records file
gout = sys.argv[2]    # output file handed to sequenceacq
inside = sys.argv[3]  # extra argument forwarded to sequenceacq
file = open(gogo,"r")
lines = file.readlines()
lines = list(lines)
dic={}
temp = ""
for i in lines:
    i = i.split(" ")
    # Combine the first two fields into one composite key.
    i[0] = i[0] + "-" + i[1]
    # Normalize the coordinate pair so that i[8] <= i[9].
    if int(i[8]) > int(i[9]):
        k = i[8]
        i[8] = i[9]
        i[9] = k
    if temp != i[0]:
        # New key: start a fresh [min_start, max_end] interval.
        dic[i[0]] = [0,0]
        dic[i[0]][0] = i[8]
        dic[i[0]][1] = i[9]
        temp = i[0]
        camp = ""  # NOTE(review): assigned but never used
    if temp == i[0]:
        if abs(int(i[8]) - int(dic[i[0]][0]))>20000:
            # Outlier (start > 20000 away from the current interval):
            # dump the raw record comma-separated and skip it.
            for j in i:
                print(j,end = ",")
            print()
            continue
        else:
            # Grow the interval for this key.
            if int(i[8]) < int(dic[i[0]][0]):
                dic[i[0]][0] = i[8]
            if int(i[9]) > int(dic[i[0]][1]):
                dic[i[0]][1] = i[9]
print(dic)
file.close()
nam = list(dic.keys())
k = open(gout,"w")
for i in nam:
    # Expand each interval by 1000 on both sides and acquire the sequence.
    a = inside
    b = i.split("-")[1]
    m = i.split("-")[0]
    c = int(dic[i][0]) - 1000
    d = int(dic[i][1]) + 1000
    sequenceacq(a,b,c,d,m,k)
k.close()
|
from save_experience import *
from models import *
from utils import *
from keras.utils import plot_model
from keras import callbacks
from keras.models import model_from_json
from callbacks import LossHistory, saveEveryNModels
from data_generator import DataGenerator
import h5py
def trainModel(dict):
    """Train the segmentation network described by the config *dict*.

    Keys used: result_path, exp_name, input_path, labels_path_1/2,
    datagen (name/params), network, loss, loss_factor, finetune,
    finetune_exp, batch_size, nb_epoch.
    NOTE: the parameter shadows the ``dict`` builtin; kept because the
    name is part of the existing call convention.

    BUG FIX: the no-generator branch previously passed the undefined name
    ``train_set_x_label`` to ``model.fit`` (NameError); it now passes the
    two-label list used everywhere else.
    """
    print("Loading data ...")
    set_path = os.path.join(dict["result_path"], dict["exp_name"], 'Sets')
    # Images and the two distance-map ground truths, for train and valid.
    train_set_x = loadImages('../training_files_fixes.csv', dict["input_path"])
    train_set_x_label_1 = loadGTs('../training_files_fixes.csv', dict["labels_path_1"])
    train_set_x_label_2 = loadGTs('../training_files_fixes.csv', dict["labels_path_2"])
    valid_set_x = loadImages('../validation_files_fixes.csv', dict["input_path"])
    valid_set_x_label_1 = loadGTs('../validation_files_fixes.csv', dict["labels_path_1"])
    valid_set_x_label_2 = loadGTs('../validation_files_fixes.csv', dict["labels_path_2"])
    print("Normalizing ...")
    train_set_x = normalize(train_set_x)
    valid_set_x = normalize(valid_set_x)
    train_set_x_label_1 = normalize(train_set_x_label_1)
    train_set_x_label_2 = normalize(train_set_x_label_2)
    valid_set_x_label_1 = normalize(valid_set_x_label_1)
    valid_set_x_label_2 = normalize(valid_set_x_label_2)
    if dict["datagen"]["name"] == 'spinal2D':
        datagen = DataGenerator(train_set_x, [train_set_x_label_1, train_set_x_label_2], dict["datagen"]["params"], batch_size=dict["batch_size"], shuffle=True, plotgenerator=5)
    if dict["network"] == "GpunetBn":
        model = get_gpunet_bn(dict["loss"], dict['loss_factor'])
    if dict['finetune']:
        print('With finetune on exp : ' + dict["finetune_exp"])
        # Load the best weights of the experiment we fine-tune from.
        model.load_weights(os.path.join('../../../Results/Segmentation_prediction', dict["finetune_exp"], 'best_weights.hdf5'))
    else:
        print('No finetune')
    print('saving model ...')
    plot_model(model, show_shapes=True, to_file=dict["result_path"] + '/' + dict["exp_name"] + '/model.png')
    # Save the model architecture as JSON next to the experiment results.
    model_json = model.to_json()
    with open(os.path.join(dict["result_path"], dict["exp_name"], 'model.json'), 'w') as json_file:
        json_file.write(model_json)
    # Checkpoint the best model (by validation loss) plus a loss-curve logger.
    best_weights_path = os.path.join(dict["result_path"], dict["exp_name"], 'best_weights.hdf5')
    saveBestModel = callbacks.ModelCheckpoint(best_weights_path, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
    history = LossHistory(os.path.join(dict["result_path"], dict["exp_name"]), loss=dict["loss"], batch_size=dict["batch_size"])
    step_per_epoch = len(train_set_x) / dict["batch_size"]
    print(step_per_epoch)
    if dict["datagen"]["name"] is not None:
        # NOTE(review): ``datagen`` only exists when name == 'spinal2D';
        # any other non-None name raises NameError here — confirm intent.
        print('Training exp #' + (dict["exp_name"])[0] + ' using data generator : ' + dict["datagen"]["name"])
        model.fit_generator(datagen.flow(train_set_x, [train_set_x_label_1, train_set_x_label_2], batch_size=dict["batch_size"], shuffle=True),
                            steps_per_epoch=step_per_epoch, epochs=dict["nb_epoch"],
                            verbose=1, validation_data=(valid_set_x, [valid_set_x_label_1, valid_set_x_label_2]),
                            callbacks=[saveBestModel, history],
                            max_q_size=1
                            )
    else:
        print('training experience #' + (dict["exp_name"])[0] + ' without data generator')
        model.fit(train_set_x, [train_set_x_label_1, train_set_x_label_2], batch_size=dict["batch_size"], epochs=dict["nb_epoch"], validation_data=(valid_set_x, [valid_set_x_label_1, valid_set_x_label_2]), callbacks=[saveBestModel, history])
def launchExperience(dict):
    """Run one experiment described by config *dict* (just trains for now).

    NOTE: the parameter shadows the ``dict`` builtin — kept for interface
    compatibility with existing callers.
    """
    # generateTrainingAndValidSetsCSV(dict["percent_of_training_file"], dict["exp_name"])
    trainModel(dict)
if __name__ == '__main__':

    def _build_exp_config(loss_factor):
        """Build one experiment config dict and register it via saveExperience.

        The original script repeated this ~35-line block four times,
        differing only in ``loss_factor``.
        """
        cfg = {
            "loss": 'dice_loss',  # mean_squared_error , dice_loss
            "network": 'GpunetBn',  # smallRegNet simpleGpunet GpunetBn
            "batch_size": 10,
            "loss_factor": loss_factor,
            "nb_epoch": 400,
            "percent_of_training_file": 0.8,
            "finetune": False,
            "finetune_exp": '',
            "result_path": '../../../Results/Segmentation_prediction/',
            "input_path": '../../../DATA/data/training_distance_map/reduced_images_1024_256/',
            "labels_path_1": '../../../DATA/labels/training_distance_map/distance_map_white_12_1024_256/',
            "labels_path_2": '../../../DATA/labels/training_distance_map/distance_map_white_2_1024_256/',
            "input_shape": 'reducedImages-512-128',
            "labels_shape": 'distanceMapWhite-3-512-128',
            "datagen": {"name": 'spinal2D'},  # 'spinal2D' , None
            "py_list": ['metrics.py', 'models.py', 'image_augmentation.py'],
            "sh_list": [],
        }
        cfg["exp_name"] = ('network=' + cfg["network"] + '_loss=' + cfg["loss"]
                           + '_datagen=' + cfg["datagen"]["name"]
                           + '_inputShape=' + cfg["input_shape"]
                           + '_labelsShape=' + cfg["labels_shape"])
        # saveExperience archives the listed files and returns the (possibly
        # uniquified) experiment directory name.
        cfg["exp_name"] = saveExperience(cfg["result_path"], cfg["exp_name"],
                                         cfg["py_list"], cfg["sh_list"])
        # Dictionary that contains the information for the DataGenerator.
        datagen_params = {"augmentation": {
            'augmentation_choices': [True],  # True if you want to use random_tranform
            'random_transform': {
                'horizontal_switch': True,
                'vertical_switch': False,
                'width_shift_range': True,
                'height_shift_range': True,
                'rotate': True,
                'light': False,
                'gaussian': True,
                'dark': False,
            },
            # Depends on the exp_name returned by saveExperience above.
            'save_folder': cfg["result_path"] + '/' + cfg["exp_name"] + '/Augmented_images/',
        }}
        cfg["datagen"]["params"] = datagen_params
        return cfg

    # NOTE(review): the original script rebuilt ``exp_dico`` from scratch for
    # each of the four sections, so only the LAST config ("exp3",
    # loss_factor=5) ever reached the launch loop; the earlier configs only
    # triggered the saveExperience side effect.  That behavior is preserved
    # here — confirm whether all four experiments were meant to run.
    exp_dico = {}
    for exp, lf in [("exp1", 15), ("exp1", 10), ("exp2", 7), ("exp3", 5)]:
        exp_dico = {exp: _build_exp_config(lf)}
    for key in exp_dico:
        print(exp_dico[key]["input_shape"], exp_dico[key]["labels_shape"])
        saveAsJson(exp_dico[key])
        loss_factor = exp_dico[key]['loss_factor']
        launchExperience(exp_dico[key])
|
# Approximate pi with the first 10 terms of the Leibniz series:
# 4 * (1 - 1/3 + 1/5 - 1/7 + ...). Converges slowly: prints ~3.0418.
# The accumulator was renamed from ``sum``, which shadowed the builtin.
sign = 1
total = 0
for denom in range(1, 20, 2):
    total = total + (sign * 4.0) / denom
    sign = -sign
print(total)
import numpy as np
import os
import matplotlib.pyplot as plt
import pywt
from PIL import Image
# enter relative path from the file of execution
# returns 3D numpy array of the image
def load_image(path):
    """Load the image at *path* (relative to the execution directory).

    Returns a 3D numpy array on success; on an I/O error the offending
    path is printed and 0 is returned instead.
    """
    try:
        return np.array(Image.open(path))
    except IOError:
        print(path)
        return 0
# enter the channels [R, G, B] to get respective channel images
def show_image(img, path = '', channel = 0):
if channel == 'R':
img[:,:,1] *= 0
img[:,:,2] *= 0
elif channel == 'G':
img[:,:,0] *= 0
img[:,:,2] *= 0
elif channel == 'B':
img[:,:,1] *= 0
img[:,:,0] *= 0
img = Image.fromarray(img)
if len(path)!=0 :
img.save(path, format = 'png')
img.show()
|
import os
import utfutil
def main():
    """Absolute-path --stop test (Python 2): repy must produce no output."""
    # Absolute Path Stop Test
    stop_file = 'junk_test.out'
    # Clean up the stop file if it already exists.
    if os.path.exists(stop_file): os.remove(stop_file)
    # Use an absolute path for the stop file.
    current_directory = os.getcwd()
    stop_file = os.path.join(current_directory, stop_file)
    repy_args = ['--stop', stop_file,
                 '--status', 'foo',
                 'restrictions.default',
                 'stop_testsleepwrite.py']
    result = (out, error) = utfutil.execute_repy(repy_args)
    # Best-effort cleanup of the stop file created by the repy run.
    if os.path.exists(stop_file): os.remove(stop_file)
    # Any stdout or stderr from repy means the stop mechanism failed.
    if out or error:
        print 'FAIL'


if __name__ == '__main__':
    main()
|
from time import sleep
# The three pegs: each is a stack of disk sizes, bottom first.
bar=[[],[],[]]
# Placeholder; replaced by user input at the bottom of the script.
numOfDisks=1
# Human-readable peg names used in the move messages.
barName=['left','middle','right']
def move(bfrom, to, disk):
    """Move the top *disk* disks from peg *bfrom* to peg *to* (recursive Hanoi).

    bfrom/to: peg indices into the global ``bar`` list.
    disk: number of disks to move (also the label of the largest one moved).
    Side effects: mutates ``bar``, redraws the board, prints each move and
    sleeps 0.1s per move for animation.
    """
    # Pick the one peg that is neither source nor destination as the spare.
    moveTo=0
    while True:
        if moveTo!=bfrom and moveTo!=to:
            break
        else:
            moveTo+=1
    if disk==1:
        # Base case: move the single smallest disk directly.
        del bar[bfrom][-1]
        bar[to].append(disk)
        draw()
        print('moving disk1 to '+barName[to]+'\n')
        sleep(0.1)
    else:
        # Classic Hanoi: park the smaller pile on the spare peg, move the
        # biggest disk, then bring the parked pile on top of it.
        move(bfrom,moveTo,disk-1)
        del bar[bfrom][-1]
        bar[to].append(disk)
        draw()
        print('moving disk'+str(disk)+' to '+barName[to]+'\n')
        sleep(0.1)
        move(moveTo,to,disk-1)
# Scratch buffer: peg index -> string printed for the current row.
screen={}
def draw():
    """Print the current board, one row per disk level (top row first)."""
    for y in range(numOfDisks):
        for c in range(3):
            try:
                # Disk number at this height on peg c.
                screen[c]=str(bar[c][numOfDisks-y-1])
            except IndexError:
                # No disk at this height: draw the bare pole.
                screen[c]='|'
        print('\t'+screen[0]+'\t'+screen[1]+'\t'+screen[2])
    print('\t-----------------')
# Read the disk count, stack the disks (largest at the bottom) on the left
# peg, show the initial board, then solve left -> middle.
numOfDisks = int(input('how many disks would you like to have?\n>>'))
for i in range(numOfDisks):
    bar[0].append(numOfDisks-i)
draw()
move(0,1,numOfDisks)
print('congrats! it\'s all done!')
|
#Ageel 9/9/2019
#100 Days of Python
#Day 20 - Sets
# Day 20 - Sets: duplicates collapse, and "Banana" vs "banana" are distinct.
fruit = {"apple", "banana", "cherry", "Banana", "apple"}
fruit.add("mango")
fruit |= {"pineapple", "orange"}
for item in fruit:
    print(item)
# Membership is case-sensitive; there is no "onion" at all, so this is False.
print("Onion is a fruit ?" + str("onion" in fruit))
"""
cables.py: Module is used to implement cable section analysis and an event study
"""
__author__ = "Chakraborty, S."
__copyright__ = ""
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "Chakraborty, S."
__email__ = "shibaji7@vt.edu"
__status__ = "Research"
import numpy as np
import pandas as pd
from loguru import logger
from scubas.datasets import PROFILES
from scubas.models import OceanModel
from scubas.utils import RecursiveNamespace, frexp102str
class CableSection(object):
    """
    This class holds a cable section of a big cable

    The directed length may be specified three ways, tried in order:
    (1) a total ``length`` (split evenly between north/east components),
    (2) explicit ``length_north`` / ``length_east`` components, or
    (3) lat/lon ``edge_locations`` (WGS84-based arc-length approximation).

    Parameters:
    -----------
    sec_id: identifier of this cable section
    directed_length: dict describing the section geometry (see above)
    """

    def __init__(
        self,
        sec_id,
        directed_length=dict(
            length=None,
            length_north=None,
            length_east=None,
            edge_locations=dict(
                initial=dict(lat=0.0, lon=0.0), final=dict(lat=0.0, lon=0.0)
            ),
        ),
    ):
        # NOTE(review): mutable dict default is shared across calls; safe
        # only because it is never mutated here.
        self.sec_id = sec_id
        self.directed_length = RecursiveNamespace(**directed_length)
        self.components = []
        self.compute_lengths()
        return

    def check_location(self, loc):
        """
        Check lat/lon exists in file or not
        """
        return hasattr(loc, "lat") and hasattr(loc, "lon")

    def compute_lengths(self):
        """
        Compute all the length of the Cable Section

        Sets ``length``, ``length_north``, ``length_east`` (km) and the
        ``components`` / ``cable_lengths`` mapping used downstream.
        """
        dl = self.directed_length
        if hasattr(dl, "length") and dl.length:
            # Total length only: split evenly between north and east.
            # (Also records self.length, which the original omitted in
            # this branch, leaving the attribute undefined.)
            self.length = dl.length
            self.length_north = dl.length / np.sqrt(2)
            self.length_east = dl.length / np.sqrt(2)
        elif (hasattr(dl, "length_north") and dl.length_north is not None) or (
            hasattr(dl, "length_east") and dl.length_east is not None
        ):
            logger.info("Cable length from directed length")
            # Treat a missing or None component as 0.0; the original used
            # the raw attribute value, letting None reach the sqrt below.
            self.length_north = (
                dl.length_north
                if getattr(dl, "length_north", None) is not None
                else 0.0
            )
            self.length_east = (
                dl.length_east
                if getattr(dl, "length_east", None) is not None
                else 0.0
            )
            self.length = np.sqrt(self.length_east**2 + self.length_north**2)
        elif (
            hasattr(dl, "edge_locations")
            and self.check_location(dl.edge_locations.initial)
            and self.check_location(dl.edge_locations.final)
        ):
            # WGS84-derived arc-length formulas; the Earth-model constants
            # are folded into the 111.133/0.56 and 111.5065/0.1872
            # coefficients below.
            # Mean latitude of the two end points.
            lamb = 0.5 * (
                dl.edge_locations.initial.lat + dl.edge_locations.final.lat
            )
            # Meridional (north-south) arc length per degree of latitude.
            self.length_north = (
                111.133 - 0.56 * np.cos(np.deg2rad(2 * lamb))
            ) * np.abs(
                dl.edge_locations.final.lat - dl.edge_locations.initial.lat
            )
            # Parallel (east-west) arc length per degree of longitude.
            self.length_east = (
                (111.5065 - 0.1872 * np.cos(np.deg2rad(2 * lamb)))
                * np.cos(np.deg2rad(lamb))
                * np.abs(
                    dl.edge_locations.initial.lon - dl.edge_locations.final.lon
                )
            )
            self.length = np.sqrt(self.length_north**2 + self.length_east**2)
        else:
            # No geometry at all: warn and fall back to zero lengths so the
            # attribute reads below do not raise AttributeError (the
            # original crashed on ``self.length_north`` here).
            logger.warning("No cable edge information available")
            self.length, self.length_north, self.length_east = 0.0, 0.0, 0.0
        self.components = ["X", "Y"]
        self.cable_lengths = {"X": self.length_north, "Y": self.length_east}
        return
class TransmissionLine(CableSection):
"""
This class is dedicated for DSTL.
Parameters:
-----------
"""
    def __init__(
        self,
        sec_id,
        directed_length=dict(
            length=None,
            length_north=None,
            length_east=None,
            edge_locations=dict(
                initial=dict(lat=0.0, lon=0.0), final=dict(lat=0.0, lon=0.0)
            ),
        ),
        elec_params=dict(
            site=PROFILES.CS,
            width=1.0,
            flim=[1e-6, 1e0],
        ),
        active_termination=dict(
            right=None,
            left=None,
        ),
    ):
        """
        Properties:
        -----------
        sec_id: identifier of this cable section
        directed_length: geometry dict (see CableSection)
        elec_params: conductivity site profile, cable width, frequency limits
        active_termination: optional termination descriptions for each end
        """
        # NOTE(review): mutable dict/list defaults are shared across calls;
        # safe only while they are never mutated.
        # Compute phyiscal properties of the cable section
        super().__init__(sec_id, directed_length=directed_length)
        self.elec_params = RecursiveNamespace(**elec_params)
        self.active_termination = RecursiveNamespace(**active_termination)
        # Extract electrical properties of the cable
        (
            self.C,
            self.R,
            self.Z,
            self.Y,
            self.gma,
            self.Z0,
        ) = self.calc_trasmission_line_parameters()
        # End potentials; populated later by the solver.
        self.end_pot = RecursiveNamespace(**dict())
        return
def to_str(self):
"""
Create a string of properties for display
"""
o = "Z: %s (Ohm/km)\n" % (frexp102str(self.Z * 1e3))
o += "Y: %s (S/km)\n" % (frexp102str(self.Y * 1e3))
o += "Z0: %s (Ohm)\n" % (frexp102str(self.Z0))
o += "gma: %s (/km)\n" % (frexp102str(self.gma * 1e3))
o += "Ad: %s (km)" % (frexp102str(1e-3 / self.gma))
return o
    def compile_oml(self, bfield_data_files=[], p=None, csv_file_date_name="Date"):
        """
        Create ocean model

        bfield_data_files: optional list of magnetic-field CSV files; when
            non-empty they are loaded, converted to E-fields, and the
            equivalent pi circuit is computed.
        p: forwarded to ``to_Efields`` — semantics defined by
            scubas.models.OceanModel (not visible here).
        csv_file_date_name: name of the date column in the CSV files.
        Returns self so calls can be chained.
        """
        # NOTE(review): mutable default list argument; safe only while it
        # is never mutated.
        self.model = OceanModel(
            self.elec_params.site,
            flim=self.elec_params.flim,
        )
        if bfield_data_files and len(bfield_data_files) > 0:
            self.model.read_Bfield_data(
                bfield_data_files, csv_file_date_name=csv_file_date_name
            )
            self.model.to_Efields(p=p)
            self.compute_eqv_pi_circuit()
        return self
    def add_active_termination(self):
        """
        Adding active terminations

        For each configured termination (right/left), compute its own
        transmission-line parameters and attach the Norton-equivalent
        quantities (Yn, Jn) plus the raw line parameters to the
        termination object.  Requires ``self.Efield`` / ``self.components``
        to be set (done by ``compute_eqv_pi_circuit``).
        """
        terminators = [
            self.active_termination.right,
            self.active_termination.left,
        ]
        for at in terminators:
            if at:
                # ``at`` is passed as the *site* argument — it must carry
                # name/get_thicknesses/get_resistivities (TODO confirm).
                C, R, Z, Y, gma, Z0 = self.calc_trasmission_line_parameters(at)
                Jn = dict()
                for a in self.components:
                    E = np.array(self.Efield[a]) * 1.0e-3 / 1.0e3
                    Jn[a] = E / Z  # Assuming input mV/km convert to V/m
                setattr(at, "Yn", 1.0 / Z0)  # Norton admittance
                setattr(at, "Jn", Jn)  # Norton current source per component
                setattr(at, "Z0", Z0)
                setattr(at, "R", R)
                setattr(at, "C", C)
                setattr(at, "Z", Z)
                setattr(at, "Y", Y)
                setattr(at, "gma", gma)
        return
def calc_trasmission_line_parameters(self, site=None, width=None):
    """Compute distributed transmission-line parameters.

    Returns (C, R, Z, Y, gma, Z0): capacitance-like and resistance-like layer
    aggregates, series impedance, shunt admittance, propagation constant and
    characteristic impedance. (Method name's 'trasmission' typo is kept for
    caller compatibility.)
    """
    width = width or self.elec_params.width
    site = site or self.elec_params.site
    logger.info(f"Cable width: {width}")
    # Layer indices used for the aggregates differ between Land and other sites
    if site.name == "Land":
        C = width * ((site.get_thicknesses(0) / site.get_resistivities(0)))
        R = (
            (site.get_thicknesses(1) * site.get_resistivities(1))
            + (site.get_thicknesses(2) * site.get_resistivities(2))
        ) / width
    else:
        C = width * (
            (site.get_thicknesses(1) / site.get_resistivities(1))
            + (site.get_thicknesses(0) / site.get_resistivities(0))
        )  # in m/ohm
        R = (
            (site.get_thicknesses(2) * site.get_resistivities(2))
            + (site.get_thicknesses(3) * site.get_resistivities(3))
        ) / width  # in m*ohm
    Z, Y = 1.0 / C, 1.0 / R  # in Ohm-m and S/m
    gma, Z0 = np.sqrt(Z * Y), np.sqrt(Z / Y)  # in /m and Ohm
    return C, R, Z, Y, gma, Z0
def compute_eqv_pi_circuit(self, Efield=None, components=None):
    """
    Calculate equivalent pi circuit model.
    X component is North (n), Y component East (e).

    Efield: DataFrame-like of E-field time series per component
        (defaults to self.model.Efield)
    components: list of component keys, e.g. ["X", "Y"]
        (defaults to self.model.components)

    Side effects: populates self.Ye, self.Yp2, self.Ie per component,
    stores Efield/components on self, attaches active terminations and
    computes the section potential series self.V.
    """
    Efield = Efield if Efield is not None else self.model.Efield
    components = components if components is not None else self.model.components
    # Per-component series admittance, half shunt admittance, and current source
    self.Ye, self.Yp2, self.Ie = {}, {}, {}
    for a in components:
        L = self.cable_lengths[a]
        L *= 1000.0  # Convert km to m
        E = (
            np.array(Efield[a]) * 1.0e-3 / 1.0e3
        )  # Assuming input mV/km convert to V/m
        # Standard pi-equivalent of a transmission line segment of length L
        self.Ye[a] = 1.0 / (self.Z0 * np.sinh(self.gma * L))
        self.Yp2[a] = (np.cosh(self.gma * L) - 1) * self.Ye[a]
        self.Ie[a] = E / self.Z
    self.Efield = Efield
    self.components = components
    self.add_active_termination()
    self.compute_Vj(Efield.index.tolist())
    return
def compute_Vj(self, time):
    """
    Compute the total electric potential induced along the cable segment:
    Vj = Ej_n(t) * Lj_n + Ej_e(t) * Lj_e (result in mV, indexed by Time).
    """
    frame = pd.DataFrame()
    frame["Time"] = time
    frame["Vj"] = 0.0
    # Potential in mV: E (mV/km) times length (km), summed over components
    for comp in self.components:
        frame["Vj"] += np.array(self.Efield[comp]) * self.cable_lengths[comp]
    self.V = frame.set_index("Time")
    return
def _pot_alongCS_(self, Vi=None, Vk=None, ln=1000):
"""
Caclulate potentials along the cable section
"""
Vi = Vi if Vi is not None else self.end_pot.Vi
Vk = Vk if Vk is not None else self.end_pot.Vk
L = self.length * 1e3
x = np.linspace(0, L, ln + 1)
V = (
(Vk * np.exp(self.gma * L) - Vi)
* np.exp(-self.gma * (L - x))
/ (np.exp(self.gma * L) - np.exp(-self.gma * L))
) + (
(Vi * np.exp(self.gma * L) - Vk)
* np.exp(-self.gma * x)
/ (np.exp(self.gma * L) - np.exp(-self.gma * L))
)
return V, x / 1.0e3
class Cable(object):
    """
    This class holds a cable: an ordered list of cable sections joined at
    nodes, solved with nodal analysis.

    Parameters:
    -----------
    cable_sections: list of compiled cable-section objects
    components: component keys for the E/B fields, e.g. ["X"] or ["X", "Y"]
    """

    def __init__(
        self,
        cable_sections,
        components,
    ):
        self.cable_sections = cable_sections
        self.components = components
        # One node at every section boundary (N sections -> N + 1 nodes)
        n_nodes = len(self.cable_sections) + 1
        self.node_ids = np.arange(n_nodes)
        self.left_edge, self.right_edge = 0, self.node_ids[-1]
        self.nodes = {}  # nid -> {component: namespace(Ji, Yii)}
        self.compile()
        return

    def compile(self):
        """
        Run nodal analysis for the cable and assemble total parameters
        (per-section E-fields, potentials, and end-of-cable voltages).
        """
        self.run_nodal_analysis()
        self.solve_admitance_matrix()
        self.consolidate_final_result()
        U0, U1 = self._pot_end_cable_()
        # Total parameter calculations
        self.tot_params = pd.DataFrame()
        self.tot_params["Time"] = self.cable_sections[0].Efield.index.tolist()
        self.tot_params["V(v)"] = 0.0
        for a in self.components:
            self.tot_params["E." + a] = 0.0
            for i, tl in enumerate(self.cable_sections):
                self.tot_params["E.%s.%02d" % (a, i)] = np.array(tl.Efield[a])
                self.tot_params["E." + a] += np.array(tl.Efield[a])
        for i, tl in enumerate(self.cable_sections):
            # Section potentials are in mV; store volts
            self.tot_params["V(v).%02d" % (i)] = np.array(tl.V.Vj) / 1e3
            self.tot_params["V(v)"] += np.array(tl.V.Vj) / 1e3
        self.tot_params["Vt(v)"] = U0 - U1 + np.array(self.tot_params["V(v)"])
        self.tot_params["U0"], self.tot_params["U1"] = U0, U1
        self.tot_params = self.tot_params.set_index("Time")
        return

    def run_nodal_analysis(self):
        """Build, per node and component, the current source Ji and the
        admittance row Yii used by the nodal equations."""
        logger.info(f"Eq. nodal analysis.")
        sections = self.cable_sections
        for nid in self.node_ids:
            self.nodes[nid] = {}
            logger.info(f"Node:{nid}")
            for a in self.components:
                node = RecursiveNamespace(**dict())
                Yii = np.zeros_like(self.node_ids, dtype=float)
                if nid == self.left_edge:
                    # Left boundary node: only the first section contributes
                    Ji = -1.0 * sections[nid].Ie[a]
                    Yii[nid : nid + 2] = np.array(
                        [
                            sections[nid].Ye[a] + sections[nid].Yp2[a],
                            -sections[nid].Ye[a],
                        ]
                    )
                    if sections[nid].active_termination.left:
                        Yii[nid] = Yii[nid] + sections[nid].active_termination.left.Yn
                        Ji = (
                            sections[nid].active_termination.left.Jn[a]
                            - sections[nid].Ie[a]
                        )
                elif nid == self.right_edge:
                    # Right boundary node: only the last section contributes
                    Ji = sections[-1].Ie[a]
                    Yii[nid - 1 : nid + 1] = np.array(
                        [
                            -sections[-1].Ye[a],
                            sections[-1].Yp2[a] + sections[-1].Ye[a],
                        ]
                    )
                    if sections[-1].active_termination.right:
                        Yii[nid] = Yii[nid] + sections[-1].active_termination.right.Yn
                        Ji = Ji - sections[-1].active_termination.right.Jn[a]
                else:
                    # Interior node: the two adjacent sections contribute
                    Ji = sections[nid - 1].Ie[a] - sections[nid].Ie[a]
                    Yii[nid - 1 : nid + 2] = np.array(
                        [
                            -sections[nid - 1].Ye[a],
                            sections[nid - 1].Ye[a]
                            + sections[nid].Ye[a]
                            + sections[nid - 1].Yp2[a]
                            + sections[nid].Yp2[a],
                            -sections[nid].Ye[a],
                        ]
                    )
                setattr(node, "Ji", Ji)
                setattr(node, "Yii", Yii)
                self.nodes[nid][a] = node
        return

    def solve_admitance_matrix(self):
        """
        Solve: [V] = inv([Y]).[J] for each component, then store the end
        potentials (Vi, Vk) on every cable section.
        """
        self.V = {}
        logger.info(f"Solving admitance matrix.")
        for a in self.components:
            logger.info(f"Solving for component {a}.")
            J, Y = [], []
            for nid in self.node_ids:
                n = self.nodes[nid][a]
                J.append(n.Ji)
                Y.append(n.Yii)
            J, Y = np.array(J), np.array(Y)
            logger.info(f"Sh(J):{J.shape}, Sh(Y):{Y.shape}")
            iY = np.linalg.inv(Y)
            self.V[a] = np.matmul(iY, J)
            logger.info(f"Sh(V):{self.V[a].shape}")
        logger.info(f"Set V[a] in each cable sections")
        for k, cs in enumerate(self.cable_sections):
            # End potentials are summed over components when two are present
            Vi, Vk = (
                self.V[self.components[0]][k, :],
                self.V[self.components[0]][k + 1, :],
            )
            if len(self.components) == 2:
                Vi += self.V[self.components[1]][k, :]
                Vk += self.V[self.components[1]][k + 1, :]
            setattr(cs.end_pot, "Vi", Vi)
            setattr(cs.end_pot, "Vk", Vk)
        return

    def consolidate_final_result(self):
        """
        Estimated Vi,k are the voltages at the end
        of the cable sections. Here we store estimated
        voltages in csv format, and line parameters
        (R, C, gma, L, Z0, Ln, Le, Ye[n,e], Yp2[n,e], Ie[n,e])
        in json format.
        """
        o = {"nodes": {}, "cables": {}}
        logger.info(f"Consolidate all results.")
        for bid, tx in enumerate(self.cable_sections):
            bid += 1
            o["cables"][bid] = {
                "R": tx.R,
                "C": tx.C,
                "gma": tx.gma,
                "Z0": tx.Z0,
                "ln": tx.length_north,
                "le": tx.length_east,
                "len_km": tx.length,
                "Ye": {},
                "Yp2": {},
                "Ie": {},
            }
            for a in self.components:
                o["cables"][bid]["Ye"][a] = tx.Ye[a]
                o["cables"][bid]["Yp2"][a] = tx.Yp2[a]
                o["cables"][bid]["Ie"][a] = tx.Ie[a].tolist()
        for nid in self.node_ids:
            nid = str(nid)
            # Bug fix: the dict was re-created per component
            # (o["nodes"][nid] = {a: {}}), so only the last component survived
            o["nodes"][nid] = {}
            for a in self.components:
                n = self.nodes[int(nid)][a]
                o["nodes"][nid][a] = {}
                o["nodes"][nid][a]["Ji"] = n.Ji.tolist()
                o["nodes"][nid][a]["Yii"] = n.Yii.tolist()
        self.result = o
        return

    def save(self, folder):
        """
        Save all analyzed data: nodal-analysis results as JSON and the
        total parameter time series as CSV.
        """
        with open(folder + "est_cable_props.json", "w") as f:
            f.write(json.dumps(self.result, sort_keys=True, indent=4))
        self.tot_params.to_csv(folder + "sim-params.csv", float_format="%g")
        return

    def _pot_endCS_byComp_(self, cable_section_id, comp, unit="V", timestamp=None):
        """
        Provide the voltage at the ends of one cable section for a single
        component. `unit` selects V ("V") or mV scaling.
        """
        # Bug fix: the original overwrote `unit` and then used an undefined `u`
        u = 1.0 if unit == "V" else 1000.0
        U0, U1 = (
            np.round(self.V[comp][cable_section_id, :] * u, 2),
            np.round(self.V[comp][cable_section_id + 1, :] * u, 2),
        )
        # Bug fix: `b` was undefined in this log message
        logger.info(
            f"Max(V) at the end of Section-{cable_section_id}(Component-{comp}), {np.max(U0)} {np.max(U1)}"
        )
        if timestamp:
            U0, U1 = U0[timestamp], U1[timestamp]
        return U0, U1

    def _pot_endCS_(self, cable_section_id, unit="V", timestamp=None):
        """
        Provide the total voltage at the ends of one cable section,
        summed over components.
        """
        # Bug fix: the original called nonexistent `_pot_alongCS_byComp_`
        U0, U1 = self._pot_endCS_byComp_(
            cable_section_id, self.components[0], unit, timestamp
        )
        if len(self.components) == 2:
            u0, u1 = self._pot_endCS_byComp_(
                cable_section_id, self.components[1], unit, timestamp
            )
            U0 += u0
            U1 += u1
        return U0, U1

    def _pot_end_cable_byComp_(self, comp="X", unit="V", timestamp=None):
        """
        Provide the voltage at the two ends of the whole cable for a single
        component.
        """
        u = 1.0 if unit == "V" else 1000.0
        U0, U1 = np.round(self.V[comp][0, :] * u, 2), np.round(
            self.V[comp][-1, :] * u, 2
        )
        logger.info(f"Max(V) at the end (Component-{comp}), {np.max(U0)} {np.max(U1)}")
        if timestamp:
            U0, U1 = U0[timestamp], U1[timestamp]
        return U0, U1

    def _pot_end_cable_(self, unit="V", timestamp=None):
        """
        Provide the total voltage at the two ends of the whole cable,
        summed over components.
        """
        U0, U1 = self._pot_end_cable_byComp_(self.components[0], unit, timestamp)
        if len(self.components) == 2:
            u0, u1 = self._pot_end_cable_byComp_(self.components[1], unit, timestamp)
            U0 += u0
            U1 += u1
        return U0, U1

    def _pot_along_cable_(self, timestamp, unit="V"):
        """Concatenate the per-section potential profiles along the cable.

        NOTE(review): `timestamp` and `unit` are currently unused here —
        kept for interface compatibility; confirm intended use.
        """
        Vcable, Lcable = [], []
        for cid, csec in enumerate(self.cable_sections):
            V, Lx = csec._pot_alongCS_()
            Vcable.extend(V.tolist())
            if cid == 0:
                Lcable = Lx.tolist()
            else:
                # Offset each section's x-axis by the end of the previous one
                Lcable.extend((Lx + Lcable[-1]).tolist())
        return Vcable, Lcable
|
from PyQt5 import QtWidgets
from PyQt5 import QtCore
import pyqtgraph as pg
import numpy as np
import socket
from ctypes import *
import time
from MainWindow import Ui_MainWindow
# PLC UDP Data Types import
from RxUdp import RxUdp
from TxUdp import TxUdp
class RemoteInterface(QtWidgets.QMainWindow, Ui_MainWindow):
    """Qt window that exchanges UDP frames with a PLC every 50 ms and
    commands sinusoidal surge/heave motion on the EM1500."""

    def __init__(self):
        super(RemoteInterface, self).__init__()
        self.gui = Ui_MainWindow()
        self.gui.setupUi(self)
        # UDP socket bound to the local address the PLC sends to
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.bind(('192.168.90.60', 50060))
        # ctypes structures for UDP data exchanged with the PLC
        self.txData = TxUdp()
        self.rxData = RxUdp()
        # Udp Read/Write timer: poll and transmit every 50 ms
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.update)
        self.timer.start(50)
        # Reference instant for the commanded sine motions
        self.t0 = time.time()
        # Start GUI
        self.show()

    def update(self):
        # Elapsed time since start.
        # Bug fix: was `self.t0 - time.time()`, which is negative and
        # inverted the commanded sine motion.
        t = time.time() - self.t0
        # Read latest frame from udp into the ctypes structure
        data, addr = self.sock.recvfrom(1024)
        memmove(addressof(self.rxData), data, sizeof(self.rxData))
        # Increment counter and set Udp key
        self.txData.iUdpKey = 46505228
        self.txData.iCounter = self.txData.iCounter + 1
        # Apply sine motion to surge/heave for EM1500
        self.txData.em1500_surge_cmd = 0.1*np.sin(0.05*2.0*np.pi*t)
        self.txData.em1500_heave_cmd = 0.2*np.sin(0.1*2.0*np.pi*t)
        # Send command frame to PLC
        self.sock.sendto(self.txData, ('192.168.90.50', 50050))

    def closeEvent(self, event):
        # Stop polling before the window is destroyed
        self.timer.stop()
|
# Generated by Django 3.2.3 on 2021-05-15 21:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen several free-text CharFields on the hamilton and toronto models
    to max_length=1000 and allow blanks."""

    dependencies = [
        ('user', '0001_initial'),
    ]

    # (model_name, fields) pairs; every field gets the same CharField definition
    _ALTERED = (
        ('hamilton', (
            'address', 'city', 'mapaddress', 'pharmacy', 'province',
        )),
        ('toronto', (
            'address', 'appointment_details', 'city', 'hours1', 'hours2',
            'hours3', 'mapaddress', 'pharmacy', 'phone', 'postal_code',
            'province', 'vaccine1', 'vaccine2', 'website',
        )),
    )

    operations = [
        migrations.AlterField(
            model_name=model,
            name=field,
            field=models.CharField(blank=True, max_length=1000),
        )
        for model, fields in _ALTERED
        for field in fields
    ]
|
import flask
import serial
from time import sleep
import sys
COM_PORT = 'COM3'  # serial port name -- adjust for your machine
BAUD_RATES = 115200
# Serial link to the microcontroller, opened once at module load
ser = serial.Serial(COM_PORT, BAUD_RATES)
app = flask.Flask(__name__)
# Each LED route writes a framed byte command to the serial device:
# 's' + ('c'olor-set | 'd'uration) + color code (r/y/b) + payload + 'e'.
@app.route('/', methods=['GET'])
def home():
    """Health-check endpoint."""
    print('connected')
    return "<h1>Hello</h1>"

@app.route('/red', methods=['GET'])
def turn():
    # b'...' because pyserial writes bytes, not str
    ser.write(b'scr1e')
    return "<h1>Red On</h1>"

@app.route('/red10ms', methods=['GET'])
def red10ms():
    ser.write(b'sdr010e')
    return "<h1>red10ms</h1>"

@app.route('/red5ms', methods=['GET'])
def red5ms():
    ser.write(b'sdr005e')
    return "<h1>red5ms</h1>"

@app.route('/yellow', methods=['GET'])
def yellow():
    ser.write(b'scy1e')
    return "<h1>yellow ON</h1>"

@app.route('/yellow10ms', methods=['GET'])
def yellow10ms():
    ser.write(b'sdy010e')
    return "<h1>yellow10ms</h1>"

@app.route('/yellow5ms', methods=['GET'])
def yellow5ms():
    ser.write(b'sdy005e')
    return "<h1>yellow5ms</h1>"

@app.route('/blue', methods=['GET'])
def blue():
    ser.write(b'scb1e')
    return "<h1>blue On</h1>"

@app.route('/blue10ms', methods=['GET'])
def blue10ms():
    ser.write(b'sdb010e')
    return "<h1>blue10ms</h1>"

@app.route('/blue5ms', methods=['GET'])
def blue5ms():
    ser.write(b'sdb005e')
    return "<h1>blue5ms</h1>"

@app.route('/redoff', methods=['GET'])
def redoff():
    ser.write(b'scr0e')
    return "<h1>redoff</h1>"

@app.route('/yellowoff', methods=['GET'])
def yellowoff():
    ser.write(b'scy0e')
    return "<h1>yellowoff</h1>"

@app.route('/blueoff', methods=['GET'])
def blueoff():
    ser.write(b'scb0e')
    return "<h1>blueoff</h1>"

@app.route('/zino', methods=['GET'])
def zino():
    # Bug fix: the original literal was missing the final '}' and was
    # therefore invalid JSON.
    return '{"success":"true","result":{"resource_id":"O-A0003-001","fields":[{"id":"lat","type":"Double"}]}}'

if __name__ == '__main__':
    # Guard so importing this module no longer starts the server as a side effect
    app.run(host="0.0.0.0", port=8090)
def day_twelve_one():
array = [[-14, -4, -11, 0, 0, 0], [-9, 6, -7, 0, 0, 0], [4, 1, 4, 0, 0, 0], [2, -14, -9, 0, 0, 0]]
# array = [[-8, -10, 0,0,0,0],[5,5,10,0,0,0],[2,-7,3,0,0,0], [9,-8,-3,0,0,0]]
# array = [[-1, 0, 2,0,0,0],[2,-10,-7,0,0,0],[4,-8,8,0,0,0], [3,5,-1,0,0,0]]
i = 0
while i < 1000:
if array[0][0] > array[1][0]:
array[0][3] += -1
array[1][3] += 1
if array[0][0] > array[2][0]:
array[0][3] += -1
array[2][3] += 1
if array[0][0] > array[3][0]:
array[0][3] += -1
array[3][3] += 1
if array[0][0] < array[1][0]:
array[0][3] += 1
array[1][3] += -1
if array[0][0] < array[2][0]:
array[0][3] += 1
array[2][3] += -1
if array[0][0] < array[3][0]:
array[0][3] += 1
array[3][3] += -1
if array[1][0] > array[2][0]:
array[1][3] += -1
array[2][3] += 1
if array[1][0] > array[3][0]:
array[1][3] += -1
array[3][3] += 1
if array[2][0] > array[3][0]:
array[2][3] += -1
array[3][3] += 1
if array[1][0] < array[2][0]:
array[1][3] += 1
array[2][3] += -1
if array[1][0] < array[3][0]:
array[1][3] += 1
array[3][3] += -1
if array[2][0] < array[3][0]:
array[2][3] += 1
array[3][3] += -1
if array[0][1] > array[1][1]:
array[0][4] += -1
array[1][4] += 1
if array[0][1] > array[2][1]:
array[0][4] += -1
array[2][4] += 1
if array[0][1] > array[3][1]:
array[0][4] += -1
array[3][4] += 1
if array[1][1] > array[2][1]:
array[1][4] += -1
array[2][4] += 1
if array[1][1] > array[3][1]:
array[1][4] += -1
array[3][4] += 1
if array[2][1] > array[3][1]:
array[2][4] += -1
array[3][4] += 1
if array[0][1] < array[1][1]:
array[0][4] += 1
array[1][4] += -1
if array[0][1] < array[2][1]:
array[0][4] += 1
array[2][4] += -1
if array[0][1] < array[3][1]:
array[0][4] += 1
array[3][4] += -1
if array[1][1] < array[2][1]:
array[1][4] += 1
array[2][4] += -1
if array[1][1] < array[3][1]:
array[1][4] += 1
array[3][4] += -1
if array[2][1] < array[3][1]:
array[2][4] += 1
array[3][4] += -1
if array[0][2] > array[1][2]:
array[0][5] += -1
array[1][5] += 1
if array[0][2] > array[2][2]:
array[0][5] += -1
array[2][5] += 1
if array[0][2] > array[3][2]:
array[0][5] += -1
array[3][5] += 1
if array[1][2] > array[2][2]:
array[1][5] += -1
array[2][5] += 1
if array[1][2] > array[3][2]:
array[1][5] += -1
array[3][5] += 1
if array[2][2] > array[3][2]:
array[2][5] += -1
array[3][5] += 1
if array[0][2] < array[1][2]:
array[0][5] += 1
array[1][5] += -1
if array[0][2] < array[2][2]:
array[0][5] += 1
array[2][5] += -1
if array[0][2] < array[3][2]:
array[0][5] += 1
array[3][5] += -1
if array[1][2] < array[2][2]:
array[1][5] += 1
array[2][5] += -1
if array[1][2] < array[3][2]:
array[1][5] += 1
array[3][5] += -1
if array[2][2] < array[3][2]:
array[2][5] += 1
array[3][5] += -1
for number in array:
number[0] = number[0] + number[3]
number[1] = number[1] + number[4]
number[2] = number[2] + number[5]
print(array)
i += 1
answer = 0
for number in array:
answer = answer + (abs(number[0]) + abs(number[1]) + abs(number[2])) * (
abs(number[3]) + abs(number[4]) + abs(number[5]))
print(answer)
return
# day_twelve_one()
|
#!/usr/bin/env python3
"""
This program is for testing DynamoDB easily.
written by sudsator (Nov 2019)
usage : python get_by_updatedt.py <min:update_date_time> <max:update_date_time>
for example > python get_by_updatedt.py 1970010100000 19700101000010
reference :
http://tohoho-web.com/python/index.html
https://python.civic-apps.com/char-ord/
https://note.nkmk.me/python-unix-time-datetime/
https://python.ms/sub/misc/division/#%EF%BC%93%E3%81%A4%E3%81%AE%E3%82%84%E3%82%8A%E6%96%B9
https://qiita.com/Scstechr/items/c3b2eb291f7c5b81902a
https://qiita.com/UpAllNight/items/a15367ca883ad4588c05
https://note.nkmk.me/python-timeit-measure/
https://dev.classmethod.jp/cloud/aws/lambda-python-dynamodb/
https://qiita.com/nagataaaas/items/531b1fc5ce42a791c7df
https://w.amazon.com/bin/view/AWS/Teams/Proserve/Japan/Initiatives/DesignCookbook/
https://gitlab.com/yamabass80/python_packaging_note/blob/master/Readme.md
"""
import logging
import boto3
import json
import datetime
import re
import random, string
import sys
import timeit
from boto3.dynamodb.conditions import Key, Attr
# Target table and region for the test queries
TABLE_NAME = "TEST_TABLE"
REGION_NAME ="us-east-1"
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
# DynamoDB resource is created once at import time
DYNAMO = boto3.resource('dynamodb',region_name=REGION_NAME)
# PEP 8 (E731): use def instead of assigning lambdas to names
def ord_alph(i):
    """Return the ASCII code for the i-th lowercase letter (0 -> 'a')."""
    return i + ord("a")

def chr_alph(i):
    """Return the i-th lowercase letter itself (0 -> 'a')."""
    return chr(i + ord("a"))
# random numeric string of length n
def randomnumber(n):
    """Return n random decimal digits as a string."""
    return ''.join(random.choice(string.digits) for _ in range(n))
# random alphanumeric string of length n
def randomname(n):
    """Return n random characters drawn from letters and digits."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(n))
# item as dict -> tab-separated string
def str_anitem(anitem):
    """Render an item as 'user_id<TAB>update_date_time<TAB>contract_info[:20]'.

    Bug fix: the original inserted a 'contract_info' key into the caller's
    dict when it was missing; `.get` avoids mutating the input.
    """
    contract_info = anitem.get('contract_info', "")
    return anitem['user_id'] + '\t' \
        + anitem['update_date_time'] + '\t' \
        + contract_info[0:20]
# display an item
def display_anitem(anitem):
    # Print one item using the shared tab-separated rendering
    print(str_anitem(anitem))
# display a collection of items
def display_items(items):
    """Print each item on its own tab-separated line."""
    for item in items:
        print(str_anitem(item))
# field values -> item dict
def item_tok(user_id, update_date_time, dummy, contract_info):
    """Assemble an item dict from its individual field values."""
    return {
        'user_id': user_id,
        'update_date_time': update_date_time,
        'dummy': dummy,
        'contract_info': contract_info,
    }
# make userid string
def gen_userid(i):
    """Encode integer `i` as a lowercase pseudo-base-26 user id.

    A fixed offset of 27*27 is added first so small i still yields
    a multi-character id (least-significant letter first).
    """
    alphabet_size = ord("z") - ord("a") + 1  # 26
    i += (alphabet_size + 1) * (alphabet_size + 1)
    chars = []
    while i > 0:
        chars.append(chr_alph(i % alphabet_size))
        i //= alphabet_size
    return "".join(chars)
# make timestump
def strdatetime(i):
dt=datetime.datetime.fromtimestamp(i)
return re.sub('[- :]','',str(dt))
# main
def get_by_updatedt(st_time, en_time):
    """Query the 'dummy-update_date_time-index' GSI for items whose
    update_date_time lies in [st_time, en_time] and print them.

    Returns "end" on success; logs and re-raises on failure.
    """
    try:
        # (removed unused local `table_name = "BGL_Tokuten2"`)
        table = DYNAMO.Table(TABLE_NAME)
        response = table.query(
            IndexName='dummy-update_date_time-index',
            KeyConditionExpression = Key('dummy').eq('dummy') & Key('update_date_time').between(st_time, en_time)
        )
        print("ok")
        display_items(response['Items'])
        print("complete!")
        return "end"
    except Exception as error:
        # Log, then propagate so the caller sees the failure
        LOGGER.error(error)
        raise error
if __name__ == "__main__":
    # Guard so importing this module does not fire a query
    args = sys.argv
    #get_by_updatedt(args[1],args[2])
    # Time a single query end-to-end
    results = timeit.timeit(lambda: get_by_updatedt(args[1], args[2]), number=1)
    print("exec time[sec]", results)
|
import random
class MapTile:
    """A coordinate on the world map that can hold loot."""

    def __init__(self, x, y, loot):
        self.x = x
        self.y = y
        # Bug fix: the `loot` argument was ignored and always reset to []
        self.loot = loot

    def map_location(self, x, y):
        """Return the tile at (x, y), an error message for negative
        coordinates, or None when off the map."""
        if x < 0 or y < 0:
            return "This is impossible, try again" # coordinates can not be less than 0
        try:
            return map_of_world[y][x] # [y] selects row of map [x] selects the cell in that row
        except IndexError:
            return None # if coordinates do not exist
class StartTile(MapTile):
    def intro(self):
        # Introduction text shown when the player starts on this tile
        return "Hi you are now in some map on a computer somewhere, be careful, there are enemies lurking" # starting room tile prints introduction text
class LootRoom(MapTile):
    """A room containing loot."""

    def __init__(self, x, y):
        # Bug fix: the original called LootRoom.map_location(1, 2) unbound,
        # which raises TypeError, and never initialised the tile state.
        super().__init__(x, y, [])
class Room1(MapTile):
    """A plain room."""

    def __init__(self, x, y):
        # Bug fix: the original called Room1.map_location(1, 1) unbound,
        # which raises TypeError, and never initialised the tile state.
        super().__init__(x, y, [])
class Room2(MapTile):
    """A plain room."""

    def __init__(self, x, y):
        # Bug fix: the original called Room2.map_location(2, 1) unbound,
        # which raises TypeError, and never initialised the tile state.
        super().__init__(x, y, [])
class FinalRoom(MapTile):
    """The final room of the map."""

    def __init__(self, x, y):
        # Bug fix: the original called FinalRoom.map_location(0, 0) unbound,
        # which raises TypeError, and never initialised the tile state.
        super().__init__(x, y, [])
# NOTE(review): this grid mixes an instance (FinalRoom(1,0)), bare classes
# (Room2, Room1, LootRoom, StartTile) and plain coordinate tuples -- the
# classes/tuples look like they were meant to be instances; confirm intent.
map_of_world = [
    [FinalRoom(1,0),None,None],
    [(0,1),Room2,Room1,(3,1)],
    [None,LootRoom,(2,2),StartTile]
]
# Generated by Django 2.1.3 on 2019-11-16 16:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a `completed` boolean flag (default False) to HostSearch."""

    dependencies = [
        ('Eprint_users', '0027_auto_20191116_2007'),
    ]

    operations = [
        migrations.AddField(
            model_name='hostsearch',
            name='completed',
            field=models.BooleanField(default=False),
        ),
    ]
|
from dataextract import Type
class Double:
    """A DOUBLE-typed column value for a dataextract row."""

    def __init__(self, column, value, format = None):
        # `format` is unused but kept for interface compatibility
        self.column = column
        self.value = self.convert(value)

    def add_to_row(self, row):
        """Write this value into `row` at the configured column index."""
        return row.setDouble(self.column, self.value)

    def convert(self, value):
        """Coerce the raw value to float."""
        return float(value)

    def type(self):
        """Extract type tag for this column."""
        return Type.DOUBLE
# Fixed (corrigido)
print('Exercício 014')
print()
# Input: temperature in Celsius
c = float(input('Informe uma temperatura em °C: '))
print()
# Conversion: F = C * 9/5 + 32
f = 9 * c / 5 + 32
# Output
print('{}°C são equivalentes a {}F.'.format(c, f))
print()
|
from service_message import *
from constants import *
from collections import deque
from threading import Event
from asyncoro import AsynCoro, Coro, AsynCoroThreadPool, logger
import multiprocessing
import logging
class Message_Router():
_instance = None
@classmethod
def instance(cls):
if not cls._instance:
cls._instance = Message_Router( )
return cls._instance
_commands = None
exit = False
def __init__(self):
self._services = {}
logger.setLevel(logging.INFO)
# thread pool -- will burn up if services use the thread for blocking & totally
# kill application communication. If you have to block for I/O then you better
# be using async in the destination service
#for i in range(2 * multiprocessing.cpu_count()) :
#self._coro_dispatcher = \
self._dispatcher_coro = Coro(self._message_dispatcher)
def _message_dispatcher(self, coro=None):
coro.set_daemon()
thread_pool = AsynCoroThreadPool(2 * multiprocessing.cpu_count())
while True:
try:
message = yield coro.receive()
if self.exit: #abandon any work & just cleanly exit
break
yield thread_pool.async_task(coro, self._dispatch_message, message)
except:
show_error()
#raise
print "Coro(_message_dispatcher) exiting"
def _dispatch_message(self,message):
if message.dest_service in self._services.keys():
sw = Stopwatch()
self._services[message.dest_service].handle_message(message)
#if message.type == Message_Recv_Peer_Data.Type():
# logger.debug( "ROUTER: net receiving " + message.network_msg.Type())
#elif message.type == Message_Send_Peer_Data.Type():
# logger.debug( "ROUTER: net dispatching " + message.network_msg.Type())
#else:
# logger.debug( "ROUTER: dispatching " + message.Type())
# for long running tasks (CPU-bound) you should pass off to a dedicated thread or tune
# for I/O bound tasks you should queue the work until an I/O thread-pool thread can handle it
if sw.ms() > 50:
print "INVESTIGATE!!! %s(%s) took %0.3f ms!! Tuning may be required.'" % (message.dest_service, message.Type(), sw.ms())
else:
print "Unregistered service '" + message.service + "'"
def register_service(self, service_id, service):
if not service_id in self._services.keys():
self._services[service_id] = service
def route(self, message):
self._dispatch_message(message)
#if len(self._dispatcher_coro._msgs) > 100:
# print "Backlog is " + str(len(self._dispatcher_coro._msgs)) + " on message dispatch!!! Find blocking service."
#if not self.exit:
# self._dispatcher_coro.send(message)
def stop(self):
self.exit = True
# tell all my services to stop
for service in self._services.values():
try:
service.stop()
except:
show_error()
self._dispatcher_coro.send(None)
time.sleep(.1)
AsynCoro.instance().terminate()
def attach_console(self):
while True:
try:
cmd = raw_input()
except EOFError: #the user does not have a terminal
return
if cmd == "q" or cmd == "Q" or cmd == "quit" or cmd == "exit":
print "Exiting..."
break
try:
node_info = None
if SERVICE_NODE in self._services.keys():
node_info = self._services[SERVICE_NODE].get_console_node( )
args = []
splitted = cmd.split(' ',1)
if len(cmd) == 0: # default action
self.route(Message_Console_Command(SERVICE_NODE, "print", args, node_info))
elif len(splitted) == 1: # try to find a service with the command
for svc in self._services.values():
if splitted[0] in svc.attach_to_console( ):
splitted.insert(0,svc.service_id)
break
if len(splitted) > 1:
svcName = splitted[0] # consoleName
command = splitted[1] # commandName
# attempt to lookup service by consoleName
found = False
for svc in self._services.values():
if svc.cl_name == svcName or svc.service_id == svcName:
for i in range(2, len(splitted)):
args.append( splitted[i] )
svc.handle_message(Message_Console_Command(svc.service_id, command, args, node_info))
found = True
break
if not found: # see if a command was entered without a service name
for svc in self._services.values():
if svcName in svc.attach_to_console():
command = svcName
for i in range(1, len(splitted)):
args.append( splitted[i] )
svc.handle_message(Message_Console_Command(svc.service_id, command, args, node_info))
found = True
break
if not found:
print svcName + " is an unregistered service."
except:
show_error()
|
baseline = "foo"
mine = "10sec_gen_foo"
target = "10sec_new_foo"

def merge_scores(baseline_path, mine_path, target_path):
    """Average the per-key scores of two whitespace-separated score files.

    Each input line is '<key> <score>'; the files must list keys in the
    same order. Writes '<key> <mean>' lines to `target_path` and exits
    with status 1 on a key mismatch.

    Bug fix: the original used identifiers starting with digits
    (`1in`, `2line`, ...), which is a SyntaxError in Python.
    """
    with open(target_path, "w") as out_f, open(baseline_path) as base_f, open(mine_path) as mine_f:
        for base_line in base_f:
            mine_line = mine_f.readline()
            base_cols = base_line.split()
            mine_cols = mine_line.split()
            if base_cols[0] != mine_cols[0]:
                print("fatal error")
                exit(1)
            score = str((float(base_cols[1]) + float(mine_cols[1])) * 0.5)
            out_f.write(base_cols[0] + " " + score + "\n")

if __name__ == "__main__":
    merge_scores(baseline, mine, target)
|
# Arbitrary Base Conversions
from ex104 import int_to_hex, hex_to_int
# Convert a number from base 10 to a new base
# @param decimal the base 10 number to convert (non-negative)
# @param new_base the base to convert to
# @return the string of digits in the new base
def decimal_to_n(decimal: int, new_base: int):
    # Edge case fix: the original returned '' for 0
    if decimal == 0:
        return '0'
    out = ''
    while decimal > 0:
        out = str(int_to_hex(decimal % new_base)) + out
        decimal = decimal // new_base
    return out
# Convert a number from base 'base' to base 10
# @param number the base 'base' number, stored in a string
# @param base the base of the number to convert
# @return the base 10 number
def n_to_decimal(number: str, base: int):
    decimal = 0
    # Horner's scheme: fold each digit into the running value
    for digit in number:
        decimal = decimal * base + hex_to_int(digit)
    return decimal
# Convert a number between two arbitrary bases
def main():
    """Prompt for a number and its base, then print it in another base."""
    number = input('enter a number: ')
    base = int(input('and its base: '))
    new_base = int(input('enter the base you want to convert to: '))
    # Base-10 input skips the intermediate conversion step
    if base == 10:
        converted = decimal_to_n(int(number), new_base)
    else:
        converted = decimal_to_n(n_to_decimal(number, base), new_base)
    print('the converted number in base {} is {}'.format(new_base, converted))

if __name__ == '__main__':
    main()
|
#!/usr/bin/env python3
# Available money: (denomination, count available)
coins = [
    (50, 10),
    (10, 10),
    (5, 3)
]

def csum(l):
    """Total value of a list of (coin, count) pairs."""
    total = 0
    for coin, count in l:
        total += coin * count
    return total

class NoChange(Exception):
    """Raised when the amount cannot be made exactly with the coins on hand."""
    pass

def change(amount, coins=coins):
    """Return (coin, count) pairs making exactly `amount`, backtracking from
    the largest denomination; raise NoChange if impossible."""
    print("called with", amount, coins)
    if amount == 0:
        return []
    if not coins:
        raise NoChange
    coin, num = coins[0]
    for count in range(min(amount // coin, num), -1, -1):
        try:
            return [(coin, count)] + change(amount - count * coin, coins=coins[1:])
        except NoChange:
            pass
    raise NoChange

def bestchange(amount, coins=coins):
    """Return the combination closest to `amount` from below; an exact match
    is returned immediately."""
    print("called with", amount, coins)
    if not coins:
        return []
    coin, num = coins[0]
    answer = []
    for count in range(min(amount // coin, num), -1, -1):
        candidate = [(coin, count)] + bestchange(amount - count * coin, coins=coins[1:])
        if csum(candidate) == amount:
            return candidate
        if (amount - csum(candidate)) < (amount - csum(answer)):
            answer = candidate
    print("difference is", amount - csum(answer))
    return answer
if __name__ == '__main__':
    #print(change(65))
    # Demo: best approximation of 9 using ten 5s and two 3s
    print(bestchange(9, coins=[(5,10),(3,2)]))
|
import pandas as pd
import csv
# Root of the cresci-2017 social-spambots dataset on the local machine
DATA_DIR = r"G:\Documents\Drexel\Final_Project\cresci-2017.csv\datasets_full.csv\social_spambots_1.csv\social_spambots_1.csv"
# Columns of interest in the raw tweet / user CSV files
CRESCI_TWEET_COLS = ['user_id', 'retweet_count', 'reply_count', 'favorite_count', 'num_hashtags',
                     'num_urls', 'num_mentions', 'created_at', 'source', 'retweeted']
CRESCI_USER_COLS = ['id', 'name', 'description', 'screen_name', 'followers_count', 'friends_count',
                    'favourites_count', 'listed_count', 'statuses_count','lang', 'location', 'geo_enabled',
                    'default_profile', 'default_profile_image', 'verified', 'created_at']
# Raw column name -> standard field name used downstream
CRESCI_USER_COL_MAPPING = { 'id':'userid',
                            'followers_count':'followerscount',
                            'friends_count':'friendscount',
                            'name':'name',
                            'screen_name':'screenname',
                            'lang':'account_lang',
                            'location':'location',
                            'geo_enabled':'geoenabled',
                            'default_profile_image':'defaultprofileimage',
                            'default_profile':'defaultprofile',
                            'created_at':'account_createdat',
                            'statuses_count':'statusescount',
                            'favourites_count':'favouritescount',
                            'listed_count':'listedcount',
                            'verified':'verified'
                            }
CRESCI_TWEET_COL_MAPPING = {'id':'tweetid',
                            'user_id':'userid',
                            'retweet_count':'retweetcount',
                            'favorite_count':'likecount',
                            'reply_count':'replycount',
                            'num_hashtags':'hashtagcount',
                            'num_urls':'urlcount',
                            'num_mentions':'mentioncount',
                            'created_at':'tweet_createdat',
                            'source':'source'
                            }
def reformat_user_data(user_data):
    """Project the Cresci user columns and rename them to the standard fields."""
    selected = user_data[CRESCI_USER_COLS]
    return selected.rename(CRESCI_USER_COL_MAPPING, axis='columns')
def reformat_tweet_data(tweet_data):
    """Rename the Cresci tweet columns to the standard field names."""
    return tweet_data.rename(CRESCI_TWEET_COL_MAPPING, axis='columns')
def combine_users_tweets(user_data, tweet_data):
    """Inner-join each tweet with its author's user record on `userid`."""
    return pd.merge(tweet_data, user_data, on='userid')
def get_data(data_dir, skiprows, nrows):
    """Read one chunk of tweets plus the full user table, normalise the
    column names, and return the merged dataset.

    skiprows: number of data rows to skip (the header row is kept).
    nrows: maximum number of tweet rows to read.
    """
    tweets = reformat_tweet_data(
        pd.read_csv(data_dir + r'\tweets.csv', dtype=str, skiprows=range(1, skiprows), nrows=nrows)
    )
    users = reformat_user_data(pd.read_csv(data_dir + r'\users.csv', dtype=str))
    # Merge tweet and user data
    merged = combine_users_tweets(users, tweets)
    print(len(merged))
    return merged
if __name__ == "__main__":
    processed_name = "SS1_processed.csv"
    # Count the tweet rows once to size the chunk loop (+1 for the header)
    file_len = len(pd.read_csv(DATA_DIR+"/tweets.csv", usecols=["id"])) + 1
    print("Num. Tweets: %d" % file_len)
    chunk_size = 500000
    # Process the tweets file in chunks, appending each to the output CSV
    for i in range(file_len//chunk_size + 1):
        if i == 0:
            header = True
        else:
            header = False
        skip = i * chunk_size
        print("Skip to %d" % skip)
        # NOTE(review): nrows=chunk_size-1 with skip=i*chunk_size looks like it
        # drops one row per chunk -- confirm against the source data.
        data = get_data(DATA_DIR,skip,chunk_size-1)
        with open(DATA_DIR + '/' + processed_name, 'a', encoding='utf-8', newline='') as f:
            data.to_csv(f, header=header, index=False, encoding='utf-8')
    # Recount from the produced file as a sanity check
    file_len = len(pd.read_csv(DATA_DIR + '/' + processed_name, usecols=["userid"])) + 1
    print("Num. Tweets: %d" % file_len)
from unittest import TestCase
from pycec.commands import CecCommand
from pycec.const import CMD_POWER_STATUS, CMD_VENDOR, CMD_OSD_NAME, \
CMD_PHYSICAL_ADDRESS
from pycec.network import HDMIDevice
class TestHDMIDevice(TestCase):
    """Unit tests for pycec.network.HDMIDevice state tracking."""

    def test_logical_address(self):
        """A device reports the logical address it was constructed with."""
        device = HDMIDevice(2)
        self.assertEqual(device.logical_address, 2)

    def test_update(self):
        """Raw CEC frames update OSD name, power, vendor and address."""
        device = HDMIDevice(2)
        # OSD-name frame whose payload spells 'Onkyo HTX-22HDX'.
        device.update_callback(CecCommand(
            '02:%02x:4f:6e:6b:79:6f:20:48:54:58:2d:32:32:48:44:58' %
            CMD_OSD_NAME[1]))
        self.assertEqual(device.osd_name, 'Onkyo HTX-22HDX')
        device.update_callback(CecCommand('02:%02x:01' % CMD_POWER_STATUS[1]))
        self.assertEqual(device.power_status, 1)
        device.update_callback(CecCommand('02:%02x:02' % CMD_POWER_STATUS[1]))
        self.assertEqual(device.power_status, 2)
        # Vendor id 0x18C086 maps to Broadcom.
        device.update_callback(CecCommand('02:%02x:18:C0:86' % CMD_VENDOR[1]))
        self.assertEqual(device.vendor_id, 0x18C086)
        self.assertEqual(device.vendor, 'Broadcom')
        device.update_callback(
            CecCommand('02:%02x:C0:86:01' % CMD_PHYSICAL_ADDRESS[1]))
        self.assertEqual(device.physical_address.ascmd, 'c0:86')
        self.assertEqual(device.physical_address.asattr, [0xC0, 0x86])

    def test_is_on(self):
        """Power status 0 means the device is on."""
        device = HDMIDevice(2)
        device._power_status = 1
        self.assertFalse(device.is_on)
        device._power_status = 0
        self.assertTrue(device.is_on)

    def test_is_off(self):
        """Power status 1 means the device is off."""
        device = HDMIDevice(2)
        device._power_status = 1
        self.assertTrue(device.is_off)
        device._power_status = 0
        self.assertFalse(device.is_off)

    def test_type(self):
        """The raw type code is exposed unchanged."""
        device = HDMIDevice(2)
        device._type = 2
        self.assertEqual(2, device.type)

    def test_type_name(self):
        """Each known type code maps to a name; unknown codes map to UNKNOWN."""
        device = HDMIDevice(2)
        expected_names = {0: 'TV', 1: 'Recorder', 2: 'UNKNOWN', 3: 'Tuner',
                          4: 'Playback', 5: 'Audio', 6: 'UNKNOWN', 7: 'UNKNOWN'}
        for type_code, name in expected_names.items():
            device._type = type_code
            self.assertEqual(name, device.type_name)

    def test_update_callback(self):
        """update_callback dispatches on opcode and mutates the right fields."""
        device = HDMIDevice(3)
        send = device.update_callback
        send(CecCommand(CMD_PHYSICAL_ADDRESS[1], att=[0x11, 0x00, 0x02]))
        self.assertEqual('1.1.0.0', str(device.physical_address))
        self.assertEqual(2, device.type)
        send(CecCommand(CMD_POWER_STATUS[1], att=[0x01]))
        self.assertEqual(1, device.power_status)
        self.assertTrue(device.is_off)
        self.assertFalse(device.is_on)
        send(CecCommand(CMD_POWER_STATUS[1], att=[0x00]))
        self.assertEqual(0, device.power_status)
        self.assertTrue(device.is_on)
        self.assertFalse(device.is_off)
        send(CecCommand(CMD_POWER_STATUS[1], att=[0x02]))
        self.assertEqual(2, device.power_status)
        self.assertFalse(device.is_on)
        self.assertFalse(device.is_off)
        send(CecCommand(CMD_OSD_NAME[1], att=[ord(ch) for ch in "Test4"]))
        self.assertEqual("Test4", device.osd_name)
        send(CecCommand(CMD_VENDOR[1], att=[0x00, 0x80, 0x45]))
        self.assertEqual(0x008045, device.vendor_id)
        self.assertEqual("Panasonic", device.vendor)
|
import asyncio
import os
import socket
import ccxt.async_support as ccxta
"""
三角套利demo2:寻找三角套利空间,包含下单模块,异步请求处理版
交易对:用一种资产(quote currency)去定价另一种资产(base currency),比如用比特币(BTC)去定价莱特币(LTC),
就形成了一个LTC/BTC的交易对,
交易对的价格代表的是买入1单位的base currency(比如LTC)
需要支付多少单位的quote currency(比如BTC),
或者卖出一个单位的base currency(比如LTC)
可以获得多少单位的quote currency(比如BTC)。
中间资产mid currency可以是USDT等稳定币
"""
# Market legs of the triangle:
# P1 quote_mid BTC/USDT
# P2 base_mid LTC/USDT
# P3 base_quote LTC/BTC
default_base_cur = 'LTC'
default_quote_cur = 'BTC'
default_mid_cur = 'USDT'
# Delay between order-book requests, in seconds.
delay = 2
# Maximum number of times an order is polled before it is cancelled.
query_times = 3
# good_exchange_name = ['binance', 'fcoin', 'gateio', 'huobipro', 'kucoin', 'okex']
# good_exchange_name = ['binance', 'fcoin', 'gateio', 'huobipro', 'kucoin', 'okex','bcex','bibox','bigone','bitfinex','bitforex',
#                       'bithumb','bitkk','cex','coinbase','coinex','cointiger','exx','gdax','gemini','hitbtc','rightbtc',
#                       'theocean','uex']
# good_coin = ['ETH', 'XRP', 'BCH', 'EOS', 'XLM', 'LTC', 'ADA', 'XMR', 'TRX', 'BNB', 'ONT', 'NEO', 'DCR']
good_exchange_name = ['uex']
# good_coin = ['ETH', 'XRP', 'BCH', 'EOS', 'XLM', 'LTC', 'ADA', 'XMR', 'TRX', 'BNB', 'ONT', 'NEO', 'DCR', 'LBA', 'RATING']
good_coin = ['ETH', 'XRP', 'BCH', 'EOS', 'RATING']
has_config_exchange = ['uex']
config_key = dict()
config_key['okex'] = ['okex_key','okex_secret']
# config_key['uex'] = ['uex_key','uex_secret']
config_key['uex'] = ['', '']
# Trading-related constants.
# Fraction of the top-of-book size this strategy is willing to take.
order_ratio = 0.5
# Fraction of each account balance held back as a reserve.
reserve_ratio_base = 0.3
reserve_ratio_quote = 0.3
reserve_ratio_mid = 0.3
# Minimum filled fraction required before hedging proceeds.
min_trade_percent = 0.2
# Whether to actually place orders (defaults to dry run).
order_flag = False
def set_proxy():
    """Route HTTP(S) traffic through a local proxy unless one is already set."""
    local_proxy = 'http://127.0.0.1:1080'
    for env_var in ('http_proxy', 'https_proxy'):
        os.environ.setdefault(env_var, local_proxy)
# Build the list of exchange instances for the given exchange names.
def get_exchange_list(good_list):
    """Instantiate one ccxt async exchange object per name in *good_list*."""
    instances = (getattr(ccxta, name)() for name in good_list)
    return [exchange for exchange in instances if exchange]
# Install API credentials on an exchange when we have them configured.
def set_exchange_key(exchange):
    """Copy the configured apiKey/secret onto *exchange*, if any."""
    if exchange.id not in has_config_exchange:
        print('set_exchange_key name is {} no key'.format(exchange.name))
        return
    exchange.apiKey, exchange.secret = config_key[exchange.id]
    print('set_exchange_key name is {},key is {},secret is {}'.format(exchange.name,exchange.apiKey,exchange.secret))
# Search one exchange for a triangular-arbitrage opportunity.  Trades on the
# relation between P3 (base/quote) and P2/P1; slippage and fees are ignored.
# Goal: keep the base and quote balances flat while growing the mid balance.
async def find_trade_chance(exchange,base='LTC',quote='BTC',mid='USDT'):
    """Fetch the three order books (P1 quote/mid, P2 base/mid, P3 base/quote),
    compare top-of-book prices, and run the positive or negative arbitrage
    cycle when a profitable gap appears.  Closes the exchange on every path.
    """
    print('-----find_trade_chance开始在交易所{}寻找三角套利机会,base:{},quote:{},mid:{}'.format(exchange.name,base,quote,mid))
    try:
        await exchange.load_markets()
    except Exception as e:
        print('load_markets e is {} ,exchange is {}'.format(e.args[0],exchange.name))
        await exchange.close()
        return
    cur_quote_mid = quote + '/' + mid
    cur_base_mid = base+'/'+mid
    cur_base_quote = base + '/' + quote
    try:
        book_quote_mid = await exchange.fetch_order_book(cur_quote_mid)
        await asyncio.sleep(delay)
        book_base_mid = await exchange.fetch_order_book(cur_base_mid)
        await asyncio.sleep(delay)
        book_base_quote = await exchange.fetch_order_book(cur_base_quote)
    except Exception as e:
        print('fetch_order_book e is {} ,exchange is {}'.format(e.args[0],exchange.name))
        await exchange.close()
        return
    # P1 (quote/mid) best bid/ask price and size; None when the book side is empty.
    price_quote_mid_bid1 = book_quote_mid['bids'][0][0] if len(book_quote_mid['bids']) > 0 else None
    price_quote_mid_ask1 = book_quote_mid['asks'][0][0] if len(book_quote_mid['asks']) > 0 else None
    size_quote_mid_bid1 = book_quote_mid['bids'][0][1] if len(book_quote_mid['bids']) > 0 else None
    size_quote_mid_ask1 = book_quote_mid['asks'][0][1] if len(book_quote_mid['asks']) > 0 else None
    # P2 (base/mid) best bid/ask price and size.
    price_base_mid_bid1 = book_base_mid['bids'][0][0] if len(book_base_mid['bids']) > 0 else None
    price_base_mid_ask1 = book_base_mid['asks'][0][0] if len(book_base_mid['asks']) > 0 else None
    size_base_mid_bid1 = book_base_mid['bids'][0][1] if len(book_base_mid['bids']) > 0 else None
    size_base_mid_ask1 = book_base_mid['asks'][0][1] if len(book_base_mid['asks']) > 0 else None
    # P3 (base/quote) best bid/ask price and size.
    price_base_quote_bid1 = book_base_quote['bids'][0][0] if len(book_base_quote['bids']) > 0 else None
    price_base_quote_ask1 = book_base_quote['asks'][0][0] if len(book_base_quote['asks']) > 0 else None
    size_base_quote_bid1 = book_base_quote['bids'][0][1] if len(book_base_quote['bids']) > 0 else None
    size_base_quote_ask1 = book_base_quote['asks'][0][1] if len(book_base_quote['asks']) > 0 else None
    # NOTE(review): an empty book side leaves a price as None and the price
    # comparisons below would raise TypeError — confirm books are never empty.
    date_time = exchange.last_response_headers['Date']
    print('P1: buy1:{},{},sell1:{},{}'.format(price_quote_mid_bid1,size_quote_mid_bid1,price_quote_mid_ask1,size_quote_mid_ask1))
    print('P2: buy1:{},{},sell1:{},{}'.format(price_base_mid_bid1, size_base_mid_bid1, price_base_mid_ask1,size_base_mid_ask1))
    print('P3: buy1:{},{},sell1:{},{}'.format(price_base_quote_bid1, size_base_quote_bid1, price_base_quote_ask1,size_base_quote_ask1))
    # Fair P3 price is P2/P1; a mismatch with the market P3 is an opportunity.
    balance = await exchange.fetch_balance()
    free_base = balance[base]['free'] if balance[base]['free'] else 0
    free_quote = balance[quote]['free'] if balance[quote]['free'] else 0
    free_mid = balance[mid]['free'] if balance[mid]['free'] else 0
    if price_base_quote_ask1 < price_base_mid_bid1/price_quote_mid_ask1:
        # Positive cycle: P3 < P2/P1 — buy P3, sell P2, buy P1.
        # Profit = Q3 * P1 * (P2/P1 - P3).
        # BUG FIX: the fifth argument is get_buy_size's size_base_mid_bid1
        # (P2 bid depth); previously the P1 ask depth was passed instead.
        trade_size = get_buy_size(free_base, free_quote, free_mid, size_base_quote_ask1, size_base_mid_bid1,
                                  price_base_quote_ask1, price_quote_mid_ask1)
        price_diff = price_quote_mid_ask1*(price_base_mid_bid1/price_quote_mid_ask1 - price_base_quote_ask1)
        profit = trade_size*price_diff
        print('++++++发现正套利机会 profit is {},price_diff is {},trade_size is {},P3: {} < P2/P1: {},time:{}\n\n'.format(
            profit, price_diff, trade_size, price_base_quote_ask1, price_base_mid_bid1/price_quote_mid_ask1, date_time))
        if order_flag:
            # BUG FIX: postive_trade is a coroutine and was never awaited,
            # so no orders were actually placed.
            await postive_trade(exchange, cur_base_quote, cur_base_mid, cur_quote_mid, trade_size, price_base_quote_ask1,
                                price_base_mid_bid1, price_quote_mid_ask1)
        await exchange.close()
    elif price_base_quote_bid1 > price_base_mid_ask1/price_quote_mid_bid1:
        # Negative cycle: P3 > P2/P1 — sell P3, buy P2, sell P1.
        # Profit = Q3 * P1 * (P3 - P2/P1).
        trade_size = get_sell_size(free_base, free_quote, free_mid, size_base_quote_bid1, size_base_mid_ask1,
                                   price_base_mid_ask1, price_base_quote_ask1)
        price_diff = price_quote_mid_bid1*(price_base_quote_bid1-price_base_mid_ask1/price_quote_mid_bid1)
        profit = trade_size*price_diff
        print('++++++发现逆套利机会 profit is {},price_diff is {},trade_size is {},P3: {} > P2/P1: {},time:{}\n\n'.format(
            profit, price_diff, trade_size, price_base_quote_bid1, price_base_mid_ask1/price_quote_mid_bid1, date_time))
        if order_flag:
            # BUG FIX: negative_trade is a coroutine and was never awaited.
            await negative_trade(exchange, cur_base_quote, cur_base_mid, cur_quote_mid, trade_size, price_base_quote_bid1,
                                 price_base_mid_ask1, price_quote_mid_bid1)
        await exchange.close()
    else:
        print('在交易所{}没有找到三角套利机会,time:{}\n\n'.format(exchange.name,date_time))
        await exchange.close()
'''
正循环套利
正循环套利的顺序如下:
先去LTC/BTC吃单买入LTC,卖出BTC,然后根据LTC/BTC的成交量,使用多线程,
同时在LTC/USDT和BTC/USDT市场进行对冲。LTC/USDT市场吃单卖出LTC,BTC/USDT市场吃单买入BTC。
P3<P2/P1
base_quote<quote_mid/quote_mid
操作:买-卖/买
'''
# Positive-cycle arbitrage: take the P3 ask (buy base with quote), then hedge
# the actual fill by selling base on P2 and buying quote on P1.
async def postive_trade(exchange, base_quote, base_mid, quote_mid, trade_size, price_base_quote_ask1, price_base_mid_bid1,
                        price_quote_mid_ask1):
    """Run the positive arbitrage cycle (buy P3, sell P2, buy P1).

    Places a limit buy on base_quote, then polls the order up to query_times
    times, hedging fills on the two mid-currency markets; the order is
    cancelled if it never fills past min_trade_percent.
    """
    print('开始正向套利 postive_trade base_quote:{}, base_mid:{}, quote_mid:{}, trade_size:{}, '
          'price_base_quote_ask1:{}, price_base_mid_bid1:{}, price_quote_mid_ask1:{}'
          .format(base_quote, base_mid, quote_mid, trade_size, price_base_quote_ask1, price_base_mid_bid1, price_quote_mid_ask1))
    # Take the P3 ask: buy base with quote.
    result = await exchange.create_order(base_quote, 'limit', 'buy', trade_size, price_base_quote_ask1)
    retry = 0
    already_hedged_amount = 0
    while retry <= query_times:
        if retry == query_times:
            # Polling budget exhausted: cancel whatever is left of the order.
            print('正向套利 postive_trade,达到轮询上限仍未完成交易,取消订单,retry is {}'.format(retry))
            await exchange.cancel_order(result['id'], base_quote)
            break
        await asyncio.sleep(delay)
        # Poll the order's fill state after the delay.
        order = await exchange.fetch_order(result['id'], base_quote)
        filled = order['filled']
        amount = order['amount']
        already_hedged_amount = filled
        # Filled fraction still below the configured minimum: poll again.
        if filled/amount < min_trade_percent:
            retry += 1
            continue
        # BUG FIX: hedge_sell/hedge_buy are coroutines; without await they
        # were never executed, leaving the position unhedged.
        # NOTE(review): 'filled' is cumulative, so repeated loop passes
        # re-hedge the full amount — consider hedging only the new delta.
        await hedge_sell(exchange, base_mid, filled, price_base_mid_bid1)
        await hedge_buy(exchange, quote_mid, filled, price_quote_mid_ask1)
        if already_hedged_amount >= trade_size:
            # Target volume reached: stop polling.
            print('正向套利 postive_trade 实际成交量完成目标,退出轮询')
            break
        else:
            retry += 1
    print('结束正向套利 postive_trade already_hedged_amount is {},trade_size is {}'.format(already_hedged_amount,trade_size))
    await exchange.close()
'''
逆循环套利
逆循环套利的顺序如下:
先去LTC/BTC吃单卖出LTC,买入BTC,然后根据LTC/BTC的成交量,使用多线程,
同时在LTC/USDT和BTC/USDT市场进行对冲。
LTC/USDT市场吃单买入LTC,BTC/USDT市场吃单卖出BTC。
P3>P2/P1
base_quote>base_mid/quote_mid
操作:卖-买/卖
'''
# Negative-cycle arbitrage: take the P3 bid (sell base for quote), then hedge
# the actual fill by buying base on P2 and selling quote on P1.
async def negative_trade(exchange, base_quote, base_mid, quote_mid, trade_size, price_base_quote_bid1, price_base_mid_ask1,
                         price_quote_mid_bid1):
    """Run the negative arbitrage cycle (sell P3, buy P2, sell P1).

    Places a limit sell on base_quote, polls the order up to query_times
    times while hedging fills on the mid-currency markets, and cancels the
    order if it never fills past min_trade_percent.
    """
    print('开始逆循环套利 negative_trade base_quote:{}, base_mid:{}, quote_mid:{}, trade_size:{}, '
          'price_base_quote_bid1:{}, price_base_mid_ask1:{}, price_quote_mid_bid1:{}'
          .format(base_quote, base_mid, quote_mid, trade_size, price_base_quote_bid1, price_base_mid_ask1,
                  price_quote_mid_bid1))
    # BUG FIX: create_order was called without await, so result was a
    # coroutine object and result['id'] below raised TypeError.
    result = await exchange.create_order(base_quote, 'limit', 'sell', trade_size, price_base_quote_bid1)
    retry = 0
    already_hedged_amount = 0
    while retry <= query_times:
        if retry == query_times:
            # Polling budget exhausted: cancel whatever is left of the order.
            print('逆向套利 negative_trade,达到轮询上限仍未完成交易,取消订单,retry is {}'.format(retry))
            await exchange.cancel_order(result['id'], base_quote)
            break
        await asyncio.sleep(delay)
        # Poll the order's fill state after the delay.
        order = await exchange.fetch_order(result['id'], base_quote)
        filled = order['filled']
        amount = order['amount']
        already_hedged_amount = filled
        # Filled fraction still below the configured minimum: poll again.
        if filled / amount < min_trade_percent:
            retry += 1
            continue
        # BUG FIX: hedge coroutines were called without await and never ran.
        # NOTE(review): 'filled' is cumulative, so later passes re-hedge the
        # full amount — consider hedging only the newly filled delta.
        await hedge_buy(exchange, base_mid, filled, price_base_mid_ask1)
        await hedge_sell(exchange, quote_mid, filled, price_quote_mid_bid1)
        if already_hedged_amount >= trade_size:
            # Target volume reached: stop polling.
            print('逆向套利 negative_trade 实际成交量完成目标,退出轮询')
            break
        else:
            retry += 1
    print('结束逆向套利 negative_trade already_hedged_amount is {},trade_size is {}'.format(already_hedged_amount, trade_size))
    await exchange.close()
# Hedge by selling on the given market.
async def hedge_sell(exchange, symbol, sell_size, price):
    """Place a limit sell, wait briefly, then market-sell any remainder."""
    print('开始对冲卖 hedge_sell symbol:{},sell_size:{},price:{}'.format(symbol, sell_size, price))
    placed = await exchange.create_order(symbol, 'limit', 'sell', sell_size, price)
    await asyncio.sleep(delay/10)
    # Poll the fill state after delay/10 seconds.
    status = await exchange.fetch_order(placed['id'], symbol)
    filled = status['filled']
    remaining = status['remaining']
    if filled < sell_size:
        # Not fully filled: sweep the remainder with a market order.
        await exchange.create_order(symbol, 'market', 'sell', remaining)
        print('对冲卖---- hedge_sell filled < sell_size 市价交易 symbol:{},filled:{},sell_size:{},remaining:{}'.format(symbol, filled, sell_size, remaining))
    await exchange.close()
# Hedge by buying on the given market.
async def hedge_buy(exchange, symbol, buy_size, price):
    """Place a limit buy, wait briefly, then market-buy any remainder."""
    print('开始对冲买 hedge_buy symbol:{},buy_size:{},price:{}'.format(symbol, buy_size, price))
    result = await exchange.create_order(symbol, 'limit', 'buy', buy_size, price)
    await asyncio.sleep(delay/10)
    # Poll the fill state after delay/10 seconds.
    order = await exchange.fetch_order(result['id'], symbol)
    filled = order['filled']
    remaining = order['remaining']
    if filled < buy_size:
        # Not fully filled: sweep the remainder with a market order.
        await exchange.create_order(symbol, 'market', 'buy', remaining)
        # BUG FIX: the log message previously said 'sell_size' in the buy path.
        print('对冲买---- hedge_buy filled < buy_size 市价交易 symbol:{},filled:{},buy_size:{},remaining:{}'.format(symbol, filled, buy_size, remaining))
    await exchange.close()
'''
P3<P2/P1
操作:买-卖/买
base:LTC, quote:BTC, mid:USDT
1. LTC/BTC卖方盘口吃单数量:ltc_btc_sell1_quantity*order_ratio_ltc_btc,其中ltc_btc_sell1_quantity 代表LTC/BTC卖一档的数量,
order_ratio_ltc_btc代表本策略在LTC/BTC盘口的吃单比例
2. LTC/USDT买方盘口吃单数量:ltc_usdt_buy1_quantity*order_ratio_ltc_usdt,其中order_ratio_ltc_usdt代表本策略在LTC/USDT盘口的吃单比例
3. LTC/BTC账户中可以用来买LTC的BTC额度及可以置换的LTC个数:
btc_available - btc_reserve,可以置换成
(btc_available – btc_reserve)/ltc_btc_sell1_price个LTC
其中,btc_available表示该账户中可用的BTC数量,btc_reserve表示该账户中应该最少预留的BTC数量
(这个数值由用户根据自己的风险偏好来设置,越高代表用户风险偏好越低)。
4. LTC/USDT账户中可以用来卖的LTC额度:
ltc_available – ltc_reserve
其中,ltc_available表示该账户中可用的LTC数量,ltc_reserve表示该账户中应该最少预留的LTC数量
(这个数值由用户根据自己的风险偏好来设置,越高代表用户风险偏好越低)。
5. BTC/USDT账户中可以用来买BTC的USDT额度及可以置换的BTC个数和对应的LTC个数:
usdt_available - usdt_reserve, 可以置换成
(usdt_available-usdt_reserve)/btc_usdt_sell1_price个BTC,
相当于
(usdt_available-usdt_reserve)/btc_usdt_sell1_price/ltc_btc_sell1_price
个LTC
其中:usdt_available表示该账户中可用的人民币数量,usdt_reserve表示该账户中应该最少预留的人民币数量
(这个数值由用户根据自己的风险偏好来设置,越高代表用户风险偏好越低)。
'''
# Size a positive-cycle (buy) order.  All quantities are expressed in units
# of the base currency (the P3 asset), capped by order-book depth and by what
# each account can spend after its reserve ratio is held back.
def get_buy_size(free_base, free_quote, free_mid, size_base_quote_ask1, size_base_mid_bid1, price_base_quote_ask1, price_quote_mid_ask1):
    """Return the largest safe buy size (in base units) for the positive cycle."""
    caps = (
        # 1. P3 ask-side depth we allow ourselves to take.
        size_base_quote_ask1 * order_ratio,
        # 2. P2 bid-side depth we allow ourselves to take.
        size_base_mid_bid1 * order_ratio,
        # 3. Quote balance (minus reserve) converted to base units at the P3 ask.
        free_quote * (1-reserve_ratio_quote) / price_base_quote_ask1,
        # 4. Base balance (minus reserve) available to sell on P2.
        free_base * (1-reserve_ratio_base),
        # 5. Mid balance (minus reserve) converted via P1 then P3 to base units.
        free_mid * (1 - reserve_ratio_mid) / price_quote_mid_ask1 / price_base_quote_ask1,
    )
    return min(caps)
'''
P3>P2/P1
操作:卖-买/卖
base:LTC, quote:BTC, mid:USDT
卖出的下单保险数量计算
假设BTC/USDT盘口流动性好
1. LTC/BTC买方盘口吃单数量:ltc_btc_buy1_quantity*order_ratio_ltc_btc,其中ltc_btc_buy1_quantity 代表LTC/BTC买一档的数量,
order_ratio_ltc_btc代表本策略在LTC/BTC盘口的吃单比例
2. LTC/USDT卖方盘口卖单数量:ltc_usdt_sell1_quantity*order_ratio_ltc_usdt,其中order_ratio_ltc_usdt代表本策略在LTC/USDT盘口的吃单比例
3. LTC/BTC账户中可以用来卖LTC的数量:
ltc_available - ltc_reserve,
其中,ltc_available表示该账户中可用的LTC数量,ltc_reserve表示该账户中应该最少预留的LTC数量
(这个数值由用户根据自己的风险偏好来设置,越高代表用户风险偏好越低)。
4. LTC/USDT账户中可以用来卖的usdt额度:
usdt_available – usdt_reserve,相当于
(usdt_available – usdt_reserve) / ltc_usdt_sell1_price个LTC
其中,usdt_available表示该账户中可用的人民币数量,usdt_reserve表示该账户中应该最少预留的人民币数量
(这个数值由用户根据自己的风险偏好来设置,越高代表用户风险偏好越低)。
5. BTC/USDT账户中可以用来卖BTC的BTC额度和对应的LTC个数:
btc_available - btc_reserve, 可以置换成
(btc_available-btc_reserve) / ltc_btc_sell1_price个LTC
其中:btc_available表示该账户中可用的BTC数量,btc_reserve表示该账户中应该最少预留的BTC数量
(这个数值由用户根据自己的风险偏好来设置,越高代表用户风险偏好越低)。
'''
# Size a negative-cycle (sell) order.  All quantities are expressed in units
# of the base currency (the P3 asset), capped by order-book depth and by what
# each account can spend after its reserve ratio is held back.
def get_sell_size(free_base, free_quote, free_mid, size_base_quote_bid1, size_base_mid_ask1, price_base_mid_ask1, price_base_quote_ask1):
    """Return the largest safe sell size (in base units) for the negative cycle."""
    caps = (
        # 1. P3 bid-side depth we allow ourselves to take.
        size_base_quote_bid1 * order_ratio,
        # 2. P2 ask-side depth we allow ourselves to take.
        size_base_mid_ask1 * order_ratio,
        # 3. Base balance (minus reserve) available to sell on P3.
        free_base * (1-reserve_ratio_base),
        # 4. Mid balance (minus reserve) converted to base units at the P2 ask.
        free_mid * (1-reserve_ratio_mid) / price_base_mid_ask1,
        # 5. Quote balance (minus reserve) converted to base units at the P3 ask.
        free_quote * (1-reserve_ratio_quote) / price_base_quote_ask1,
    )
    return min(caps)
def get_host_ip():
    """Return this host's outbound IP address.

    'Connects' a UDP socket to a public address (no packets are sent for UDP
    connect) and reads back the local address the OS picked for that route.
    """
    # BUG FIX: socket creation now happens before the try block; previously a
    # failed socket() call raised NameError on 's' inside finally, masking
    # the original error.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('8.8.8.8', 80))
        ip = s.getsockname()[0]
    finally:
        s.close()
    return ip
if __name__ == '__main__':
    print('before proxy ip is {}'.format(get_host_ip()))
    set_proxy()
    print('after proxy ip is {}'.format(get_host_ip()))
    good_exchange_list = get_exchange_list(good_exchange_name)
    for exchange in good_exchange_list:
        set_exchange_key(exchange)
    # For each coin as base (quote=BTC, mid=USDT), scan every exchange in
    # good_exchange_list for a triangular-arbitrage opportunity.
    for symbol in good_coin:
        for exchange in good_exchange_list:
            # find_trade_chance(exchange, symbol, default_quote_cur, default_mid_cur)
            # NOTE(review): get_event_loop().run_until_complete is deprecated
            # on modern Python — asyncio.run() is the current idiom.
            asyncio.get_event_loop().run_until_complete(
                find_trade_chance(exchange, symbol, default_quote_cur, default_mid_cur))
|
from collections import defaultdict, deque
from typing import List
class Solution:
    def minimumSemesters(self, N: int, relations: List[List[int]]) -> int:
        """Return the minimum number of semesters needed to take all N
        courses given prerequisite pairs [i, j] (take i before j), or -1
        when the prerequisite graph contains a cycle.

        Level-by-level BFS topological sort: each BFS layer is one semester.
        """
        # prereqs[c]  = unfinished prerequisites of course c
        # unlocks[c]  = courses that list c as a prerequisite
        prereqs = {course: set() for course in range(1, N + 1)}
        unlocks = {course: set() for course in range(1, N + 1)}
        for before, course in relations:
            prereqs[course].add(before)
            unlocks[before].add(course)
        semesters = 0
        # Seed with every course that has no prerequisites at all.
        ready = [course for course in range(1, N + 1) if not prereqs[course]]
        remaining = set(range(1, N + 1))
        while ready:
            next_ready = []
            for course in ready:
                remaining.remove(course)
                for unlocked in unlocks[course]:
                    prereqs[unlocked].discard(course)
                    if not prereqs[unlocked]:
                        next_ready.append(unlocked)
            ready = next_ready
            semesters += 1
        # Anything left over was unreachable, i.e. part of a cycle.
        return semesters if not remaining else -1
|
import os
import glob
from google.cloud import storage
from sklearn.externals import joblib
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
def get_model(bucket_name):
    """Return the names of all blobs in the given GCS bucket."""
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)
    return [blob.name for blob in bucket.list_blobs()]
def check_modle(bucket_name, model_name):
    """Validate that *model_name* exists in the GCS bucket; exit otherwise."""
    available = get_model(bucket_name)
    if model_name not in available:
        raise SystemExit("Unexpected model {}! Add {} to GCS bucket and try again.".format(
            model_name, model_name))
    print('{} is a valid model in GCS bucket'.format(model_name))
def load_model(bucket_name, source_model_name):
    """Download *source_model_name* (plus 'model_columns.pkl') from the GCS
    bucket into ./model and return the most recently created unpickled model.

    Returns the loaded classifier; returns an error string when bucket_name
    is falsy.  Raises FileNotFoundError when no .pkl file can be loaded.
    """
    if not bucket_name:
        print('Sorry, that model bucket does not exist!')
        return 'Enter a valid modle name'
    storage_client = storage.Client()
    bucket = storage_client.get_bucket(bucket_name)  # Bucket name
    blob = bucket.blob(source_model_name)  # Source model blob
    # Dependency of the source model: expected feature-column order.
    cl_blob = bucket.blob('model_columns.pkl')
    # BUG FIX: download_to_filename returns None — the old code bound its
    # result to 'model'/'cl_model' as if it returned the model.
    blob.download_to_filename('./model/{}'.format(source_model_name))
    cl_blob.download_to_filename('./model/{}'.format('model_columns.pkl'))
    print('Blob {} downloaded to {}.'.format(
        source_model_name, source_model_name))
    model_directory = './model'
    try:
        # Pick the most recently created .pkl file in the model directory.
        # (The unused os.listdir scan from the old code was dropped.)
        list_of_files = glob.glob('./model/*.pkl')
        newest = max(list_of_files, key=os.path.getctime)
        print("Recently modified Docs", newest)
        model_file_name = '%s' % (newest)
        print("Model File name", model_file_name)
        clf = joblib.load(model_file_name)
        return clf
    except Exception as e:
        raise FileNotFoundError(
            "No model found in {} with suffix '.pkl'{}.".format(model_directory, e))
def upload_model(bucket_name, file_name, file_path):
    """Upload a local file into the bucket as *file_name*; return its public URL."""
    bucket = storage.Client().get_bucket(bucket_name)
    target = bucket.blob(file_name)
    target.upload_from_filename(file_path)  # read from the local filesystem
    print('Blob {} uploaded.'.format(file_name))
    return target.public_url
def delete_model(bucket_name, file_name, file_path):
    """Delete *file_name* from the bucket.

    *file_path* is unused but kept for interface compatibility.
    """
    storage_client = storage.Client()
    storage_client.get_bucket(bucket_name).blob(file_name).delete()
    print('Blob {} deleted.'.format(file_name))
    return 'Deleted Model'
|
# Read the three cycle positions (E: 15-year, S: 28-year, M: 19-year cycles).
e, s, m = map(int, input().split())
# A position equal to its cycle length corresponds to remainder 0.
if e % 15 == 0:
    e -= 15
if s % 28 == 0:
    s -= 28
if m % 19 == 0:
    m -= 19
# Scan years from 1 until all three remainders line up at once.
year = 1
while (year % 15, year % 28, year % 19) != (e, s, m):
    year += 1
print(year)
from adminapp.views import bookings
from django.shortcuts import render,redirect
from carapp.models import Customers,Users,CarVariant,Category,JourneyStage,Feedback,Drivers,Payment,Bookings,Car
from django.core.mail import send_mail
from django.contrib import messages
from datetime import date
from django.core.paginator import Paginator
# Create your views here.
def homepage(req):
    """Landing page: lists every car variant."""
    cars=CarVariant.objects.all()
    # locals() exposes `cars` (and `req`) as the template context.
    return render(req,"index.html",locals())
def contactus(req):
    """Static contact-us page."""
    return render(req,"contact.html")
def aboutus(req):
    """Static about-us page."""
    return render(req,"about-us.html")
def registerpage(req):
    """Register a new customer from the posted form, then send them to login."""
    if req.method == "POST":
        form = req.POST
        # NOTE(review): the password is stored in plain text — consider
        # hashing before save.
        customer = Customers(
            userid=form.get("userid"),
            uname=form.get("uname"),
            pwd=form.get("pwd"),
            phone=form.get("phone"),
            gender=form.get("gender"),
            address=form.get("address"),
        )
        customer.save()
        return redirect('/login')
    return render(req,"register.html")
def loginpage(req):
    """Authenticate an admin (Users) or a customer (Customers) and start a session."""
    if req.method == "POST":
        userid = req.POST.get("userid")
        pwd = req.POST.get("pwd")
        # NOTE(review): credentials are compared in plain text.
        admins = Users.objects.filter(userid=userid, pwd=pwd)
        if len(admins) > 0:
            print(admins[0])
            req.session["userid"] = admins[0].userid
            req.session["role"] = 'admin'
            req.session["uname"] = admins[0].uname
            return redirect("/dashboard")
        customers = Customers.objects.filter(userid=userid, pwd=pwd)
        print(customers)
        if len(customers) > 0:
            req.session["userid"] = userid
            req.session["role"] = 'Customer'
            req.session["uname"] = customers[0].uname
            return redirect("/")
        messages.error(req, 'Invalid username or password')
        return redirect("/login")
    return render(req,"login.html")
def products(req,cid=0,size=''):
    """Paginated car listing, filterable by category or capacity size code.

    cid > 0 filters by category; size 'm' = capacity <= 5, 'f' = capacity > 5.
    Rendered via locals(), so local variable names ARE the template context.
    """
    print(cid)
    print(size)
    cats=Category.objects.all()
    if(cid>0):
        cat=Category.objects.get(pk=cid)
        cars=CarVariant.objects.filter(category=cat)
    elif(size!='' and size=='m'):
        cars=CarVariant.objects.filter(capacity__lte=5)
    elif(size!='' and size=='f'):
        cars=CarVariant.objects.filter(capacity__gt=5)
    else:
        cars=CarVariant.objects.all()
    # Placeholder for a logged-in discount message (currently disabled).
    if 'userid' in req.session:
        pass
        #msg="You are given a Discount of 30% from Delta Driving on any Car Price. Thank You."
    # Six cars per page.
    paginator=Paginator(cars,6)
    page_number = req.GET.get('page')
    page_obj = paginator.get_page(page_number)
    return render(req,"products.html",locals())
def mybookings(req):
    """List the logged-in customer's bookings (rendered via locals())."""
    user=Customers.objects.get(pk=req.session.get("userid"))
    bks=Bookings.objects.filter(customer=user)
    return render(req,"mybookings.html",locals())
def bookingdetails(req,bid):
    """Booking detail page: journey stages, closure state and payment state.

    Rendered via locals(); `closed`, `phistory`, `complete` and `bal` are
    only defined on some paths — the template must tolerate their absence.
    """
    bk=Bookings.objects.get(pk=bid)
    print(bk)
    stages=JourneyStage.objects.filter(bookings=bk)
    # A 'Closed' journey stage marks the booking as finished.
    check=JourneyStage.objects.filter(bookings=bk,status='Closed')
    paid=Payment.objects.filter(booking=bk,complete=True)
    print(paid)
    if(len(check)>0):
        closed=True
    if(len(paid)>0):
        phistory=Payment.objects.filter(booking=bk)
        complete=True
    else:
        # Outstanding balance still due after the advance.
        bal=bk.billamount-bk.advance
        print(bal)
    return render(req,"bookdetails.html",locals())
def saveStage(req):
    """Append a journey stage to a booking; a 'status' form field closes it."""
    bid = req.POST.get("bid")
    booking = Bookings.objects.get(pk=bid)
    journey_stage = JourneyStage(
        stage=req.POST.get("stage"),
        bookings=booking,
        remarks=req.POST.get("remarks"),
    )
    if req.POST.get("status") is not None:
        journey_stage.status = 'Closed'
    journey_stage.save()
    return redirect(f"/bkdetails/{bid}")
def savePaymentAndFeedback(req):
    """Record the final payment and feedback, then sync odometer readings."""
    bid = req.POST.get("bid")
    booking = Bookings.objects.get(pk=bid)
    # Final balance payment for this booking.
    payment = Payment(
        booking=booking,
        complete=True,
        nameoncard=req.POST.get("nameoncard"),
        cardno=req.POST.get("cardno"),
        amount=req.POST.get("balance"),
        remarks='Final Payment',
    )
    payment.save()
    feedback = Feedback(
        customer=booking.customer,
        feedback=req.POST.get("feedback"),
        ratings=req.POST.get("rating"),
    )
    feedback.save()
    # Copy the car's current reading as the trip start, record the return
    # reading on the booking, then advance the car's odometer.
    booking.startreading = booking.car.reading
    booking.returnreading = req.POST.get("reading")
    booking.save()
    car = booking.car
    car.reading = req.POST.get("reading")
    car.save()
    messages.success(req,"Payment made successfully")
    return redirect(f"/bkdetails/{bid}")
def prod_details(req,cid):
    """Car variant detail page; a POST creates a booking plus advance payment.

    Rendered via locals().  NOTE(review): the card number is persisted as
    submitted — confirm this meets payment-data handling requirements.
    """
    if(req.method=="POST"):
        fromdate=req.POST.get("fromdate")
        todate=req.POST.get("todate")
        message=req.POST.get("message")
        variant=CarVariant.objects.get(pk=req.POST.get("cid"))
        user=Customers.objects.get(pk=req.session.get("userid"))
        status='Pending'
        advance=req.POST.get("advance")
        pickuplocation=req.POST.get("pickuplocation")
        billamount=req.POST.get("billamount")
        bk=Bookings(fromdate=fromdate,todate=todate,message=message,variant=variant,customer=user,status=status,billamount=billamount, advance=advance,pickuplocation=pickuplocation)
        bk.save()
        # Re-fetch the newest booking to attach the advance payment to it.
        bkinfo=Bookings.objects.latest('bid')
        cardno=req.POST.get("cardno")
        nameoncard=req.POST.get("nameoncard")
        remarks='Booking done'
        pymt=Payment(booking=bkinfo,amount=advance,nameoncard=nameoncard,cardno=cardno,remarks=remarks)
        pymt.save()
        messages.success(req,"Car booked successfully")
        return redirect(f"/details/{cid}/")
    car=CarVariant.objects.get(pk=cid)
    if 'userid' in req.session:
        user=Customers.objects.get(pk=req.session.get("userid"))
        # Loyalty-discount hook for repeat customers (currently disabled).
        if Bookings.objects.filter(customer=user).count()>2:
            #discount=car.price*0.10
            pass
    return render(req,"product_details.html",locals())
def usercancelbooking(req,bid):
    """Delete the given booking on the customer's behalf."""
    Bookings.objects.get(pk=bid).delete()
    messages.success(req,"Booking Cancelled Successfully")
    return redirect("/mybookings")
def logout(req):
    """Clear the whole session and return to the home page."""
    req.session.flush()
    return redirect("/")
from token_dispenser import TokenDispenser
import grpc
from v1_pb2 import GetHomeGraphRequest
from v1_pb2_grpc import StructuresServiceStub
class GoogleAPIService:
    """Thin wrapper over the Google Home Foyer gRPC API."""

    def __init__(self, username: str, password: str):
        # NOTE(review): attribute keeps its original (misspelled) name
        # 'token_dispener' for backward compatibility with existing callers.
        self.token_dispener = TokenDispenser(username, password)

    def get_devices(self) -> list:
        """Return the devices in the account's home graph."""
        access_token = self.token_dispener.dispense()
        # Composite channel credentials: TLS plus the OAuth access token.
        call_creds = grpc.access_token_call_credentials(access_token)
        tls_creds = grpc.ssl_channel_credentials()
        channel_creds = grpc.composite_channel_credentials(tls_creds, call_creds)
        channel = grpc.secure_channel('googlehomefoyer-pa.googleapis.com:443', channel_creds)
        structures = StructuresServiceStub(channel)
        response = structures.GetHomeGraph(GetHomeGraphRequest())
        return response.home.devices
|
from django.conf.urls import url
from shop_manager import views
# NOTE(review): these regexes are unanchored (no '^'/'$'); ordering makes
# routing work, but 'categories...' also matches any URL containing that
# substring — consider anchoring.
# NOTE(review): two routes share name="product" (the category route likely
# meant name="category"), and 'categories' is named "products" — reverse()
# lookups may resolve unexpectedly.  Left unchanged to avoid breaking
# existing reverse() callers.
urlpatterns = [
    url(r'categories/(?P<category_name>[a-zA-Z0-9]+)/new', views.new_product, name="new_product"),
    url(r'categories/(?P<category_name>[a-zA-Z0-9]+)/(?P<item_id>[0-9]+)', views.product, name="product"),
    url(r'categories/(?P<category_name>[a-zA-Z0-9]+)', views.category, name="product"),
    url(r'categories', views.categories, name="products"),
    url(r'history', views.history, name="history"),
    url(r'order/(?P<order_id>[0-9]+)', views.order, name="order"),
    url(r'', views.open_orders, name="open_orders"),
]
def minibatch_weighted_gradient_descent(X,y,theta,learning_rate=0.01,iterations=10,batch_size =20):
    """Mini-batch gradient descent with a covariance-derived scalar weight.

    Returns (theta, cost_history, theta_history).
    NOTE(review): relies on a global `normal` array that is not defined in
    this module's visible scope — confirm where it comes from.
    """
    # Build a diagonal matrix from the covariance of `normal`'s first two
    # columns (off-diagonal entries are zeroed out).
    cov=np.cov(normal[:,[0,1]])
    for i in range(len(cov)):
        for j in range(len(cov)):
            if (i!=j):
                cov[i][j]=0
    m = len(y)
    cost_history = np.zeros(iterations)
    # NOTE(review): history width is hard-coded to 2 parameters.
    theta_history = np.zeros((iterations,2))
    # NOTE(review): n_batches is computed but never used.
    n_batches = int(m/batch_size)
    for it in range(iterations):
        cost =0.0
        # Reshuffle the samples each epoch.
        indices = np.random.permutation(m)
        X = X[indices]
        y = y[indices]
        for i in range(0,m,batch_size):
            X_i = X[i:i+batch_size]
            y_i = y[i:i+batch_size]
            # Prepend a bias column of ones.
            X_i = np.c_[np.ones(len(X_i)),X_i]
            prediction = np.dot(X_i,theta)
            # NOTE(review): `i` here is the batch START INDEX (0, 20, 40, ...),
            # so cov[i][i] goes out of bounds once i >= len(cov).  This looks
            # like an indexing bug — confirm the intended weighting before
            # changing it.
            theta = theta -(1/m)*learning_rate*cov[i][i]*( X_i.T.dot((prediction - y_i)))
            # cal_cost is defined elsewhere in this project.
            cost += cal_cost(theta,X_i,y_i)
        theta_history[it,:] =theta.T
        cost_history[it] = cost
    return theta, cost_history,theta_history
|
#-*- coding: utf-8 -*-
#
# Copyright © 2014 Jonathan Storm <the.jonathan.storm@gmail.com>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING.WTFPL file for more details.
__author__ = 'jstorm'
import argparse
from transmedia.util import calculate_output_png_width_from_file
from transmedia.conversion import (ConvertBytesInFileToPngFile,
ConvertPixelsInPngFileToBytesFile)
if __name__ == '__main__':
    # Command-line interface: one input file, one output file.
    cli = argparse.ArgumentParser(description='Transform bytes into PNG or vice versa')
    cli.add_argument('-i', '--input', help='input file', required=True)
    cli.add_argument('-o', '--output', help='output file', required=True)
    args = cli.parse_args()
    # The conversion direction is inferred from which side is the .png file.
    if args.input.endswith(".png"):
        ConvertPixelsInPngFileToBytesFile(args.input, args.output).execute()
    elif args.output.endswith(".png"):
        width = calculate_output_png_width_from_file(args.input)
        ConvertBytesInFileToPngFile(args.input, args.output, width).execute()
    else:
        raise ValueError("Either the input or output file must end in '.png'")
|
import re
from contracts import contract
class Utils:
    """String-normalization helpers."""

    # Matches any run of whitespace characters.
    WS_PATTERN = re.compile(r"\s+")

    @staticmethod
    @contract
    def replace_redundant_ws(string: str):
        """Collapse internal whitespace runs to single spaces and strip the ends."""
        return re.subn(Utils.WS_PATTERN, " ", string)[0].strip()

    @staticmethod
    def normalize_string(string: str):
        """Whitespace-normalize then lowercase *string*.

        Delegates to replace_redundant_ws instead of duplicating its logic.
        """
        return Utils.replace_redundant_ws(string).lower()
|
import command as cm
class CommandFactory():
    """Turns a recognized voice command into the matching command object."""

    # constructor takes in voice object
    def __init__(self,voiceobj):
        self.command = voiceobj.getcommand()
        self.arg = voiceobj.getarg()

    def getcmdobj(self):
        """Instantiate the command class registered for self.command.

        Unrecognized commands fall back to the generic cm.Command.
        """
        dispatch = {
            "rm": cm.rm,
            "cd": cm.cd,
            "cd ..": cm.cdback,
            "grep": cm.grep,
        }
        handler = dispatch.get(self.command, cm.Command)
        return handler(self.command, self.arg)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
# Root of the album app: a single route handled by the 'albuns' view.
# NOTE(review): `patterns()` and string view names are the pre-Django-1.8
# style (removed in 1.10) -- keep in sync with the project's Django version.
urlpatterns = patterns('site_fotos.album.views',
    url(r'^$', 'albuns', name='albuns'),
)
from sys import argv
import lxml.html as lh
# Parse the document named on the command line and print the XPath of
# every element, in document order.
tree = lh.parse(argv[1])
# iter() replaces getiterator(), which is deprecated in lxml/ElementTree
# (and removed in recent releases); it yields the same elements.
for element in tree.iter():
    print(tree.getpath(element))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, shutil
from conans import ConanFile, tools, CMake
from glob import glob
class EigenConan(ConanFile):
    """Conan recipe packaging the header-only Eigen linear-algebra library."""
    name = "eigen"
    version = "3.3.7"
    url = "https://github.com/conan-community/conan-eigen"
    homepage = "http://eigen.tuxfamily.org"
    description = "Eigen is a C++ template library for linear algebra: matrices, vectors, \
numerical solvers, and related algorithms."
    license = "Mozilla Public License Version 2.0"
    # Header-only: sources never need to be copied into the build tree.
    no_copy_source = True
    settings = 'arch', 'cppstd', 'compiler', 'build_type'
    options = {
        "test": [True, False]
    }
    default_options = "test=False"
    _build_subfolder = 'build'
    _source_subfolder = 'src'

    def configure(self):
        pass

    def source(self):
        """Download and unpack the release tarball into _source_subfolder."""
        # e.g. https://gitlab.com/libeigen/eigen/-/archive/3.3.8/eigen-3.3.8.tar.gz
        source_url = "https://gitlab.com/libeigen/eigen"
        tools.get("{0}/-/archive/{1}/eigen-{1}.tar.gz".format(source_url, self.version))
        shutil.move(glob("eigen-*")[0], self._source_subfolder)
        if self.settings.build_type == 'Debug':
            # NOTE(review): assumes <src>/debug/gdb already exists (there is no
            # makedirs here) -- confirm the tarball ships that directory.
            shutil.copyfile( '%s/%s/debug/gdb/printers.py' % (self.source_folder, self._source_subfolder)
                           , '%s/%s/debug/gdb/eigen_printers.py' % (self.source_folder, self._source_subfolder)
                           , follow_symlinks = True)

    def _configure_cmake(self):
        """Create a configured CMake helper with per-compiler tuning flags."""
        cmake = CMake(self)
        if self.settings.compiler=="gcc":
            if self.settings.arch=="broadwell":
                # Fixed: GCC spells the tuning flag "-mtune"; "-tune" is not a
                # valid option and makes the compiler invocation fail.
                cmake.definitions["CMAKE_CXX_FLAGS"] = "-march=broadwell -mtune=broadwell"
            else:
                cmake.definitions["CMAKE_CXX_FLAGS"] = "-mtune=generic"
        cmake.configure(build_folder=self._build_subfolder, source_folder=self._source_subfolder)
        return cmake

    def build(self):
        # The build step could be completely disabled since eigen performs the
        # whole setup again during the install step.
        self.output.info('current working dir: %s' % os.getcwd())
        os.makedirs(self._build_subfolder)
        with tools.chdir(self._build_subfolder):
            cmake = self._configure_cmake()
            cmake.build()

    def package(self):
        """Install headers and, for Debug builds, the GDB pretty-printer."""
        with tools.chdir(os.path.join(self.build_folder, self._build_subfolder)):
            cmake = self._configure_cmake()
            cmake.install()
        # Additional resources.
        if self.settings.build_type == 'Debug':
            self.copy('eigen_printers.py', src='src/debug/gdb', dst='gdb', keep_path=True)

    def package_info(self):
        self.cpp_info.includedirs = ['include/eigen3']
        if self.settings.build_type == 'Debug':
            # Consumers use these to hook the printers into GDB; variable names
            # are case-sensitive in CMake.
            self.user_info.GDB_PRINTER_FOLDER = 'gdb'
            self.user_info.GDB_PRINTER_FILE = 'eigen_printers.py'
            self.user_info.GDB_IMPORT_CLASSES = 'register_eigen_printers, build_eigen_dictionary'
            self.user_info.GDB_PRINTER_CLASS = 'register_eigen_printers'
|
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from src.core.flask_app import app
from src.core.database import db
from src.models.user import User
from src.models.todos import Todo
# Wire flask-migrate onto the app/db pair and expose its Alembic commands
# under `python <this file> db <command>` via flask-script's Manager.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
import pytest
from app import create_app
@pytest.fixture
def client():
    """Yield a test client for the app inside a pushed application context.

    Using the context manager (instead of the previous manual push/pop)
    guarantees the context is popped even when the test body raises.
    """
    app = create_app()
    app.config["TESTING"] = True
    with app.app_context():
        yield app.test_client()
|
import os.path
import shutil
from unittest.mock import patch
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.store.conversations import FileConversationStore
from programytest.storage.asserts.store.assert_conversations import ConverstionStoreAsserts
from programy.storage.stores.file.store.config import FileStoreConfiguration
from programytest.client import TestClient
from programy.dialog.conversation import Conversation
from programy.dialog.question import Question
class FileConversationStoreTests(ConverstionStoreAsserts):
    """Tests for FileConversationStore: initialisation, storage path,
    round-trip persistence, and swallowed read/write exceptions."""

    def setUp(self):
        # Scratch directory used by the conversation-storage tests.
        self._tmpdir = os.path.dirname(__file__) + os.sep + "conversations"

    def tearDown(self):
        # Remove the scratch directory and verify it is really gone.
        if os.path.exists(self._tmpdir):
            shutil.rmtree(self._tmpdir)
            self.assertFalse(os.path.exists(self._tmpdir))

    def test_initialise(self):
        config = FileStorageConfiguration()
        config.conversation_storage._dirs = [self._tmpdir]
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileConversationStore(engine)
        self.assertEqual(store.storage_engine, engine)

    def test_storage_path(self):
        config = FileStorageConfiguration()
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileConversationStore(engine)
        # Fixed: assertEquals is a deprecated alias of assertEqual (removed in
        # Python 3.12).
        # NOTE(review): depends on the default config pointing at /tmp.
        self.assertEqual('/tmp/conversations', store._get_storage_path())
        self.assertIsInstance(store.get_storage(), FileStoreConfiguration)

    def test_conversation_storage(self):
        config = FileStorageConfiguration()
        config.conversation_storage._dirs = [self._tmpdir]
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileConversationStore(engine)
        self.assertEqual(store.storage_engine, engine)
        self.assert_conversation_storage(store, can_empty=True, test_load=True)

    def patch_write_file(self, conversation_filepath, json_text):
        # Stand-in for _write_file that always fails.
        raise Exception("Mock Exception")

    @patch('programy.storage.stores.file.store.conversations.FileConversationStore._write_file', patch_write_file)
    def test_save_conversation_with_exception(self):
        # With _write_file failing, store_conversation must not raise.
        config = FileStorageConfiguration()
        config.conversation_storage._dirs = [self._tmpdir]
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileConversationStore(engine)
        store.empty()
        client = TestClient()
        client_context = client.create_client_context("user1")
        conversation = Conversation(client_context)
        question1 = Question.create_from_text(client_context, "Hello There")
        question1.sentence(0).response = "Hi"
        conversation.record_dialog(question1)
        store.store_conversation(client_context, conversation)

    def patch_read_file(self, conversation_filepath, conversation):
        # Stand-in for _read_file that always fails.
        raise Exception("Mock Exception")

    @patch('programy.storage.stores.file.store.conversations.FileConversationStore._read_file', patch_read_file)
    def test_load_conversation_with_exception(self):
        # With _read_file failing, load_conversation must not raise.
        config = FileStorageConfiguration()
        config.conversation_storage._dirs = [self._tmpdir]
        engine = FileStorageEngine(config)
        engine.initialise()
        store = FileConversationStore(engine)
        store.empty()
        client = TestClient()
        client_context = client.create_client_context("user1")
        conversation = Conversation(client_context)
        question1 = Question.create_from_text(client_context, "Hello There")
        question1.sentence(0).response = "Hi"
        conversation.record_dialog(question1)
        store.store_conversation(client_context, conversation)
        conversation = Conversation(client_context)
        store.load_conversation(client_context, conversation)
|
import os
import sys
# Input a valid address of the target folder as the first CLI argument.
path = sys.argv[1]
print(f"Start in {path}")
# All entries (files and directories) of the target folder.
files = os.listdir(path)
# Every distinct "extension" seen in the folder.
extensions_names = set()
# One bucket per recognised file group.
music_files = []
image_files = []
document_files = []
video_files = []
unknown_files_and_folders = []
music_files_ext = ('.mp3', '.ogg', '.waw', '.amr')
image_file_ext = ('.jpeg', '.png', '.jpg', '.pdf')
document_files_ext = ('.doc', '.docx', '.txt')
video_files_ext = ('.avi', '.mp4', '.mov')
# sort all files according to extension
for file in files:
    # Fixed: the old `for char in file` loop recomputed rfind() once per
    # character for no benefit; a single computation gives the same result.
    # rfind() returns -1 for dot-less names, so the slice then records the
    # entry's last character -- kept as-is for output parity.
    index_of_dot = file.rfind('.')
    extensions_names.add(file[index_of_dot:])
    if file.endswith(music_files_ext):
        music_files.append(file)
    elif file.endswith(image_file_ext):
        image_files.append(file)
    elif file.endswith(document_files_ext):
        document_files.append(file)
    elif file.endswith(video_files_ext):
        video_files.append(file)
    else:
        unknown_files_and_folders.append(file)
print(f"Music files: {music_files} \n"
      f"Image files: {image_files} \n"
      f"Document files: {document_files} \n"
      f"Video files: {video_files} \n"
      f"Unknown files or folders: {unknown_files_and_folders} \n"
      )
print(
    f"We have files with the following extensions in this folder: {extensions_names}")
|
# Generated by Django 3.0.3 on 2020-11-13 07:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: adds free-form serialized "attributes" columns to the
    # `class` and `object` models (JSON stored as plain CharField text, with
    # empty-list / empty-dict string defaults).
    dependencies = [
        ('backend', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='class',
            name='attributes',
            field=models.CharField(default='[]', max_length=2000),
        ),
        migrations.AddField(
            model_name='object',
            name='attributes',
            field=models.CharField(default='{}', max_length=3000),
        ),
    ]
|
import logger
import network
import network.network_base
import config_handler
LOG_FILENAME = "server.log"
LOGGER = logger.logger_generator(LOG_FILENAME)
# Share the server logger with the network layer.
network.network_base.LOGGER = LOGGER

# Config section names.
PROTOCOL_CATEGORY = "protocol"
PROTO_CODES = "protocol_codes"
NETWORK_CATEGORY = "network"
DATABASE_CATEGORY = "database"
ADMIN_CATEGORY = "admin"

# Load config files
CONFIG = network.CONFIG
STRINGS = config_handler.ConfigHandler(conf_type=config_handler.ConfigHandler.STRINGS)
SERVER_CONFIG = config_handler.ConfigHandler(conf_type=config_handler.ConfigHandler.SERVER)

# Load protocol configs
SERVER_HELLO = network.CONFIG.get(PROTOCOL_CATEGORY, "server_hello")
CLIENT_HELLO = network.CONFIG.get(PROTOCOL_CATEGORY, "client_hello")
ADMIN_HELLO = network.CONFIG.get(PROTOCOL_CATEGORY, "admin_hello")
INJECTOR_HELLO = network.CONFIG.get(PROTOCOL_CATEGORY, "injector_hello")
# NOTE: the 'string_escape' codec exists only on Python 2.
SOCKET_CLOSE_DATA = network.CONFIG.get(PROTOCOL_CATEGORY, "socket_close").decode('string_escape')

# Load proto codes configs
PROTOCOL_STATUS_CODES = {"ok": network.CONFIG.getint(PROTO_CODES, "ok"),
                         "error": network.CONFIG.getint(PROTO_CODES, "error"),
                         "incident_info": network.CONFIG.getint(PROTO_CODES, "incident_info"),
                         "authentication": network.CONFIG.getint(PROTO_CODES, "authentication"),
                         "get_rules": network.CONFIG.getint(PROTO_CODES, "get_rules"),
                         "add_rule": network.CONFIG.getint(PROTO_CODES, "add_rule"),
                         "update_rule": network.CONFIG.getint(PROTO_CODES, "update_rule"),
                         "delete_rule": network.CONFIG.getint(PROTO_CODES, "delete_rule"),
                         "get_log": network.CONFIG.getint(PROTO_CODES, "get_log"),
                         "get_connection_history": network.CONFIG.getint(PROTO_CODES, "get_connection_history"),
                         "get_incident_history": network.CONFIG.getint(PROTO_CODES, "get_incident_history")
                         }

# Load server configs
SERVER_PORT = SERVER_CONFIG.get(NETWORK_CATEGORY, "port")
INCIDENTS_FILE = SERVER_CONFIG.get("default", "incidents_file")
MAX_RECV = SERVER_CONFIG.getint(NETWORK_CATEGORY, "recv_buffer_size")

# Load admin config
# Fixed: this line previously also rebound DATABASE_NAME (chained assignment),
# which was then immediately overwritten in the database section below.
ADMIN_LIST_FILENAME = SERVER_CONFIG.get(ADMIN_CATEGORY, "filename")

# Load database configs
DATABASE_NAME = SERVER_CONFIG.get(DATABASE_CATEGORY, "filename")
DATABASE_STRUCTURE_FILENAME = SERVER_CONFIG.get(DATABASE_CATEGORY, "structure")
|
import graphene
import crud_app.crud_api.schema
class Query(crud_app.crud_api.schema.Query, graphene.ObjectType):
    """Root query type; all queries come from the crud_api schema."""


class Mutation(crud_app.crud_api.schema.Mutation, graphene.ObjectType):
    """Root mutation type; all mutations come from the crud_api schema."""


# The executable schema exposed to the GraphQL endpoint.
schema = graphene.Schema(query=Query, mutation=Mutation)
|
#!/usr/bin/env python
'''Test to see whether class variables and methods are in fact
inherited.'''
class Parent(object):
    """Parent class: a name-mangled class variable plus a per-instance value."""

    __class_var = 10  # mangles to _Parent__class_var

    def __init__(self, obj_var):
        self.__value = obj_var

    @classmethod
    def get_class_var(cls):
        """Look the class variable up through whichever class is asked."""
        return cls.__class_var

    @classmethod
    def set_class_var(cls, value):
        """Rebind the class variable on *cls* itself."""
        cls.__class_var = value

    @property
    def obj_var(self):
        """The instance's stored value."""
        return self.__value

    @obj_var.setter
    def obj_var(self, value):
        self.__value = value

    def __str__(self):
        return str(self.obj_var)
class Child(Parent):
    """Child class: adds one extra per-instance value on top of Parent."""

    def __init__(self, obj_var, new_ojb_var):
        super().__init__(obj_var)
        self.__extra = new_ojb_var

    @property
    def child_var(self):
        """The child-specific value (read-only)."""
        return self.__extra

    def __str__(self):
        return ' '.join((super().__str__(), str(self.child_var)))
if __name__ == '__main__':
    # Demonstrate that the mangled class variable is shared until a write
    # through the child shadows it.  Output is identical to the old
    # str.format version.
    parent_obj = Parent('abc')
    child_obj = Child('bde', 'efg')
    print(f'Parent: {Parent.get_class_var()}')
    print(f'Child: {Child.get_class_var()}')
    print('setting Child class variable')
    Child.set_class_var(15)
    print(f'Parent: {Parent.get_class_var()}')
    print(f'Child: {Child.get_class_var()}')
    print('setting Parent class variable')
    Parent.set_class_var(25)
    print(f'Parent: {Parent.get_class_var()}')
    print(f'Child: {Child.get_class_var()}')
|
# Generated by Django 2.2.4 on 2020-11-11 16:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: rewires Circuit.id_object.  The historical-record copy
    # gets a non-constrained DO_NOTHING FK; the live model's FK is dropped and
    # re-added with CASCADE and the 'circ_obj' reverse name.
    dependencies = [
        ('objects', '0006_auto_20201111_1159'),
        ('circuits', '0003_auto_20201111_1506'),
    ]
    operations = [
        migrations.AddField(
            model_name='historicalcircuit',
            name='id_object',
            field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='objects.Object'),
        ),
        migrations.RemoveField(
            model_name='circuit',
            name='id_object',
        ),
        migrations.AddField(
            model_name='circuit',
            name='id_object',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='circ_obj', to='objects.Object'),
        ),
    ]
|
# -*- coding: utf-8 -*-
# @Time : 2019-05-09 20:26
# @Author : focusxyhoo
# @FileName : pdf_reader.py
import os
from pdfminer.pdfparser import PDFParser, PDFDocument, PDFSyntaxError
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTTextBoxHorizontal, LAParams
from pdfminer.pdfinterp import PDFTextExtractionNotAllowed
def parse(file):
    """Extract horizontal text from PDF *file* and append it to '<file>.txt'.

    Entries that pdfminer cannot parse as PDF (e.g. .DS_Store) are skipped
    with a notice.  Raises PDFTextExtractionNotAllowed when the document
    forbids text extraction.
    """
    # Fixed: the file object was never closed; `with` guarantees it is,
    # including on the early-return and raise paths.
    with open(file, 'rb') as fp:
        # Build a PDF parser from the file object and bind it to a document.
        parser = PDFParser(fp)
        doc = PDFDocument()
        try:
            parser.set_document(doc)
            doc.set_parser(parser)
        except PDFSyntaxError:
            # Not a valid PDF -- this is how non-PDF entries get filtered.
            print("已过滤文件 %s" % file)
            return
        # Initialize with an empty password (document is not encrypted).
        doc.initialize()
        # Bail out if the document does not permit text extraction.
        if not doc.is_extractable:
            raise PDFTextExtractionNotAllowed
        # Shared resource manager + page aggregator device + interpreter.
        pdf_resource_manager = PDFResourceManager()
        la_params = LAParams()
        device = PDFPageAggregator(pdf_resource_manager, laparams=la_params)
        interpreter = PDFPageInterpreter(pdf_resource_manager, device)
        # Process one page at a time; get_pages() yields the page list.
        for page in doc.get_pages():
            interpreter.process_page(page)
            # layout is an LTPage holding the page's parsed objects
            # (LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal, ...).
            for x in device.get_result():
                if isinstance(x, LTTextBoxHorizontal):
                    # Crude: appends next to the original file, name + '.txt'.
                    with open(file + '.txt', 'a') as f:
                        f.write(x.get_text() + "\n")
    print("%s 转换成功,已保存本地!" % file)
def main():
    """Convert every file in the hard-coded target directory."""
    files_path = "/Users/huxiaoyang/Desktop/同济项目论文搜索/"
    for file in os.listdir(files_path):
        print("正在处理 %s" % file)
        # os.path.join avoids the doubled slash the old '+'-concatenation
        # produced (files_path already ends with '/').
        parse(os.path.join(files_path, file))


if __name__ == '__main__':
    main()
|
import json
import falcon
import peewee
from models import models_api
from utils.myjson import JSONEncoderPlus
class MinisterioId(object):
    """GET handler returning id and name of a single ministry."""

    @models_api.database.atomic()
    def on_get(self, req, resp, ministerio_id):
        # Non-numeric ids are unknown resources, not bad requests.
        try:
            ministerio_id = int(ministerio_id)
        except ValueError:
            raise falcon.HTTPNotFound()
        try:
            row = models_api.Comparador.select(
                models_api.Comparador.id_ministerio,
                models_api.Comparador.nombre_ministerio
            ).where(
                models_api.Comparador.id_ministerio == ministerio_id
            ).get()
        except models_api.Comparador.DoesNotExist:
            raise falcon.HTTPNotFound()
        payload = {
            'id': row.id_ministerio,
            'nombre': row.nombre_ministerio
        }
        resp.body = json.dumps(payload, cls=JSONEncoderPlus, sort_keys=True)
class Ministerio(object):
    """GET handler listing every distinct ministry (id + name)."""

    @models_api.database.atomic()
    def on_get(self, req, resp):
        rows = models_api.Comparador.select(
            models_api.Comparador.id_ministerio,
            models_api.Comparador.nombre_ministerio
        ).distinct().order_by(
            models_api.Comparador.id_ministerio
        )
        listado = [
            {'id': fila['id_ministerio'], 'nombre': fila['nombre_ministerio']}
            for fila in rows.dicts().iterator()
        ]
        payload = {
            'n_ministerios': rows.count(),
            'ministerios': listado
        }
        resp.body = json.dumps(payload, cls=JSONEncoderPlus, sort_keys=True)
class MinisterioCategoria(object):
    """GET handler listing level-1 categories, optionally restricted to the
    ministries given in ?ministerio=; a category is kept only when it appears
    in at least as many rows as ministries requested."""

    @models_api.database.atomic()
    def on_get(self, req, resp):
        filters = []
        q_ministerio = req.params.get('ministerio', [])
        if q_ministerio:
            # A single occurrence of the parameter arrives as a bare string.
            if isinstance(q_ministerio, basestring):
                q_ministerio = [q_ministerio]
            try:
                q_ministerio = [int(x) for x in q_ministerio]
            except ValueError:
                raise falcon.HTTPBadRequest("Parametro incorrecto", "ministerio debe ser un entero")
            filters.extend([models_api.Comparador.id_ministerio << q_ministerio])
        categorias = models_api.Comparador.select(
            models_api.Comparador.id_categoria_nivel1,
            models_api.Comparador.categoria_nivel1,
            peewee.fn.count(models_api.Comparador.id_categoria_nivel1)
        ).where(
            models_api.Comparador.categoria_nivel1.is_null(False),
            *filters
        ).group_by(
            models_api.Comparador.id_categoria_nivel1,
            models_api.Comparador.categoria_nivel1
        ).having(
            peewee.fn.count(models_api.Comparador.id_categoria_nivel1) >= len(q_ministerio)
        ).order_by(
            models_api.Comparador.id_categoria_nivel1
        ).distinct()
        # Fixed: removed a leftover debug `print` of the raw SQL -- it used
        # Python 2 print-statement syntax and wrote to stdout on every request.
        response = {
            'n_categorias': categorias.count(),
            'categorias': [
                {
                    'id': categoria['id_categoria_nivel1'],
                    'nombre': categoria['categoria_nivel1']
                }
                for categoria in categorias.dicts().iterator()]
        }
        resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
class MinisterioIdCategoria(object):
    """GET handler listing the level-1 categories of one ministry."""

    @models_api.database.atomic()
    def on_get(self, req, resp, ministerio_id):
        rows = models_api.Comparador.select(
            models_api.Comparador.id_categoria_nivel1,
            models_api.Comparador.categoria_nivel1
        ).where(
            models_api.Comparador.id_ministerio == ministerio_id,
            models_api.Comparador.categoria_nivel1.is_null(False)
        ).order_by(
            models_api.Comparador.id_categoria_nivel1
        )
        items = [
            {'id': fila['id_categoria_nivel1'], 'nombre': fila['categoria_nivel1']}
            for fila in rows.dicts().iterator()
        ]
        payload = {
            'n_categorias': rows.count(),
            'categorias': items
        }
        resp.body = json.dumps(payload, cls=JSONEncoderPlus, sort_keys=True)
class MinisterioIdCategoriaIdStats(object):
    # GET handler: aggregate stats for one (ministry, category) pair, falling
    # back to a zero-filled response when no aggregated row exists.

    @models_api.database.atomic()
    def on_get(self, req, resp, ministerio_id, categoria_id):
        # Non-numeric path parameters are unknown resources.
        try:
            ministerio_id = int(ministerio_id)
            categoria_id = int(categoria_id)
        except ValueError:
            raise falcon.HTTPNotFound()
        try:
            stats = models_api.Comparador.get(
                models_api.Comparador.id_ministerio == ministerio_id,
                models_api.Comparador.id_categoria_nivel1 == categoria_id
            )
        except models_api.Comparador.DoesNotExist:
            # No aggregated row: use the zero-filled fallback below.
            stats = None
        if stats:
            response = {
                'categoria': {
                    "id": stats.id_categoria_nivel1,
                    "nombre": stats.categoria_nivel1,
                },
                'ministerio': {
                    'id': stats.id_ministerio,
                    'nombre': stats.nombre_ministerio,
                },
                'monto_promedio': int(stats.monto_promedio),
                'monto_total': int(stats.monto),
                'n_licitaciones_adjudicadas': stats.licit_adjudicadas,
                'n_proveedores': stats.proveed_favorecidos
            }
        else:
            # Names are looked up directly from the reference tables.
            # NOTE(review): these .get() calls can raise DoesNotExist (the
            # handlers below are commented out) and would surface as a 500 --
            # confirm that is intended.
            # try:
            response = {
                'ministerio': {
                    'id': ministerio_id,
                    'nombre': models_api.MinisterioMr.get(models_api.MinisterioMr.id_ministerio == ministerio_id).nombre_ministerio,
                },
                'categoria': {
                    "id": categoria_id,
                    "nombre": models_api.Catnivel1.get(models_api.Catnivel1.id_categoria_nivel1 == categoria_id).categoria_nivel1,
                },
                'monto_promedio': 0,
                'monto_total': 0,
                'n_licitaciones_adjudicadas': 0,
                'n_proveedores': 0
            }
            # except models_api.Comparador.DoesNotExist as e:
            #     raise falcon.HTTPNotFound()
            # except:
            #     raise falcon.HTTPBadRequest("", "")
        resp.body = json.dumps(response, cls=JSONEncoderPlus, sort_keys=True)
|
import unittest
import os
from application import create_app
class TestRootEndpoint(unittest.TestCase):
    """Smoke test: POST to the root path is not a routed endpoint."""

    def setUp(self):
        testing_app = create_app(config="testing")
        self.app = testing_app
        self.client = testing_app.test_client()

    def tearDown(self):
        pass

    def test_root_endpoint(self):
        response = self.client.post("/")
        self.assertEqual(response.status_code, 404)
from src.tasks.api import Task
def test_task_equality():
    """Tasks built from different fields must not compare equal."""
    first = Task('buy car', 'igor')
    second = Task('buy beer', 'pawel')
    assert not first == second
def test_dict_equality():
    """_asdict() of tasks with different fields yields different dicts."""
    first_dict = Task('buy car', 'igor')._asdict()
    second_dict = Task('buy beer', 'pawel')._asdict()
    assert first_dict != second_dict
|
#!/usr/bin/python
# Mat4Pep-matrix_scorer.peptide.py
# Jeremy Horst, 04/06/2010
#####################################
# this program takes as input #
# [1]- a FASTA file #
# [2]- a directory of FASTA files #
# [3]- a scoring matrix #
# [4,5]- gap penalties #
# calculates total similarity score #
#####################################
# break up query protein into all possible fragments
# that match the size of sequences in the database
# score position by TSS of all fragments covering the position
# normalize by amount of fragments considered at position,
# perhaps multiplied by the size of the fragment itself
# test by recapture
import os
from sys import argv
from os import mkdir
from os import listdir
from random import shuffle
from random import randrange
from subprocess import call
from subprocess import Popen
from subprocess import PIPE
##################
# matrix details #
value_limit = 20

# Location of the FASTA ggsearch/ssearch binaries; overridable with -gg.
ggsearch_dir = './fasta-35.4.11/'
if '-gg' in argv:
    ggsearch_dir = argv[argv.index('-gg')+1]

AA_order = "A R N D C Q E G H I L K M F P S T W Y V B Z X".split()
matrix_size = len(AA_order)
min_value = -0.01371074746
max_value = 0.01116456299

##################
# Make the scratch directories; ignore failures (typically "already exists").
# Fixed: the old bare `except:` also swallowed KeyboardInterrupt/SystemExit;
# mkdir can only raise OSError here.
try: mkdir('tmp')
except OSError: pass
try: mkdir('tmp_mat')
except OSError: pass
#############################
def calc_penalty(min_score, max_score):
    # No alignment was reported, so assume something worse than the worst
    # observed score: half the score spread below the minimum.
    half_spread = (max_score - min_score) / 2
    return min_score - half_spread
#############################
def get_seq(filename):
    """Return the concatenated sequence from a FASTA file, header line dropped.

    Each line is stripped of surrounding whitespace before joining.
    """
    # Fixed: the file handle was never closed; `with` guarantees it is.
    with open(filename) as handle:
        return ''.join([line.strip() for line in handle.readlines()[1:]])
############################
def TSS(fastasA, setA, fastasB, setB, gap_open, gap_extension, substitution_matrix):
    """Total Similarity Score of set A against library set B.

    Runs ggsearch/ssearch for each FASTA in *fastasA* against *setB*, collects
    the pairwise scores, and returns the length-normalized sum.  Relies on the
    module globals `method` and `unique_sequence_name_length` (Python 2 code:
    uses dict.has_key).
    """
    # TSS_A-B([A]_NA - [B]_NB) =
    # = (1/ (NA*delta_AB)) sum{1toNA}[ sum{1toNB}[ PSS_ij(1 - delta_ij * delta_AB)]]
    # we add normalization of the second term by the sequence length (not considered before)
    # change 20180305
    # GOAL: charge penalty for no alignment
    # PROCESS: calculate and retrieve all scores first, get max & min for entire set
    # CALC: penalty = min - ((max-min)/2)  # THIS IS ARBITRARY #
    # OUTPUT: assign penalty when there is no alignment
    a_names = [entry.split()[0] for entry in open(setA).read()[1:].split('\n>')]
    a_seqs = [''.join(entry.split('\n')[1:]).replace('\n','') for entry in open(setA).read().split('\n>')]
    a_names_seqs = dict(zip(a_names,a_seqs))
    b_names = [entry.split()[0] for entry in open(setB).read()[1:].split('\n>')]
    b_seqs = [''.join(entry.split('\n')[1:]).replace('\n','') for entry in open(setB).read().split('\n>')]
    ######################
    # calculate first_term
    # Kronecker delta: check if sets are the same
    delta_AB = 0
    if a_seqs == b_seqs: delta_AB = 1
    first_term = 1/float(len(a_seqs)*(len(b_seqs)-delta_AB))
    ######################
    # for second term iterate through sets
    second_term = 0
    protein_scores = {}
    for protein_a in fastasA:
        # NOTE(review): this re-reads the FIRST name in setA on every
        # iteration, so all fastasA entries share one key -- presumably it
        # should be derived from protein_a; confirm before using multi-entry
        # fastasA.
        protein_a_name = open(setA).read()[1:].split()[0]
        ################
        # run the alignment; the report lands in the tmp/ output file
        ggsearch_output = 'tmp/'+protein_a_name+'.ggsearch_output'
        run_ggsearch(gap_open, gap_extension, substitution_matrix, ggsearch_output, protein_a, setB)
        ################
        # parse ggsearch output for each protein in library set
        protein_scores[protein_a_name] = {}
        for line in open(ggsearch_output).read().split('The best scores are:')[1].split('>>>')[0].strip().split('\n')[1:]:
            # collect protein scores
            # sorry, this next line is a parsing hack
            if ')' in line and not ':' in line:
                protein_b_name = line.split()[0]
                score = float(line.split(')')[1].split()[0])
                # keep only the first (best) score per subject protein
                if not protein_scores[protein_a_name].has_key(protein_b_name):
                    protein_scores[protein_a_name][protein_b_name] = score
            elif not line.strip():
                break
    ######################
    max_score = max([max(protein_scores[protein_a].values()) for protein_a in protein_scores])
    min_score = min([min(protein_scores[protein_a].values()) for protein_a in protein_scores])
    # 'sw' mode: no penalty for missing alignments; otherwise charge the
    # below-worst penalty computed from the observed score spread.
    if method=='sw': penalty = 0
    else: penalty = calc_penalty(min_score, max_score)
    ######################
    for protein_a_name in a_names:
        seq_length = len(a_names_seqs[protein_a_name])
        # score protein a vs setB
        for protein_b_name in b_names:
            # Kronecker delta: check if proteins are the same, or even from same protein (uniprot code = 6 alpha-numeric long)
            # NOTE(review): delta_ab is initialized but never set to 1, so the
            # commented-out formula below would behave the same as the live one.
            delta_ab = 0
            if protein_a_name[:unique_sequence_name_length] != protein_b_name[:unique_sequence_name_length]:
                # if no alignment output, assign a bad score
                PairSimScore_ab = penalty
                if protein_scores[protein_a_name].has_key(protein_b_name):
                    PairSimScore_ab = protein_scores[protein_a_name][protein_b_name]
                ###########################
                # normalize to seq length #
                # sum to second term      #
                # second_term += PairSimScore_ab * (1 - delta_ab * delta_AB) / seq_length
                second_term += PairSimScore_ab / seq_length
                ###########################
    return first_term * second_term
############################
def run_ggsearch(gap_open, gap_extension, substitution_matrix, ggsearch_output, fasta, library):
    """Run ggsearch ('nw', global) or ssearch (local) and write the report to ggsearch_output."""
    binary = 'ggsearch35_t' if method == 'nw' else 'ssearch35_t'
    command = "%s/bin/%s -C %s -T %s -H -z -1 -d 0 -q -p -f %s -g %s -s %s -O %s %s %s"\
        % (ggsearch_dir, binary, sequence_name_length, threads, gap_open, gap_extension, substitution_matrix, ggsearch_output, fasta, library)
    # stdout/stderr are captured and discarded; only the -O report file matters.
    Popen(command.split(), stderr=PIPE, stdout=PIPE).communicate()
#############################
###########################################################
def directory_to_library(fasta_set, library):
    """Concatenate the FASTA files in fasta_set into one library file.

    Each input line is stripped and rewritten with a single trailing newline.
    """
    with open(library, 'w') as out:
        for fasta_path in fasta_set:
            for raw_line in open(fasta_path).readlines():
                out.write(raw_line.strip() + '\n')
#############################
def dir_to_fastalist_N_lib(db_dir, db_lib):
    """Collect the .fasta files under db_dir and merge them into db_lib.

    Returns the list of collected file paths.
    """
    db_fastas = [db_dir + name for name in listdir(db_dir) if name.endswith('.fasta')]
    # make searchable library from database
    directory_to_library(db_fastas, db_lib)
    return db_fastas
##############################################
#########
# START #
#########
if __name__=='__main__':
    #####################################
    # this program takes as input       #
    # [1]- a FASTA file                 #
    # [2]- a directory of FASTA files + #
    # [3]- a directory of FASTA files - #
    # [4]- a scoring matrix             #
    # [5,6]- gap penalties (gop, gep)   #
    # calculates total similarity score #
    #####################################
    try:
        # Score the query against the "strong" (functional) and "weak"
        # (nonfunctional) databases and report the signed difference.
        ######################
        # prepare input sets #
        ######################
        query_file = argv[1]
        db_strong_dir = argv[2]+'/'
        db_weak_dir = argv[3]+'/'
        matrix_file_name = argv[4]
        gap_open = int(argv[5])
        gap_extension = int(argv[6])
        # Optional flags; these assignments become the module globals the
        # helper functions read.
        multiplier=1
        if '-multiplier' in argv: multiplier = int(argv[argv.index('-multiplier')+1])
        elif '-invert' in argv: multiplier = -1
        method='nw'
        if '-sw' in argv: method = 'sw'
        threads = '2'
        if '-threads' in argv: threads = argv[argv.index('-threads')+1]
        sequence_name_length = 8
        if '-namelen' in argv: sequence_name_length = int(argv[argv.index('-namelen')+1])
        unique_sequence_name_length = 6
        if '-uniqname' in argv: unique_sequence_name_length = int(argv[argv.index('-uniqname')+1])
        # grab query sequence
        query_seq = get_seq(query_file)
        #print 'sequence:',query_seq
        #################
        ### strong db ###
        #################
        # input database directory
        db_strong_lib = "lib_db_strong.fasta"
        db_strong_fastas = dir_to_fastalist_N_lib(db_strong_dir,db_strong_lib)
        #################
        ###  weak db  ###
        #################
        # input database directory
        db_weak_lib = "lib_db_weak.fasta"
        db_weak_fastas = dir_to_fastalist_N_lib(db_weak_dir,db_weak_lib)
        ############################################
        # calculate TSS against the strong set
        TSS_s = TSS([query_file], query_file, db_strong_fastas, db_strong_lib, gap_open, gap_extension, matrix_file_name)
        ############################################
        # calculate TSS against the weak set
        TSS_w = TSS([query_file], query_file, db_weak_fastas, db_weak_lib, gap_open, gap_extension, matrix_file_name)
        #################################################
        # Prediction: positive score favors the strong (functional) set.
        score = (TSS_s - TSS_w)*multiplier
        print score
    ##############################################
    except:
        # NOTE(review): bare except -- ANY failure (including real bugs)
        # prints the usage text and hides the underlying error.
        # [1]- a FASTA file                 #
        # [2]- a directory of FASTA files + #
        # [3]- a directory of FASTA files - #
        # [4]- a scoring matrix             #
        # [5,6]- gap penalties (gop, gep)   #
        print "Usage: matrix_scorer.peptide.py <fasta file> <functional_sequences> <nonfunctional_sequences> <scoring matrix> <gap-open penalty> <gap-extend penalty>"
        print "Options: -invert (apply alignment scores upsidedown)"
        print "         -sw (use smith-waterman algorithm instead of needleman-wunsch)"
        print "         -threads # (default=2 use more than 1 processor)"
        print "         -namelen # (default=8 catches sequence names, needed due to ggsearch)"
        print "         -uniqname # (default=6 avoids comparing peptides from the same protein, assumes UniProt coding)"
import checks
import discord
import logging
import traceback
import valve.rcon
from bot import Discord_10man
from databases import Database
from discord.ext import commands
from logging.config import fileConfig
from steam.steamid import SteamID, from_url
from typing import List
class Setup(commands.Cog):
    def __init__(self, bot: Discord_10man):
        # Logging is (re)configured from file each time the cog is created.
        fileConfig('logging.conf')
        self.logger = logging.getLogger(f'10man.{__name__}')
        self.bot: Discord_10man = bot
        self.logger.debug(f'Loaded {__name__}')
    @commands.command(aliases=['login'],
                      help='This command connects users steam account to the bot.',
                      brief='Connect your SteamID to the bot', usage='<SteamID or CommunityURL>')
    async def link(self, ctx: commands.Context, steamID_input: str):
        # Resolve the input as a literal SteamID first, then as a community
        # URL, and finally as a bare vanity name; store discord_id -> steam_id.
        self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
        steamID = SteamID(steamID_input)
        if not steamID.is_valid():
            steamID = from_url(steamID_input, http_timeout=15)
            if steamID is None:
                steamID = from_url(f'https://steamcommunity.com/id/{steamID_input}/', http_timeout=15)
                if steamID is None:
                    raise commands.UserInputError(message='Please enter a valid SteamID or community url.')
        # NOTE(review): the connection is never explicitly closed -- verify
        # the databases library handles that.
        db = Database('sqlite:///main.sqlite')
        await db.connect()
        # REPLACE keeps one row per Discord user, overwriting earlier links.
        await db.execute('''
            REPLACE INTO users (discord_id, steam_id)
            VALUES( :discord_id, :steam_id )
            ''', {"discord_id": str(ctx.author.id), "steam_id": str(steamID.as_steam2_zero)})
        embed = discord.Embed(description=f'Connected {ctx.author.mention} \n `{steamID.as_steam2}`', color=0x00FF00)
        await ctx.send(embed=embed)
        self.logger.info(f'{ctx.author} connected to {steamID.as_steam2}')
    @link.error
    async def link_error(self, ctx: commands.Context, error: Exception):
        # User mistakes are echoed back to the channel; anything else is
        # logged with a traceback as a bot-side bug.
        if isinstance(error, commands.UserInputError):
            await ctx.send(str(error))
            self.logger.warning(f'{ctx.author} did not enter a valid SteamID')
        else:
            self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['spectator', 'spec'],
help='Adds this user as a spectator in the config for the next map.',
brief='Add user as spectator', usage='<@User>')
@commands.has_permissions(administrator=True)
async def add_spectator(self, ctx: commands.Context, spec: discord.Member):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
db = Database('sqlite:///main.sqlite')
await db.connect()
data = await db.fetch_one('SELECT 1 FROM users WHERE discord_id = :spectator', {"spectator": str(spec.id)})
if data is None:
raise commands.UserInputError(message=f'<@{spec.id}> needs to `.link` their account.')
self.bot.spectators.append(spec)
await ctx.send(f'<@{spec.id}> was added as a spectator.')
self.logger.info(f'{ctx.author} was added as a spectator')
@add_spectator.error
async def add_spectator_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.UserInputError):
await ctx.send(str(error))
self.logger.warning(str(error))
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['queue_captain', 'captain'],
help='Set\'s the queue captain for the next match', usage='<@User> ?<@User>')
@commands.has_permissions(administrator=True)
async def set_queue_captain(self, ctx: commands.Context, *args: discord.Member):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
db = Database('sqlite:///main.sqlite')
await db.connect()
for captain in args:
data = await db.fetch_one('SELECT 1 FROM users WHERE discord_id = :spectator', {"spectator": str(captain.id)})
if data is None:
raise commands.UserInputError(message=f'<@{captain.id}> needs to `.link` their account.')
self.bot.queue_captains.append(captain)
await ctx.send(f'<@{captain.id}> was added as a captain for the next queue.')
self.logger.debug(f'<@{captain.id}> was added as a captain for the next queue.')
self.logger.debug(f'Current Queue Captains: {self.bot.queue_captains}')
@set_queue_captain.error
async def set_queue_captain_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.UserInputError):
await ctx.send(str(error))
self.logger.debug(str(error))
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['empty'],
help='Empties the queue')
@commands.has_permissions(administrator=True)
async def empty_queue(self, ctx: commands.Context):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
for member in self.bot.queue_voice_channel.members:
await member.move_to(channel=None, reason=f'Admin cleared the queue')
self.logger.debug(f'Admin cleared the queue')
@empty_queue.error
async def empty_queue_error(self, ctx: commands.Context, error: Exception):
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['remove_spec'],
help='Removes this user as a spectator from the config.',
brief='Removes user as spectator', usage='<@User>')
@commands.has_permissions(administrator=True)
async def remove_spectator(self, ctx: commands.Context, spec: discord.Member):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
db = Database('sqlite:///main.sqlite')
await db.connect()
data = await db.fetch_one('SELECT 1 FROM users WHERE discord_id = :spectator',
{"spectator": str(spec.id)})
if data is None:
raise commands.UserInputError(
message=f'User did not `.link` their account and probably is not a spectator.')
if data[0] in self.bot.spectators:
self.bot.spectators.remove(spec)
await ctx.send(f'<@{spec.id}> was removed as a spectator.')
self.logger.debug(f'<@{spec.id}> was removed as a spectator.')
else:
raise commands.CommandError(message=f'<@{spec.id}> is not a spectator.')
@remove_spectator.error
async def remove_spectator_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.UserInputError) or isinstance(error, commands.CommandError):
await ctx.send(str(error))
self.logger.warning(str(error))
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['dm'],
help='Command to enable or disable sending a dm with the connect ip vs posting it in the channel',
brief='Enable or disable connect via dm')
@commands.has_permissions(administrator=True)
async def connect_dm(self, ctx: commands.Context, enabled: bool = False):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
self.bot.connect_dm = enabled
await ctx.send(f'Connect message will {"not" if not enabled else ""} be sent via a DM.')
@commands.command(aliases=['setupqueue', 'queue_setup', 'queuesetup'],
help='Command to set the server for the queue system. You must be in a voice channel.',
brief='Set\'s the server for the queue')
@commands.has_permissions(administrator=True)
@commands.check(checks.voice_channel)
async def setup_queue(self, ctx: commands.Context, enabled: bool = True):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
self.bot.queue_voice_channel = ctx.author.voice.channel
self.bot.queue_ctx = ctx
if enabled:
if self.bot.cogs['CSGO'].queue_check.is_running():
self.bot.cogs['CSGO'].queue_check.restart()
self.logger.warning(f'Queue Restarted')
else:
self.bot.cogs['CSGO'].queue_check.start()
self.logger.debug('Queue Started')
self.bot.cogs['CSGO'].pug.enabled = False
self.logger.debug('Pug Disabled')
self.logger.debug(f'Queue Channel: {self.bot.queue_ctx.author.voice.channel}')
else:
self.bot.cogs['CSGO'].queue_check.stop()
self.bot.cogs['CSGO'].pug.enabled = True
self.logger.debug('Queue Disabled, Pug Enabled')
await ctx.send(
f'{self.bot.queue_ctx.author.voice.channel} is the queue channel.\n'
f'Queue is {"enabled" if enabled else "disabled"}.\n'
f'Pug Command is {"enabled" if not enabled else "disabled"}.')
@setup_queue.error
async def setup_queue_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.CommandError):
await ctx.send(str(error))
self.logger.warning(str(error))
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['restart_queue'],
help='The command forcefully restarts the queue.',
brief='Restart\'s the queue')
@commands.has_permissions(administrator=True)
@commands.check(checks.queue_running)
async def force_restart_queue(self, ctx: commands.Context):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
self.bot.cogs['CSGO'].queue_check.cancel()
self.bot.cogs['CSGO'].queue_check.start()
self.bot.cogs['CSGO'].pug.enabled = False
await ctx.send('Queue forcefully restarted')
self.logger.warning('Queue forcefully restarted')
@force_restart_queue.error
async def force_restart_queue_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.CommandError):
await ctx.send(str(error))
self.logger.warning(str(error))
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['setup_queue_size', 'match_size', 'queue_size', 'set_match_size', 'set_queue_size'],
help='This command sets the size of the match and the queue.',
brief='Sets the size of the match & queue', usage='<size>')
@commands.has_permissions(administrator=True)
async def setup_match_size(self, ctx: commands.Context, size: int):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
if size <= 0:
raise commands.CommandError(message=f'Invalid match size.')
if size % 2 != 0:
raise commands.CommandError(message=f'Match size must be an even number.')
self.bot.match_size = size
if self.bot.cogs['CSGO'].queue_check.is_running():
self.bot.cogs['CSGO'].queue_check.restart()
await ctx.send(f'Set match size to {self.bot.match_size}.')
self.logger.debug(f'Set match size to {self.bot.match_size}.')
@setup_match_size.error
async def setup_match_size_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.BadArgument):
await ctx.send('Invalid Argument')
self.logger.warning('Invalid Argument')
elif isinstance(error, commands.CommandError):
await ctx.send(str(error))
self.logger.warning(str(error))
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(help='Command to send a test message to the server to verify that RCON is working.',
brief='Sends a message to the server to test RCON', usage='<message>')
@commands.has_permissions(administrator=True)
async def RCON_message(self, ctx: commands.Context, *, message: str):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
for server in self.bot.servers:
test_message = valve.rcon.execute((server.server_address, server.server_port), server.RCON_password,
f'say {message}')
print(f'Server #{server.id} | {test_message}')
self.logger.debug(f'Server #{server.id} | {test_message}')
@RCON_message.error
async def RCON_message_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.MissingPermissions):
await ctx.send('Only an administrator can send a message using the console')
self.logger.warning('Only an administrator can send a message using the console')
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send('Please specify the message')
self.logger.warning('Please specify the message')
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(help='This command unbans everyone on the server. Useful fix.',
brief='Unbans everyone from the server', hidden=True)
@commands.has_permissions(administrator=True)
async def RCON_unban(self, ctx: commands.Context):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
for server in self.bot.servers:
unban = valve.rcon.execute((server.server_address, server.server_port), server.RCON_password,
'removeallids')
print(f'Server #{server.id} | {unban}')
self.logger.debug(f'Server #{server.id} | {unban}')
@RCON_unban.error
async def RCON_unban_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.MissingPermissions):
await ctx.send('Only an administrator can unban every player')
self.logger.warning('Only an administrator can unban every player')
else:
self.logger.exception(f'{ctx.command} caused an exception')
@commands.command(aliases=['end', 'stop'],
help='This command force ends a match.',
brief='Force ends a match', usage='<ServerID>')
@commands.has_permissions(administrator=True)
async def force_end(self, ctx: commands.Context, server_id: int = 0):
self.logger.debug(f'{ctx.author}: {ctx.prefix}{ctx.invoked_with} {ctx.args[2:]}')
valve.rcon.execute((self.bot.servers[server_id].server_address, self.bot.servers[server_id].server_port),
self.bot.servers[server_id].RCON_password, 'get5_endmatch')
@force_end.error
async def force_end_error(self, ctx: commands.Context, error: Exception):
if isinstance(error, commands.MissingPermissions):
await ctx.send('Only an administrator can force end a match.')
else:
self.logger.exception(f'{ctx.command} caused an exception')
def setup(client):
    """discord.py extension entry point: attach the Setup cog to the bot."""
    cog = Setup(client)
    client.add_cog(cog)
|
#!/usr/bin/env python
import Tkinter
tk=Tkinter
import aniwinch
import threading
from datetime import datetime
import time
import serial
import sys
import winch_settings
from humminbird import HumminbirdMonitor
from gpio_wrapper import SerialGPIO
from async import async,OperationAborted
import logging
class CTD(object):
    """Controller for automated CTD casts: ties the Humminbird depth monitor
    and the Animatics winch to a Tkinter operator GUI (Python 2 code).

    Automated actions run on a worker thread via the @async decorator from
    the local `async` module; they cooperate with cancellation by calling
    self.poll(), which raises OperationAborted once stop_auto() has set
    self.abort_async.
    """
    # Tow-yo line-out is the cast depth scaled by this factor.
    towyo_factor=1.5
    # When set (via the GUI config entry), overrides the sounder depth.
    depth_override=None
    def __init__(self):
        self.log = logging.getLogger('main')
        self.lock = threading.Lock()
        self.thread = None
        # False, or an OperationAborted instance for poll() to raise.
        self.abort_async = False
        # Description of the currently running @async action (or None).
        self.async_action = None
        self.monitor = HumminbirdMonitor()
        self.winch = aniwinch.AnimaticsWinch()
    @async('cast on gpio')
    def cast_on_gpio(self):
        """Loop forever: wait for a GPIO cast signal, run one synchronous
        cast, and report in-progress/complete back over GPIO."""
        self.last_cast_time = datetime.now()
        gpio = self.gpio()
        try:
            self.log.info("Begin GPIO single cast loop")
            while 1:
                gpio.wait_for_cast_signal(poll=self.poll)
                self.log.info("Received cast signal")
                gpio.signal_cast_in_progress()
                self.do_synchronous_cast()
                gpio.signal_cast_complete()
                self.last_cast_time = datetime.now()
        except OperationAborted:
            # Cancellation requested via stop_auto(); propagate to @async.
            raise
        except Exception,exc:
            self.log.error("while gpio casting:" + str(exc))
    def depth_for_cast(self):
        """Return the cast depth: the operator override if set, otherwise
        the sounder's reported depth."""
        if self.depth_override is not None:
            return self.depth_override
        else:
            return self.monitor.maxDepth
    def towyo_depth(self):
        """Cast depth scaled by towyo_factor (extra line for towed casts)."""
        return self.depth_for_cast() * self.towyo_factor
    @async('tow-yo on gpio')
    def towyo_on_gpio(self):
        """Tow-yo repeatedly while the GPIO cast signal is asserted; recover
        the CTD when the signal drops, then wait for the next assertion."""
        # tow-yo the CTD as long as the GPIO input is
        # enabled - should it wait for a off/on transition
        # before starting??
        gpio = self.gpio()
        try:
            self.log.info("Begin GPIO/tow-yo loop")
            # TODO: better logic for interrupted actions
            # there are probably some issues right now
            # i.e. when the winch is already doing something, none of the
            # calls are graceful about knowing that and cancelling an existing
            # action.
            while 1:
                self.log.info("Waiting for GPIO signal")
                gpio.wait_for_cast_signal(poll=self.poll)
                while gpio.cast_signal():
                    self.log.info("Tow-yo beginning next tow-yo drop")
                    self.winch.ctd_out(self.towyo_depth(), block=True)
                    # only bring it in most of the way
                    self.winch.complete_position_move(absol_m=self.winch.arm_length+self.winch.cage_length+0.05,
                                                      block=True,direc=-1)
                self.log.info("Tow-yo disabled - recovering CTD")
                self.winch.ctd_in(block=True)
        except OperationAborted as exc:
            # go ahead and have the winch start bringing it in, but don't wait.
            if exc.cleanup:
                self.winch.ctd_in(block=False)
            raise
        except Exception as exc:
            # NOTE(review): log.error() returns None, so this prints "None";
            # the statement still logs — likely meant to be just the call.
            print self.log.error("while gpio casting:"+str(exc))
    @async('tow-yo')
    def towyo(self):
        """
        Towyo until auto action is cancelled
        """
        try:
            while 1:
                self.poll()
                self.log.debug("Tow-yo beginning next tow-yo drop")
                self.winch.ctd_out(self.towyo_depth(),
                                   block=True)
                self.log.debug("ctd::towyo::return from ctd_out")
                # only bring it in most of the way
                self.poll()
                self.log.debug("ctd::towyo::about to bring in towyo")
                self.winch.complete_position_move(absol_m=self.winch.arm_length+self.winch.cage_length+0.05,
                                                  block=True,direc=-1)
        except OperationAborted as exc:
            # go ahead and have the winch start bringing it in, but don't wait.
            # the logic is a bit wrong - on cancel of automated casts, want to
            # bring it back in, but on cancel of winch action, should exit
            # without queueing more actions
            self.log.info("ctd::towyo:received abort with cleanup=%s"%exc.cleanup)
            if exc.cleanup:
                self.winch.ctd_in(block=False)
            raise
    def stop_auto(self,cleanup=True):
        """Request cancellation of the running @async action.

        cleanup=True also asks the action's abort handler to start
        recovering the CTD; cleanup=False aborts without queueing moves.
        """
        self.log.debug("stop_auto top, cleanup=%s"%cleanup)
        # probably a race condition here!
        # 1. there's a thread running
        # 2. stop_auto() is called - abort_async=True
        # 3. The thread exits
        # END: abort_async is left True
        # so async makes sure that the last thing that happens
        # in the thread is clearing abort_async
        with self.lock:
            if self.async_action is not None:
                self.log.info("ctd::stop_auto setting exception with cleanup=%s"%cleanup)
                self.abort_async = OperationAborted(cleanup=cleanup)
    def handle_abort(self):
        """Callback invoked by @async when an action is aborted."""
        self.log.info("async action was aborted")
    # would be nice to do this with simple lexical scope,
    # but doesn't appear to work.
    # Class-level default; do_synchronous_cast uses it as a completion flag.
    waiting = 0
    def do_synchronous_cast(self):
        """Start a cast and busy-wait (polling for aborts) until the winch
        reports completion via callback."""
        self.waiting=1
        def on_complete(arg):
            self.waiting -= 1
        self.winch.ctd_cast(self.depth_for_cast(),
                            block=False,callback=on_complete)
        while self.waiting >0:
            self.poll()
            time.sleep(0.2)
    def poll(self):
        """Cancellation point: raise OperationAborted if stop_auto() was
        called, clearing the request so it is delivered exactly once."""
        # print "ctd::poll()"
        if self.abort_async is not False:
            if isinstance(self.abort_async,OperationAborted):
                exc = self.abort_async
            else:
                exc = OperationAborted(cleanup=True)
            self.abort_async = False # it's been received
            self.log.info("ctd::poll() raising exc with cleanup=%s"%exc.cleanup)
            raise exc
    # Connect to the GPIO on demand
    _gpio = None
    def gpio(self):
        """Lazily construct and cache the SerialGPIO connection."""
        if self._gpio is None:
            self._gpio = SerialGPIO()
        return self._gpio
    def force_enable_gpio(self):
        # "cast complete" is the signal the autopilot reads as ready/enabled.
        self.gpio().signal_cast_complete()
    def force_disable_gpio(self):
        # "cast in progress" holds the autopilot off.
        self.gpio().signal_cast_in_progress()
    # def cast_on_stop():
    #     try:
    #         while 1:
    #             if monitor.moving():
    #                 stopped_time = datetime.now()
    #             else:
    #                 if (datetime.now() - stopped_time).total_seconds() > 3:
    #                     winch.ctd_out(monitor.maxDepth)
    #                     winch.ctd_in()
    #                     while not monitor.moving() and not winch.freak_out:
    #                         time.sleep(1)
    #                     stopped_time = datetime.now()
    #     except:
    #         pass
    # def enable():
    #     print 'enable'
    #     global ctdThread
    #     ctdThread = threading.Thread(target = cast_on_stop)
    #     ctdThread.start()
    def enable_hw_trig_cast(self):
        """Start the GPIO-triggered single-cast loop in the background."""
        def done(arg):
            self.log.info("GPIO exited")
        self.cast_on_gpio(block=False,callback=done)
    def enable_hw_trig_towyo(self):
        """Start the GPIO-triggered tow-yo loop in the background."""
        def done(arg):
            self.log.info("GPIO exited")
        self.towyo_on_gpio(block=False,callback=done)
    def enable_towyo(self):
        """Start continuous tow-yo in the background (until stop_auto)."""
        self.towyo(block=False)
    def start_cast(self):
        """Begin a manual cast to the current cast depth (blocking call
        semantics depend on ctd_cast's default)."""
        d=self.depth_for_cast()
        self.log.info('manual cast out %s' % d)
        self.winch.ctd_cast(d)
    def manual_cast(self):
        """Begin a manual cast in the background, logging on completion."""
        self.log.info('manual cast')
        self.winch.ctd_cast(self.depth_for_cast(),block=False,callback=self.manual_cast_complete)
    def manual_cast_complete(self,*args):
        self.log.info("Manual cast is complete")
    def recover(self):
        """Bring the CTD back in (non-blocking)."""
        self.log.info('recover')
        # Note that if the CTD is already in, this will ease it
        # out and bring it back in. Not sure if that's good
        # or bad.
        self.winch.ctd_in(block=False)
    def recover_reset(self):
        """Bring the CTD in and re-zero the winch (non-blocking)."""
        self.log.info('recover and reset')
        self.winch.ctd_in_reset(block=False)
    def reset_here(self):
        """Declare the current cable position to be the top (zero)."""
        self.winch.reset_encoder_position()
    def stop_now(self):
        """Emergency stop: abort automation without cleanup and halt the
        winch motor immediately."""
        self.log.info('ctd::stop_now')
        self.stop_auto(cleanup=False) # signal that no cleanup actions should be taken
        self.winch.abort()
        self.winch.stop_motor()
    def print_status(self):
        self.winch.status_report()
    # GUI state-refresh period in milliseconds.
    update_rate_ms = 200
    def periodic_update(self):
        """Refresh every state readout, swallowing (but printing) per-field
        errors, then reschedule itself via Tk's after()."""
        for text,thunk,str_var in self.state_values:
            try:
                str_var.set(thunk())
            except Exception as exc:
                print exc
        self.top.after(self.update_rate_ms,self.periodic_update)
    def gui_init_actions(self):
        """Build the Actions pane: command buttons plus velocity/force sliders."""
        buttons = []
        for text,cmd in [ ('STOP WINCH',self.stop_now),
                          ('Manual CTD cast now',self.manual_cast),
                          ('Tow-yo now',self.enable_towyo),
                          ('Set current position as top',self.reset_here),
                          ('Recover and reset CTD',self.recover_reset),
                          ('Recover CTD',self.recover),
                          ('Start GPIO-triggered single-cast mode',self.enable_hw_trig_cast),
                          ('Start GPIO-triggered tow-yo',self.enable_hw_trig_towyo),
                          # ('Enable Speed-based CTD mode', self.enable),
                          ('Force enable autopilot via GPIO',self.force_enable_gpio),
                          ('Force disable autopilot via GPIO',self.force_disable_gpio),
                          ('Stop automated casts',self.stop_auto),
                          ('Print status info to console',self.print_status) ]:
            buttons.append( Tkinter.Button(self.actions,text=text,command=cmd) )
        for btn in buttons:
            btn.pack(side=Tkinter.TOP,fill='x')
        # And the slider
        self.scale_var = Tkinter.DoubleVar()
        self.scale = Tkinter.Scale(self.actions,command=self.scale_changed,
                                   from_=-.450, to=0.45, resolution=0.01,
                                   orient=Tkinter.HORIZONTAL,
                                   variable = self.scale_var,
                                   label="Run at speed:")
        # go back to zero on mouse up
        # self.scale.bind('<ButtonRelease>',lambda *x: (self.scale_var.set(0.0),self.scale_changed(0.0)) )
        self.scale.bind('<Shift-ButtonRelease>',self.slider_nostop)
        self.scale.bind('<ButtonRelease>',self.slider_stop)
        self.scale.pack(side=Tkinter.TOP,fill='x')
        # And a torque slider
        self.tq_scale_var =Tkinter.DoubleVar()
        self.tq_scale = Tkinter.Scale(self.actions,command=self.tq_scale_changed,
                                      from_=-10, to=10, resolution=0.05,
                                      orient=Tkinter.HORIZONTAL,
                                      variable = self.tq_scale_var,
                                      label="Run at force:")
        self.tq_scale.bind('<ButtonRelease>',self.tq_stop)
        self.tq_scale.bind('<ButtonPress>',self.tq_start)
        self.tq_scale.pack(side=Tkinter.TOP,fill='x')
    def scale_changed(self,new_value):
        # Velocity slider moved: run the winch at the slider's value.
        self.winch.start_velocity_move(self.scale_var.get())
    def slider_nostop(self,evt):
        # Shift-release: keep running at the set speed instead of zeroing.
        print "NOT STOPPING!"
    def slider_stop(self,evt):
        # Normal release: snap the slider (and winch speed) back to zero.
        self.scale_var.set(0.0)
        self.scale_changed(0.0)
    def tq_start(self,evt):
        # Entering torque mode: release the brake and start at zero force.
        print "Releasing brake"
        self.winch.release_brake()
        self.tq_scale_changed(0.0)
    def tq_stop(self,evt):
        # Leaving torque mode: stop the motor, zero the slider, re-brake.
        print "End torque mode"
        # NOTE(review): this calls motor_stop() while stop_now() calls
        # stop_motor() — confirm both methods exist on AnimaticsWinch.
        self.winch.motor_stop()
        self.tq_scale_var.set(0.0)
        self.winch.enable_brake()
    def tq_scale_changed(self,new_value):
        force_kg=self.tq_scale_var.get()
        self.winch.start_force_move(force_kg)
    def gui_init_state(self):
        """Build the State pane: a table of live readouts refreshed by
        periodic_update (each row's StringVar is appended to its entry)."""
        # a list of parameters to update periodically
        self.state_values = [ ['Depth',lambda: "%.2f m"%self.monitor.maxDepth],
                              ['GPS velocity',lambda: "%.2f m/s"%self.monitor.velocity],
                              ['Cable out',lambda: "%.2f m/%.2frev"%self.winch.get_cable_out(1.0,extra=True) ],
                              ['Cable speed',lambda: "%.2f m/s"%self.winch.get_velocity(1.0) ],
                              ['Winch current',lambda: "%.0f mA?"%self.winch.get_current(1.0)],
                              ['Winch torque',lambda: "%.0f"%self.winch.get_torque(1.0)],
                              ['Winch action',lambda: self.winch.async_action],
                              ['CTD action',lambda: self.async_action],
                              ['GPIO from APM',lambda: self.gpio().cast_signal()],
                              ['GPIO to APM',lambda: self.gpio().last_signal_out ]]
        hdr_font = ('Helvetica','13','bold')
        hdr_key = Tkinter.Label(self.state,text="Variable",font=hdr_font,justify=tk.LEFT)
        hdr_val = Tkinter.Label(self.state,text="Value",font=hdr_font)
        hdr_key.grid(row=0,column=0,sticky=tk.N+tk.W+tk.S,ipadx=20)
        hdr_val.grid(row=0,column=1,sticky=tk.N+tk.W+tk.S,ipadx=20)
        for i in range(len(self.state_values)):
            text,thunk = self.state_values[i]
            lab = Tkinter.Label(self.state,text=text)
            str_var = Tkinter.StringVar()
            if 0:
                val = Tkinter.Entry(self.state,textvariable=str_var,
                                    state=Tkinter.DISABLED)
            else:
                val = Tkinter.Label(self.state,textvariable=str_var,
                                    justify=Tkinter.LEFT)
            str_var.set(thunk())
            lab.grid(row=i+1,column=0,sticky=tk.N+tk.W+tk.S)
            val.grid(row=i+1,column=1,sticky=tk.N+tk.W+tk.S)
            # Each row becomes [text, thunk, str_var] for periodic_update.
            self.state_values[i].append(str_var)
    def gui_init_config(self):
        """Build the Config pane: editable entries/checkboxes bound to winch
        and controller attributes via variable traces."""
        # a list of values
        self.config_values = []
        def add_gen_config(text,setter,getter):
            # Generic text entry: setter(str) is invoked on every edit.
            lab = Tkinter.Label(self.config,text=text)
            svar = Tkinter.StringVar()
            val = Tkinter.Entry(self.config,textvariable=svar,
                                state=Tkinter.NORMAL)
            svar.set( getter() )
            def real_setter(*args):
                v = svar.get()
                setter(v)
            svar.trace('w', real_setter )
            lab.grid(row=len(self.config_values),column=0)
            val.grid(row=len(self.config_values),column=1)
            self.config_values.append(svar)
        def add_float_config(text,obj,attr,fmt):
            # Float attribute entry; invalid input is silently ignored.
            def getter():
                return fmt%getattr(obj,attr)
            def setter(v):
                try:
                    setattr(obj,attr,float(v))
                except ValueError:
                    pass
            add_gen_config(text,setter,getter)
        def add_bool_config(text,obj,attr):
            # Boolean attribute as a checkbutton.
            lab = Tkinter.Label(self.config,text=text)
            ivar = Tkinter.IntVar()
            val = Tkinter.Checkbutton(self.config,variable=ivar)
            ivar.set( int(bool(getattr(obj,attr))) )
            def real_setter(*args):
                v = ivar.get()
                setattr(obj,attr,bool(int(v)))
            ivar.trace('w', real_setter )
            lab.grid(row=len(self.config_values),column=0)
            val.grid(row=len(self.config_values),column=1)
            self.config_values.append(ivar)
        add_float_config("Target velocity [m/s]", self.winch, "target_velocity", "%.2f")
        add_float_config('Inner radius [m]', self.winch,"spool_radius_inner", "%.4f")
        add_float_config('Outer radius [m]', self.winch,"spool_radius_outer", "%.4f")
        add_float_config('Full-in force [kg]',self.winch,"block_a_block_kg","%.2f")
        add_float_config('Zero tension current',self.winch,"deploy_slack_current","%.0f")
        add_float_config('Deploy slack torque',self.winch,"deploy_slack_torque","%.0f")
        add_float_config('Arm length [m]',self.winch,"arm_length","%.2f")
        add_float_config('Cage length [m]',self.winch,"cage_length","%.2f")
        add_float_config('Towyo factor [-]',self,"towyo_factor","%.2f")
        add_float_config('Ease from block-a-block [m]',self.winch,"ease_from_block_a_block","%.2f")
        add_gen_config('Max power fraction',
                       lambda v: self.winch.set_max_power_fraction(float(v)),
                       lambda: "%.2f"%self.winch.power_fraction)
        add_gen_config('Override depth',
                       self.set_depth_override_str,
                       self.get_depth_override_str)
        add_bool_config("Always reset?",self.winch,"reset_after_cast")
    def set_depth_override_str(self,v):
        """Parse the override-depth entry: blank clears the override,
        unparseable input is ignored."""
        v=v.strip()
        if v=="":
            self.depth_override=None
        else:
            try:
                self.depth_override=float(v)
            except ValueError:
                pass
    def get_depth_override_str(self):
        """Format the override depth for the entry ('' when unset)."""
        if self.depth_override is None:
            return ""
        else:
            return "%.2f"%self.depth_override
    def gui(self):
        """Build the three GUI panes, run the Tk main loop, and shut down
        hardware connections when the window closes (then exits)."""
        self.top = top = Tkinter.Tk()
        self.actions = Tkinter.LabelFrame(top,text="Actions")
        self.state = Tkinter.LabelFrame(top,text="State")
        self.config = Tkinter.LabelFrame(top,text="Config")
        self.gui_init_actions()
        self.gui_init_state()
        self.gui_init_config()
        self.actions.pack(side=Tkinter.LEFT,fill='both')
        self.state.pack(side=Tkinter.LEFT,fill='both')
        self.config.pack(side=Tkinter.LEFT,fill='both')
        top.after(self.update_rate_ms,self.periodic_update)
        top.mainloop()
        self.log.info("exiting mainloop")
        self.winch.close()
        if self._gpio is not None:
            self._gpio.close()
        self.monitor.close()
        sys.exit()
if __name__ == '__main__':
    # Launch the winch controller GUI; gui() blocks until the window closes.
    controller = CTD()
    controller.gui()
|
# Generate candidate passwords of length n from m given letters.
# A valid password needs at least 1 vowel and at least 2 consonants.
# NOTE(review): m (the letter count) is read but never used afterwards.
n, m = map(int, input().split())
array = input().split()
array.sort() # sort first so results come out in lexicographic order

# combinations does the job here
from itertools import combinations

# vowels to count; everything else is treated as a consonant
vowels = ('a', 'e', 'i', 'o', 'u')

# # my original solution
# comb = combinations(array, 4)
# for i in comb:
#     for j in i:
#         print(j, end = '')
#     print()

for password in combinations(array, n):
    # count vowels and consonants among the candidate's characters
    v_count = 0
    s_count = 0
    for i in password:
        if i in vowels:
            v_count += 1
        else:
            s_count += 1
    # print when there is at least 1 vowel and at least 2 consonants
    if v_count >= 1 and s_count >= 2:
        print(''.join(password))

print('--------------')

# alternative: check every length-n combination counting vowels only
for password in combinations(array, n):
    # count the vowels in the candidate password
    count = 0
    for i in password:
        if i in vowels:
            count += 1
    # 1..n-2 vowels is equivalent to >=1 vowel and >=2 consonants
    if count >= 1 and count <= n-2:
        print(''.join(password))
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration (South auto-generated — edit with care).

    Creates ConsentCatalogueAudit, ConsentCatalogue (unique on
    name+version), AttachedModelAudit and AttachedModel (unique on
    consent_catalogue+content_type_map).
    """
    # Adding model 'ConsentCatalogueAudit'
    db.create_table('bhp_consent_consentcatalogue_audit', (
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('hostname_created', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('hostname_modified', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('revision', self.gf('django.db.models.fields.CharField')(max_length=150, null=True, blank=True)),
        ('_audit_subject_identifier', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('content_type_map', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_consentcatalogue', null=True, to=orm['bhp_content_type_map.ContentTypeMap'])),
        ('consent_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
        ('version', self.gf('django.db.models.fields.IntegerField')()),
        ('start_datetime', self.gf('django.db.models.fields.DateTimeField')()),
        ('end_datetime', self.gf('django.db.models.fields.DateTimeField')()),
        ('list_for_update', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('add_for_app', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True)),
        ('_audit_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('_audit_change_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
        ('id', self.gf('django.db.models.fields.CharField')(max_length=36)),
        ('_audit_id', self.gf('django.db.models.fields.CharField')(max_length=36, primary_key=True)),
    ))
    db.send_create_signal('consent', ['ConsentCatalogueAudit'])
    # Adding model 'ConsentCatalogue'
    db.create_table('bhp_consent_consentcatalogue', (
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('hostname_created', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('hostname_modified', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('id', self.gf('django.db.models.fields.CharField')(max_length=36, primary_key=True)),
        ('revision', self.gf('django.db.models.fields.CharField')(max_length=150, null=True, blank=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
        ('content_type_map', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['bhp_content_type_map.ContentTypeMap'], null=True)),
        ('consent_type', self.gf('django.db.models.fields.CharField')(max_length=25)),
        ('version', self.gf('django.db.models.fields.IntegerField')()),
        ('start_datetime', self.gf('django.db.models.fields.DateTimeField')()),
        ('end_datetime', self.gf('django.db.models.fields.DateTimeField')()),
        ('list_for_update', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('add_for_app', self.gf('django.db.models.fields.CharField')(max_length=25, null=True, blank=True)),
    ))
    db.send_create_signal('consent', ['ConsentCatalogue'])
    # Adding unique constraint on 'ConsentCatalogue', fields ['name', 'version']
    db.create_unique('bhp_consent_consentcatalogue', ['name', 'version'])
    # Adding model 'AttachedModelAudit'
    db.create_table('bhp_consent_attachedmodel_audit', (
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('hostname_created', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('hostname_modified', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('revision', self.gf('django.db.models.fields.CharField')(max_length=150, null=True, blank=True)),
        ('_audit_subject_identifier', self.gf('django.db.models.fields.CharField')(max_length=50, null=True)),
        ('consent_catalogue', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_attachedmodel', to=orm['consent.ConsentCatalogue'])),
        ('content_type_map', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_attachedmodel', to=orm['bhp_content_type_map.ContentTypeMap'])),
        ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('_audit_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
        ('_audit_change_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
        ('id', self.gf('django.db.models.fields.CharField')(max_length=36)),
        ('_audit_id', self.gf('django.db.models.fields.CharField')(max_length=36, primary_key=True)),
    ))
    db.send_create_signal('consent', ['AttachedModelAudit'])
    # Adding model 'AttachedModel'
    db.create_table('bhp_consent_attachedmodel', (
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
        ('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250, db_index=True)),
        ('hostname_created', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('hostname_modified', self.gf('django.db.models.fields.CharField')(default='mac.local', max_length=50, db_index=True, blank=True)),
        ('id', self.gf('django.db.models.fields.CharField')(max_length=36, primary_key=True)),
        ('revision', self.gf('django.db.models.fields.CharField')(max_length=150, null=True, blank=True)),
        ('consent_catalogue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['consent.ConsentCatalogue'])),
        ('content_type_map', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['bhp_content_type_map.ContentTypeMap'])),
        ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
    ))
    db.send_create_signal('consent', ['AttachedModel'])
    # Adding unique constraint on 'AttachedModel', fields ['consent_catalogue', 'content_type_map']
    db.create_unique('bhp_consent_attachedmodel', ['consent_catalogue_id', 'content_type_map_id'])
def backwards(self, orm):
    """Reverse this migration: drop the constraints and tables created by forwards().

    Constraints are removed before the tables that carry them, and the
    statements run in reverse order of creation.
    """
    # Removing unique constraint on 'AttachedModel', fields ['consent_catalogue', 'content_type_map']
    db.delete_unique('bhp_consent_attachedmodel', ['consent_catalogue_id', 'content_type_map_id'])
    # Removing unique constraint on 'ConsentCatalogue', fields ['name', 'version']
    db.delete_unique('bhp_consent_consentcatalogue', ['name', 'version'])
    # Deleting model 'ConsentCatalogueAudit'
    db.delete_table('bhp_consent_consentcatalogue_audit')
    # Deleting model 'ConsentCatalogue'
    db.delete_table('bhp_consent_consentcatalogue')
    # Deleting model 'AttachedModelAudit'
    db.delete_table('bhp_consent_attachedmodel_audit')
    # Deleting model 'AttachedModel'
    db.delete_table('bhp_consent_attachedmodel')
# Frozen ORM snapshot used by South to reconstruct the model state at this
# migration.  Auto-generated — do not edit by hand.
models = {
    'bhp_content_type_map.contenttypemap': {
        'Meta': {'ordering': "['name']", 'unique_together': "(['app_label', 'model'],)", 'object_name': 'ContentTypeMap'},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
        'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'module_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
        'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'})
    },
    'consent.attachedmodel': {
        'Meta': {'unique_together': "(('consent_catalogue', 'content_type_map'),)", 'object_name': 'AttachedModel', 'db_table': "'bhp_consent_attachedmodel'"},
        'consent_catalogue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['consent.ConsentCatalogue']"}),
        'content_type_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_content_type_map.ContentTypeMap']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'revision': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
        'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'})
    },
    'consent.attachedmodelaudit': {
        'Meta': {'ordering': "['-_audit_timestamp']", 'object_name': 'AttachedModelAudit', 'db_table': "'bhp_consent_attachedmodel_audit'"},
        '_audit_change_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
        '_audit_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
        '_audit_subject_identifier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
        '_audit_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'consent_catalogue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_attachedmodel'", 'to': "orm['consent.ConsentCatalogue']"}),
        'content_type_map': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_attachedmodel'", 'to': "orm['bhp_content_type_map.ContentTypeMap']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
        'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'revision': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
        'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'})
    },
    'consent.consentcatalogue': {
        'Meta': {'ordering': "['name', 'version']", 'unique_together': "(('name', 'version'),)", 'object_name': 'ConsentCatalogue', 'db_table': "'bhp_consent_consentcatalogue'"},
        'add_for_app': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
        'consent_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
        'content_type_map': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_content_type_map.ContentTypeMap']", 'null': 'True'}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'end_datetime': ('django.db.models.fields.DateTimeField', [], {}),
        'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
        'list_for_update': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'revision': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
        'start_datetime': ('django.db.models.fields.DateTimeField', [], {}),
        'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'version': ('django.db.models.fields.IntegerField', [], {})
    },
    'consent.consentcatalogueaudit': {
        'Meta': {'ordering': "['-_audit_timestamp']", 'object_name': 'ConsentCatalogueAudit', 'db_table': "'bhp_consent_consentcatalogue_audit'"},
        '_audit_change_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
        '_audit_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
        '_audit_subject_identifier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
        '_audit_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
        'add_for_app': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
        'consent_type': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
        'content_type_map': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_consentcatalogue'", 'null': 'True', 'to': "orm['bhp_content_type_map.ContentTypeMap']"}),
        'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'end_datetime': ('django.db.models.fields.DateTimeField', [], {}),
        'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'mac.local'", 'max_length': '50', 'db_index': 'True', 'blank': 'True'}),
        'id': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
        'list_for_update': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
        'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'revision': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
        'start_datetime': ('django.db.models.fields.DateTimeField', [], {}),
        'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'db_index': 'True'}),
        'version': ('django.db.models.fields.IntegerField', [], {})
    },
    u'contenttypes.contenttype': {
        'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
        'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
        'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    }
}

# Apps whose models are fully described by this migration.
complete_apps = ['consent']
# Generated by Django 2.1.2 on 2018-12-17 20:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.1.2, 2018-12-17):
    creates the `Other` model and adds an `owner` one-to-one link from
    `Pharmacy` to the configured user model.
    """

    dependencies = [
        # Required because 'owner' references the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('pharmacies', '0008_comments_rating'),
    ]

    operations = [
        migrations.CreateModel(
            name='Other',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1024)),
            ],
        ),
        migrations.AddField(
            model_name='pharmacy',
            name='owner',
            # NOTE(review): default=1 assigns all existing pharmacies to the
            # user with pk 1 — confirm that user exists in every environment.
            field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# problem [1204] : 최빈수 구하기
# 고등학교 1000명의 수학성적을 토대로 최빈수 구하기
# 학생의 수는 1000명이며, 각 학생의 점수는 0점 이상 100점 이하의 값이다.
# SWEA problem 1204: find the mode of 1000 students' math scores.
# Each test case: first line is the test-case number, second line the scores
# (integers in [0, 100]).  When several scores tie for the highest frequency,
# the largest score is reported.
test_cnt = int(input())
for _ in range(test_cnt):
    test_num = int(input())
    score_list = list(map(int, input().split()))
    # Frequency table: score -> number of occurrences.
    score_dict = {}
    for score in score_list:
        score_dict[score] = score_dict.get(score, 0) + 1
    # Compute the highest frequency once; the original re-evaluated
    # max(score_dict.values()) inside the loop, making it O(n^2).
    highest = max(score_dict.values())
    mode = max(s for s, c in score_dict.items() if c == highest)
    print('#{} {}'.format(test_num, mode))
import modulex
# Smoke-test of the local 'modulex' module: call its function, read a
# module-level attribute, and exercise the Student class.
modulex.panikimanlinollu()
# NOTE(review): 'edava' is presumably a module-level variable in modulex —
# confirm it exists, otherwise this raises AttributeError.
print("jithu is ",modulex.edava)
x=modulex.Student('jithu','xyz')
x.display()
|
from functools import wraps
def authenticate_user(func):
    """View decorator that rejects requests from unauthenticated users.

    Wraps `func(request, *args, **kwargs)`; when `request.user` is not
    authenticated it short-circuits with `restful.un_signup(...)` instead
    of calling the view.
    """
    @wraps(func)
    def wrapper(request, *args, **kwargs):
        user = request.user
        if not user.is_authenticated:
            # '请先登录' means "please log in first".
            # NOTE(review): `restful` is not imported in this file as shown —
            # confirm where it comes from or this raises NameError at runtime.
            return restful.un_signup(message='请先登录')
        return func(request, *args, **kwargs)
    return wrapper
|
import envi.archs.amd64 as e_amd64
import envi.archs.i386.renderer as e_i386_rend
class Amd64OpcodeRenderer(e_i386_rend.i386OpcodeRenderer):
    """Opcode renderer for AMD64: reuses the i386 rendering logic while
    swapping in the 64-bit architecture module and register context."""
    def __init__(self):
        e_i386_rend.i386OpcodeRenderer.__init__(self)
        # 64-bit architecture info and register naming used during rendering.
        self.arch = e_amd64.Amd64Module()
        self.rctx = e_amd64.Amd64RegisterContext()
|
import pandas as pd
import numpy as np
# Join per-frame annotations from the dataset with a tracking method's output
# by matching rows on (nearly) equal timestamps, then write the merged CSV.
dataset_data = pd.read_csv('/home/valentin/human_tracker_ws/FERIT_dataset/kinect_k07_1/results/k07_stamps_annotations.csv').to_numpy()
method_data = pd.read_csv('/home/valentin/human_tracker_ws/FERIT_dataset/kinect_k07_1/results/k07_method21.csv').to_numpy()
# print(method_data)
# Start from the annotated rows; rows with a timestamp-matched method row
# are overwritten by that method row.
method_full_data = dataset_data.copy()
counter = 0
for i in range(method_full_data.shape[0]):
    row_dataset = method_full_data[i]
    # Linear scan over the method rows: O(N*M) overall — acceptable at this
    # size, but a merge on timestamp would scale better.
    for row_method in method_data:
        # Column 1 is assumed to hold the timestamp; matches within 1e-5 —
        # TODO confirm the column layout of both CSVs.
        if abs(row_dataset[1] - row_method[1]) < 0.00001:
            counter += 1
            method_full_data[i] = row_method[0:2]  # assumes rows have exactly 2 columns
            break
print(counter)  # number of annotation rows matched by the method
# NOTE(review): the 'PersoneNumber' spelling is kept as-is because downstream
# consumers may rely on this exact header.
pd.DataFrame(method_full_data, columns=["PersoneNumber", "TIME"]).to_csv("/home/valentin/human_tracker_ws/FERIT_dataset/kinect_k07_1/results/k07_full_method21_data.csv", index=None)
|
from django.conf.urls import url
from django.views.generic.base import RedirectView
from . import views, accountViews, queries
# URL namespace for reverse() lookups.
app_name = 'ASUi3dea'

# Routes are grouped by the module that provides the view.
# NOTE(review): the name 'login' is used twice (redirect and login view) —
# reverse('login') resolves to only one of them; confirm which is intended.
urlpatterns = [
    #accountViews.py
    url(r'^$', RedirectView.as_view(url='login', permanent=False), name='login'),
    url(r'^login/$', accountViews.login, name='login' ),
    url(r'^logout/$', accountViews.logout, name='logout' ),
    url(r'^invalid/$', accountViews.invalid_login, name='invalid_login' ),
    url(r'^auth/$', accountViews.auth_view, name='auth_view' ),
    #views.py
    url(r'^authUser/$', views.loggedin, name='loggedin' ),
    url(r'^authUser/choropleth/(?P<data_type>.*)/$', views.choropleth_data, name='choropleth_data' ),
    url(r'^basicUser/$', views.loggedin_basic, name='loggedin_basic'),
    url(r'^registerDevice/$', views.registerDevice, name='registerDevice'),
    # Inverter primary keys look like '<letters/digits>-<digits>'.
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/$', views.detail, name='detail'),
    #queries.py
    url(r'^save_controls/$', queries.save_controls, name='save_controls' ),
    url(r'^save_data/$', queries.save_data, name='save_data' ),
    url(r'^rabbitTest/$', queries.rabbitTest, name='rabbitTest' ),
    url(r'^get_pi_data/$', queries.get_pi_data, name='get_pi_data' ),
    url(r'^register_pi/$', queries.register_pi, name='register_pi' ),
    url(r'^register_inverter/$', queries.register_inverter, name='register_inverter' ),
    url(r'^update/$', queries.recieve_data_to_save, name='recieve_data_to_save'),
    url(r'^create_group/$', queries.create_group, name='create_group'),
    url(r'^add_to_group/$', queries.add_to_group, name='add_to_group'),
    url(r'^recieve_data/$', queries.recieve_data_to_save, name='recieve_data'),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/change_invert_name/$', queries.change_invert_name, name='change_invert_name'),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/single_overview/$', queries.single_overview, name='single_overview' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/power_query/$', queries.power_query, name='power_query' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/current_query/$', queries.current_query, name='current_query' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/voltage_query/$', queries.voltage_query, name='voltage_query' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/efficiency_query/$', queries.efficiency_query, name='efficiency_query' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/energy_query/$', queries.energy_query, name='energy_query' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/temperature_query/$', queries.temperature_query, name='temperature_query' ),
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/update/$', queries.pull_data_from_inverter, name='pull_data_from_inverter'),
    # Catch-all for named data sets; keep LAST so it doesn't shadow the
    # specific per-inverter routes above.
    url(r'^(?P<inverter_pk>[a-z0-9]+-[0-9]+)/(?P<data_set>.*)/$', queries.get_inverter_data, name='get_inverter_data')
]
|
from JumpScale9 import j
class JSBase:
    """Base class giving JumpScale objects a lazily-initialized logger.

    NOTE(review): the property is named ``j`` but it returns a logger obtained
    from ``j.logger.get()`` — it was probably meant to be called ``logger``.
    The name is kept unchanged so existing callers are not broken.
    """

    def __init__(self):
        # The logger is created on first access, not at construction time.
        self.__logger = None

    @property
    def j(self):
        """Return (and cache) a logger from the module-level ``j`` object."""
        if self.__logger is None:  # identity check, not `== None`
            self.__logger = j.logger.get()
        return self.__logger
|
#!/usr/bin/python
# -*- coding:utf8 -*-
import numpy as np
from scipy.integrate import odeint
import sympy as sp
import math as mt
########################################################################
################ ########## SEIR_DIT CLASS ########## ##################
########################################################################
class SEIR_DIT():
def __init__(self):
self.dummy=True
def set_params(self,beta,theta,phi,phi_,
beta_0=1.3743,beta_1=0.019899665,beta_H=0.01,r=0.65,
omega=1/4.6, gamma_M=1/2.1, sigma_C=1/3., sigma_CSI=1/4.1, sigma_CT=1/7.1,
gamma_HR=1/9., nu=1/14.8, gamma_R=1/3., sigma_HD=1/9., sigma_UD=1/11.1,
delta_M=0.9790248, delta_HR=0.6795883, delta_HD=0.1068193, delta_UR=0.1388350,
xi_PCR=0.85, xi_AG=0.75, T_PCR=2., T_AG=1., psi=3.,
alpha=1/1., t0_DIT=184, n=15.2, nt=0, rho=1/12., q=0.5):
self.beta_0=beta_0 #Transmission rate outside household
self.beta_1=beta_1 #Transmission rate within household
self.beta_H=beta_H #Transmission rate within hospitals
self.r=r #Rate of self-isolation for case infections
self.beta=beta #Transmission rate
self.omega=omega #Mean incubation period
self.gamma_M=gamma_M #Mean duration of mild infection
self.sigma_C=sigma_C #Mean time upon self-isolation for severe infections
self.sigma_CSI=sigma_CSI #Mean duration of isolation for severe infections prior hospitalization
self.sigma_CT=sigma_CT #Mean duration of isolation for traced contacts with severe infection prior hospitalization
self.gamma_HR=gamma_HR #Mean duration of hospitalization for non-critical cases if survive
self.nu=nu #Mean duration in ICU if survive
self.gamma_R=gamma_R #Mean duration of stepdown post ICU
self.sigma_HD=sigma_HD #Mean duration of hospitalization for non-critical cases if die
self.sigma_UD=sigma_UD #Mean duration in ICU if die
self.delta_M=delta_M #Probability of mild infections
self.delta_HR=delta_HR #Probability of recovery for hospitalized infections requiring a general hospital bed
self.delta_HD=delta_HD #Probability of dying for hospitalized infections requiring a general hospital bed
self.delta_UR=delta_UR #Probability of recovery for hospitalized infections requiring an ICU bed
self.delta_UD=(1. #Probability of dying for hospitalized infections requiring an ICU bed
-delta_HR-delta_HD-delta_UR)
self.xi_PCR=xi_PCR #Sensitivity of RT-PCR test
self.xi_AG=xi_AG #Sensitivity of Antigen test
self.T_PCR=T_PCR #Mean time to results of RT-PCR test
self.T_AG=T_AG #Mean time to results of Antigen test
self.psi=psi #Proportion of non-infected suspected cases
self.alpha=alpha #Mean time between onset of symptoms and detection
self.n=float(n) #Averge number of contacts
self.nt=float(nt) #Traced contacts of suspected index cases
self.rho=rho #Mean duration of isolation period
self.q=q #Rate of accomplishment of isolation DIT strategy
self.t0_DIT=t0_DIT #Initial time of DIT strategy
self.theta=theta #Detection rate
self.phi=phi #Tracing rate of exposed contacts
self.phi_=phi_ #Tracing rate of non-exposed contacts
def set_initial(self, N0, E0=0, ET0=0, IM0=0, IMD0=0, IMT0=0,
IC0=0, ICT0=0, ICSI0=0, IHR0=0, IUR0=0, IHD0=0, IUD0=0,
IR0=0, R0=0, D0=0,
QIMD10=0, QET0=0, QIMT0=0, QIMT10=0, QICT0=0, QS10=0, QS20=0):
self.N0=N0
self.E0=E0
self.ET0=ET0
self.IM0=IM0
self.IMD0=IMD0
self.IMT0=IMT0
self.IC0=IC0
self.ICT0=ICT0
self.ICSI0=ICSI0
self.IHR0=IHR0
self.IUR0=IUR0
self.IHD0=IHD0
self.IUD0=IUD0
self.IR0=IR0
self.R0=R0
self.D0=D0
self.QIMD10=QIMD10
self.QET0=QET0
self.QIMT0=QIMT0
self.QIMT10=QIMT10
self.QICT0=QICT0
self.QS10=QS10
self.QS20=QS20
self.S0= (self.N0 - self.E0 - self.ET0 - self.IM0 - self.IMD0 - self.IMT0
- self.IC0 - self.ICT0 - self.ICSI0 - self.IHR0 - self.IUR0 - self.IHD0
- self.IUD0 - self.IR0 - self.R0 - self.D0
- self. QIMD10 - self.QET0 - self. QIMT0 - self.QIMT10 - self. QICT0 - self.QS10 - self.QS20)
def ODES(self,y,t):
S, E, ET, IM, IMD, IMT, IC, ICT, ICSI, IHR, IUR, IHD, IUD, IR, R, D, N, QIMD1, QET, QIMT, QIMT1, QICT, QS1, QS2 = y
beta=self.beta(t)
theta=self.theta(t,IM)
b=beta/self.n
dSdt = (-beta*S/float(N)*(IM+IC+(1.-self.q)*(IMD+IMT+ICT)+(1.-self.r)*ICSI)
-self.beta_H*S/float(N)*(IHR+IUR+IHD+IUD+IR)
-(self.nt+1.)*(self.psi-1.)*theta*self.alpha*S/float(N)*IM
-self.n*(1.-b)*self.phi_(t)*theta*S/float(N)*IM
+ 1./self.T_AG*QS1
+1./(1./self.omega + self.T_PCR)*QS2
)
dEdt =(beta*S/float(N)*((1.-self.phi(t)*theta)*IM+IC
+(1.-self.r)*ICSI
+(1-self.q)*(IMD+IMT+ICT))
+self.beta_H*S/float(N)*(IHR+IUR+IHD+IUD+IR)
-self.omega*E
)
dETdt = beta*self.phi(t)*theta*IM*S/float(N) - self.omega*ET
dIMdt = self.delta_M*self.omega*E - theta*self.alpha*IM - (1.-theta)*self.gamma_M*IM
dIMDdt = theta*self.alpha*IM - 1./(1./self.gamma_M - 1./self.alpha)*IMD
dIMTdt = self.delta_M*self.omega*ET - self.gamma_M*IMT
dICdt = (1.-self.delta_M)*self.omega*E - self.sigma_C*IC
dICSIdt = self.sigma_C*IC - self.sigma_CSI*ICSI
dICTdt = (1.-self.delta_M)*self.omega*ET - self.sigma_CT*ICT
dIHRdt = self.delta_HR*(self.sigma_CSI*ICSI+self.sigma_CT*ICT) - self.gamma_HR*IHR
dIURdt = self.delta_UR*(self.sigma_CSI*ICSI+self.sigma_CT*ICT) - self.nu*IUR
dIHDdt = self.delta_HD*(self.sigma_CSI*ICSI+self.sigma_CT*ICT) - self.sigma_HD*IHD
dIUDdt = self.delta_UD*(self.sigma_CSI*ICSI+self.sigma_CT*ICT) - self.sigma_UD*IUD
dIRdt = self.nu*IUR - self.gamma_R*IR
dRdt = (self.gamma_R*IR
+self.gamma_HR*IHR
+(1.-theta)*self.gamma_M*IM
+1./(1./self.gamma_M-1./self.alpha)*IMD
+self.gamma_M*IMT
)
dDdt = self.sigma_HD*IHD + self.sigma_UD*IUD
dNdt = -self.sigma_HD*IHD - self.sigma_UD*IUD
#Isolation of index cases and contacts
dQIMD1dt = self.xi_AG*(1./self.T_AG)*IMD - self.rho*QIMD1
dQETdt = beta*self.phi(t)*theta*IM*S/float(N) - self.omega*QET
dQIMTdt = self.delta_M*self.omega*QET - 1./float(self.T_PCR)*QIMT
dQIMT1dt = (self.xi_PCR*1./float(self.T_PCR)*QIMT
- 1./((1./self.rho)
-(1./self.omega)
-self.T_PCR)*QIMT1)
dQICTdt = (1.-self.delta_M)*self.omega*QET - self.sigma_CT*QICT
dQS1dt= ((self.psi-1.)*(self.nt+1.)*self.alpha*theta*IM*S/float(N)
- 1./self.T_AG*QS1)
dQS2dt = (self.n*(1.-b)*self.phi_(t)*theta*IM*S/float(N)
- 1./(1./self.omega + self.T_PCR)*QS2)
return [dSdt, dEdt, dETdt, dIMdt, dIMDdt, dIMTdt, dICdt, dICTdt, dICSIdt,
dIHRdt, dIURdt, dIHDdt, dIUDdt, dIRdt, dRdt, dDdt, dNdt,
dQIMD1dt ,dQETdt, dQIMTdt, dQIMT1dt, dQICTdt, dQS1dt, dQS2dt]
def solve(self,t0,tf,dt):
self.t0=t0
self.tf=tf
self.dt_=1/dt
y0= [self.S0, self.E0, self.ET0, self.IM0, self.IMD0, self.IMT0,
self.IC0, self.ICT0, self.ICSI0, self.IHR0, self.IUR0,
self.IHD0, self.IUD0, self.IR0, self.R0, self.D0, self.N0,
self.QIMD10, self.QET0, self.QIMT0, self.QIMT10, self.QICT0, self.QS10, self.QS20]
t= np.linspace(self.t0, self.tf, (self.tf-self.t0)*self.dt_+1)
self.t_=t
solution= odeint(self.ODES,y0,t)
self.S=solution.T[0]
self.E=solution.T[1]
self.ET=solution.T[2]
self.IM=solution.T[3]
self.IMD=solution.T[4]
self.IMT=solution.T[5]
self.IC=solution.T[6]
self.ICT=solution.T[7]
self.ICSI=solution.T[8]
self.IHR=solution.T[9]
self.IUR=solution.T[10]
self.IHD=solution.T[11]
self.IUD=solution.T[12]
self.IR=solution.T[13]
self.R=solution.T[14]
self.D=solution.T[15]
self.N=solution.T[16]
self.QIMD1=solution.T[17]
self.QET=solution.T[18]
self.QIMT=solution.T[19]
self.QIMT1=solution.T[20]
self.QICT=solution.T[21]
self.QS1=solution.T[22]
self.QS2=solution.T[23]
def count_tracing_isolation(self):
self.index_cases=[]
self.isolated_total=[]
self.isolated_contacts=[]
self.traced_contacts=[]
for i in range(len(self.t_)):
theta=self.theta(self.t_[i],self.IM[i])
beta=self.beta(self.t_[i])
b=beta/self.n
self.index_cases.append(theta*self.IM[i]*self.psi)
self.isolated_total.append((self.QIMD1[i]+self.QET[i]+
self.QIMT[i]+self.QIMT1[i]+
self.QICT[i]+
self.QS1[i]+self.QS2[i])*self.q)
self.isolated_contacts.append((self.n*(1-b)*self.phi_(self.t_[i])*theta
+(self.psi-1.)*(self.nt)*self.alpha*theta
+ beta*self.phi(self.t_[i])*theta)*self.S[i]*self.IM[i]/float(self.N[i])*self.q)
self.traced_contacts.append((self.n*(1-b)*self.phi_(self.t_[i])*theta
+(self.psi-1.)*(self.nt)*self.alpha*theta
+ beta*self.phi(self.t_[i])*theta)*self.S[i]*self.IM[i]/float(self.N[i]))
def count_tests(self):
self.N_PCR=[]
self.N_AG=[]
self.pos_PCR=[]
self.pos_AG=[]
for i in range(len(self.t_)):
theta=self.theta(self.t_[i],self.IM[i])
#Before DIT, the level of testing is unknown
if self.t_[i]<self.t0_DIT:
self.N_PCR.append(0)
self.pos_PCR.append(0)
self.N_AG.append(0)
self.pos_AG.append(0)
#Testing starts with DIT strategy
if self.t_[i]>=self.t0_DIT:
beta=self.beta(self.t_[i])
b=beta/self.n
self.N_PCR.append(self.omega*self.QET[i]
+self.n*(1-b)*self.phi_(self.t_[i])*theta*self.S[i]*self.IM[i]/float(self.N[i]))
self.pos_PCR.append(self.omega*self.QET[i]*self.xi_PCR)
self.N_AG.append(self.sigma_CSI*self.ICSI[i]*self.psi+theta*self.alpha*self.IM[i]*self.psi)
self.pos_AG.append((self.sigma_CSI*self.ICSI[i]+theta*self.alpha*self.IM[i])*self.xi_AG)
self.R_pos_PCR=np.array(self.pos_PCR)/np.array(self.N_PCR)
self.R_pos_AG=np.array(self.pos_AG)/np.array(self.N_AG)
def count_icu(self):
self.icu_occupancy=np.array(self.IUR)+np.array(self.IUD)
def count_daily_deaths(self):
self.daily_deaths=[]
for i in range(len(self.t_)):
self.daily_deaths.append(self.sigma_HD*self.IHD[i] + self.sigma_UD*self.IUD[i])
def calculate_incidence(self):
self.incidence=[]
for i in range(len(self.t_)):
beta=self.beta(self.t_[i])
self.incidence.append(
beta*self.S[i]/float(self.N[i])*(self.IM[i]+self.IC[i]
+(1.-self.q)*(self.IMD[i]+self.IMT[i]+self.ICT[i])
+(1.-self.r)*self.ICSI[i])
+self.beta_H*self.S[i]/float(self.N[i])*(self.IHR[i]+self.IUR[i]+self.IHD[i]+self.IUD[i]+self.IR[i]))
def calculate_prevalence(self):
self.prevalence= (np.array(self.IM)
+np.array(self.IMD)
+np.array(self.IMT)
+np.array(self.IC)
+np.array(self.ICT)
+np.array(self.ICSI)
+np.array(self.IHR)
+np.array(self.IUR)
+np.array(self.IHD)
+np.array(self.IUD)
+np.array(self.IR))
def calculate_attack_rate(self):
self.attack_rate = np.array(self.R)/self.N0
def calculate_rt(self):
omega=self.omega
gamma_M=self.gamma_M
sigma_C=self.sigma_C
sigma_CSI=self.sigma_CSI
sigma_CT=self.sigma_CT
gamma_HR=self.gamma_HR
nu=self.nu
gamma_R=self.gamma_R
sigma_HD=self.sigma_HD
sigma_UD=self.sigma_UD
delta_M = self.delta_M
delta_HR = self.delta_HR
delta_UR = self.delta_UR
delta_HD = self.delta_HD
delta_UD = self.delta_UD
alpha=self.alpha
beta_H= self.beta_H
q=self.q
r=self.r
self.rt=[]
for i in range(len(self.t_)):
beta=self.beta(self.t_[i])
theta=self.theta(self.t_[i],self.IM[i])
phi=self.phi(self.t_[i])
K11= (beta*((1.-phi*theta)*delta_M/(alpha*theta+gamma_M*(1.-theta))
+(1.-q)*alpha*delta_M*theta*(1./gamma_M-1./alpha)/(alpha*theta+gamma_M*(1.-theta))
+(1.-delta_M)/sigma_C
+(1.-r)*(1.-delta_M)/sigma_CSI)
+beta_H*(1-delta_M)*(delta_HR/gamma_HR
+delta_UR/nu
+delta_HD/sigma_HD
+delta_UD/sigma_UD
+delta_UR/gamma_R))
K12= (beta*((1.-q)*delta_M/gamma_M
+(1.-q)*(1.-delta_M)/sigma_CT)
+beta_H*(1-delta_M)*(delta_HR/gamma_HR
+delta_UR/nu
+delta_HD/sigma_HD
+delta_UD/sigma_UD
+delta_UR/gamma_R))
K21= beta*phi*theta*delta_M/(alpha*theta+gamma_M*(1.-theta))
lmbd1=(K11 + mt.sqrt(K11**2 + 4*K12*K21))/2.
reff =lmbd1*self.S[i]/float(self.N[i])
self.rt.append(reff)
def calculate_beta(self,Rt,S,N):
omega=self.omega
gamma_M=self.gamma_M
sigma_C=self.sigma_C
sigma_CSI=self.sigma_CSI
sigma_CT=self.sigma_CT
gamma_HR=self.gamma_HR
nu=self.nu
gamma_R=self.gamma_R
sigma_HD=self.sigma_HD
sigma_UD=self.sigma_UD
delta_M = self.delta_M
delta_HR = self.delta_HR
delta_UR = self.delta_UR
delta_HD = self.delta_HD
delta_UD = self.delta_UD
alpha=self.alpha
theta=self.theta
phi=self.phi
beta_H= self.beta_H
beta_ = sp.symbols('beta_')
q=self.q
r=self.r
self.beta_solution=[]
#Dominant eigen-value
K11_= (beta_*((1.-phi*theta)*delta_M/(alpha*theta+gamma_M*(1.-theta))
+(1.-q)*alpha*delta_M*theta*(1./gamma_M-1./alpha)/(alpha*theta+gamma_M*(1.-theta))
+(1.-delta_M)/sigma_C
+(1.-r)*(1.-delta_M)/sigma_CSI)
+beta_H*(1-delta_M)*(delta_HR/gamma_HR
+delta_UR/nu
+delta_HD/sigma_HD
+delta_UD/sigma_UD
+delta_UR/gamma_R))
K12_= (beta_*((1.-q)*delta_M/gamma_M
+(1.-q)*(1.-delta_M)/sigma_CT)
+beta_H*(1-delta_M)*(delta_HR/gamma_HR
+delta_UR/nu
+delta_HD/sigma_HD
+delta_UD/sigma_UD
+delta_UR/gamma_R))
K21_= beta_*phi*theta*delta_M/(alpha*theta+gamma_M*(1.-theta))
lmbd1_=(K11_ + sp.sqrt(K11_**2 + 4*K12_*K21_))/2.
if len(Rt)!=len(S) or len(Rt)!=len(N) or len(S)!=len(N):
print 'Todos los vectores deben tener el mismo largo'
else:
for i in range(len(Rt)):
Rt_beta=lmbd1_*S[i]/N[i]
expr= Rt_beta - Rt[i]
sol= sp.solve(expr)
self.beta_solution.append(max(sol))
del(beta_)
|
import sys
import re
from PySide.QtGui import *
from EntryForm import *
class EntryApplication(QMainWindow, Ui_MainWindow):
    """Contact-entry form: validates the fields and saves/loads one record
    as a small XML file (target.xml)."""

    # Two-letter USPS state codes accepted by the State field.
    states = ["AK", "AL", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "ID", "IL", "IN", "IA", "KS", "KY",
              "LA", "ME", "MD", "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM", "NY", "NC", "ND",
              "OH", "OK", "OR", "PA", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]

    def __init__(self, parent=None):
        super(EntryApplication, self).__init__(parent)
        self.setupUi(self)
        # Line edits in the order they are serialized to / loaded from XML.
        self.edits = [self.txtFirstName, self.txtLastName, self.txtAddress, self.txtCity, self.txtState, self.txtZip, self.txtEmail]
        self.btnClear.clicked.connect(self.clearScreen)
        # Any edit disables loading and enables saving (see enableBtn).
        for field in self.edits:
            field.textChanged.connect(self.enableBtn)
        self.btnSave.clicked.connect(self.getData)
        self.btnLoad.clicked.connect(self.loadData)

    def loadFromXmlFile(self, filePath):
        """
        Handling the loading of the data from the given file name. This method should only be invoked by the
        'loadData' method.
        """
        with open(filePath, 'r') as f:
            data = f.readlines()
        del data[0:2]   # drop the XML declaration and the opening <user> tag
        del data[-1]    # drop the closing </user> tag (was: del data[len(data)-1])
        itemlist = []
        for item in data:
            # Extract the text between the opening and closing tag on each line.
            item = item.split('>')[1].split('<')[0]
            itemlist.append(item)
        # Same field order as self.edits / the writer in getData.
        self.txtFirstName.setText(itemlist[0])
        self.txtLastName.setText(itemlist[1])
        self.txtAddress.setText(itemlist[2])
        self.txtCity.setText(itemlist[3])
        self.txtState.setText(itemlist[4])
        self.txtZip.setText(itemlist[5])
        self.txtEmail.setText(itemlist[6])

    def loadData(self):
        """
        Obtain a file name from a file dialog, and pass it on to the loading method. This is to facilitate automated
        testing. Invoke this method when clicking on the 'load' button.
        *** DO NOT MODIFY THIS METHOD, OR THE TEST WILL NOT PASS! ***
        """
        filePath, _ = QFileDialog.getOpenFileName(self, caption='Open XML file ...', filter="XML files (*.xml)")
        if not filePath:
            return
        self.loadFromXmlFile(filePath)

    def clearScreen(self):
        """Blank all fields, reset the error label and the button states."""
        for field in self.edits:
            field.setText('')
        self.btnSave.setEnabled(False)
        self.btnLoad.setEnabled(True)
        self.lblError.setText('')

    def enableBtn(self):
        """Once anything is typed, only saving (not loading) makes sense."""
        self.btnLoad.setEnabled(False)
        self.btnSave.setEnabled(True)

    def getData(self):
        """Validate every field; when all pass, write the record to target.xml.

        Each failed check overwrites lblError, so the message shown is for the
        last invalid field.
        """
        self.clearError = True
        if self.txtFirstName.text() == '':
            self.lblError.setText('Error: Please enter a first name')
            self.clearError = False
        else:
            self.fname = self.txtFirstName.text()
        if self.txtLastName.text() == '':
            self.lblError.setText('Error: Please enter a last name')
            self.clearError = False
        else:
            self.lname = self.txtLastName.text()
        if self.txtAddress.text() == '':
            self.lblError.setText('Error: Please enter an address')
            self.clearError = False
        else:
            self.address = self.txtAddress.text()
        if self.txtCity.text() == '':
            self.lblError.setText('Error: Please enter a city')
            self.clearError = False
        else:
            self.city = self.txtCity.text()
        if self.txtState.text() == '':
            self.lblError.setText('Error: Please enter a state')
            self.clearError = False
        elif self.txtState.text() not in self.states:
            self.lblError.setText('Error: State is not valid!')
            self.clearError = False
        else:
            self.state = self.txtState.text()
        testz = re.compile(r'[0-9]{5}')
        # fullmatch so the whole field must be exactly 5 digits; the original
        # re.match accepted trailing garbage such as '123456' or '12345abc'.
        checkz = testz.fullmatch(self.txtZip.text())
        if self.txtZip.text() == '':
            self.lblError.setText('Error: Please enter a Zip code')
            self.clearError = False
        elif not checkz:
            self.lblError.setText('Error: Zip code is not valid!')
            self.clearError = False
        else:
            self.zip = self.txtZip.text()
        teste = re.compile(r'\w+@\w+\.\w+')
        # NOTE(review): match() only anchors at the start, so trailing text is
        # accepted; left as-is because fullmatch would reject multi-label
        # domains (user@mail.example.com) with this pattern.
        checke = re.match(teste,self.txtEmail.text())
        if self.txtEmail.text() == '':
            self.lblError.setText('Error: Please enter an email address')
            self.clearError = False
        elif not checke:
            self.lblError.setText('Error: Email is not valid!')
            self.clearError = False
        else:
            self.email = self.txtEmail.text()
        if self.clearError:
            self.lblError.setText('')
            with open('target.xml', 'w') as f:
                f.write('<?xml version="1.0" encoding="UTF-8"?>\n<user>\n\t<FirstName>{}</FirstName>\n\t<LastName>{}</LastName>'
                        '\n\t<Address>{}</Address>\n\t<City>{}</City>\n\t<State>{}</State>\n\t<ZIP>{}</ZIP>\n\t<Email>{}'
                        '</Email>\n</user>\n'.format(self.fname,self.lname,self.address,self.city,self.state,self.zip,self.email))
if __name__ == "__main__":
    # Standard Qt bootstrap: create the application, show the form and enter
    # the event loop (blocks until the window is closed).
    currentApp = QApplication(sys.argv)
    currentForm = EntryApplication()
    currentForm.show()
    currentApp.exec_()
|
import serial
import speech_recognition as sr
def soz():
    """Capture one utterance from the microphone and return the Russian
    transcription from Google Speech Recognition, or None on failure."""
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("Скажите что-нибудь")
        audio = recognizer.listen(source)
        try:
            return recognizer.recognize_google(audio, language="ru-RU")
        except sr.UnknownValueError:
            # The recognizer could not make out the phrase.
            print("Робот не расслышал фразу")
        except sr.RequestError as e:
            # The remote recognition service failed.
            print("Ошибка сервиса; {0}".format(e))
# Open the serial port COM6 at 9600 baud (presumably the LED controller
# board -- confirm the port name on the target machine).
Data = serial.Serial('com6',9600)
def led_k():
    # Command '1': red LED (triggered by the word 'красный' below).
    Data.write(bytes('1',"ascii"))
def led_j():
    # Command '2': green LED (triggered by 'зелёный' below).
    Data.write(bytes('2',"ascii"))
def led_s():
    # Command '3': yellow LED (triggered by 'жёлтый' below).
    Data.write(bytes('3',"ascii"))
# Main loop: listen for a spoken colour name (Russian) and forward the
# matching command over the serial port.
while True:
    # soz() may return None on recognition failure; str() makes that a
    # harmless 'None' that matches no colour below.
    sozdor=str(soz())
    print(sozdor)
    # 'красный' = red
    if(sozdor=='красный'):
        led_k()
    # Several spellings of green are accepted to tolerate recognizer variance.
    if(sozdor=='зелёный' or sozdor=='зелeный' or sozdor=='зелённый'):
        led_j()
    # 'жёлтый' = yellow
    if(sozdor=='жёлтый'):
        led_s()
|
from flask import Flask, render_template, request
from Flaskproject.models import *
@app.route('/user/registration/', methods=['GET','POST'])
def register_page():
    """Render the registration form (GET) or create a new user (POST).

    NOTE(review): `app`, `db`, `Userinfo` and `Logindetails` are presumably
    provided by the star import of Flaskproject.models -- confirm they are
    all exported there.
    """
    if request.method=='POST':
        # Collect the profile fields posted by the registration form.
        user = Userinfo(name=request.form['nm'],
                        address=request.form['adr'],
                        contact=request.form['con'],
                        education=request.form['edu'],
                        gender=request.form['gen']
                        )
        login = Logindetails(username=request.form['username'],
                             password=request.form['password'])
        # Password and confirmation must match; otherwise redisplay the form.
        # NOTE(review): the template receives 'userinfo'/'logininfo' here but
        # 'user'/'login' on the GET branch below -- verify register.html
        # handles both variable names.
        if request.form['password']!=request.form['cpwd']:
            return render_template('register.html', userinfo=user.dummy_user(), logininfo=login.dummy_login())
        # Persist the profile first so its primary key exists ...
        db.session.add(user)
        db.session.commit()
        # ... then link the credentials to it via the uid foreign key.
        login.uid=user.id
        db.session.add(login)
        db.session.commit()
        return render_template('login.html', msg='User Created Successfully...')
    else:
        return render_template('register.html', user=Userinfo.dummy_user(), login=Logindetails.dummy_login())
if __name__ == '__main__':
    # Run the Flask development server with the interactive debugger enabled.
    app.run(debug=True)
class Solution:
    def isPossible(self, n: int, edges: List[List[int]]) -> bool:
        """Return True iff at most two extra edges (no multi-edges, no
        self-loops) can make every node's degree even.

        By the handshake lemma the number of odd-degree nodes is even, so
        only 0, 2 or 4 odd nodes are fixable with <= 2 added edges:
          * 0 odd nodes: already done.
          * 2 odd nodes a, b: connect them directly, or route both through a
            third node i via (a, i) and (b, i) (i gains two edges, so its
            parity is unchanged -- and every node other than a, b is even).
          * 4 odd nodes: the two added edges must form a perfect matching on
            them, so try all three pairings.
        Anything else needs more than two edges.

        Bug fixed vs. the previous version: it greedily paired odd nodes and
        then routed EVERY leftover pair through intermediate nodes, never
        bounding the total number of added edges at two (e.g. K4 plus an
        isolated node wrongly returned True, although that needs 4 edges).
        """
        es = set()              # symmetric pair set: (a, b) and (b, a)
        deg = defaultdict(int)  # node -> degree
        for a, b in edges:
            deg[a] += 1
            deg[b] += 1
            es.add((a, b))
            es.add((b, a))

        odd = [v for v in deg if deg[v] % 2]

        if not odd:
            return True

        if len(odd) == 2:
            a, b = odd
            if (a, b) not in es:
                return True
            # Route through any third node adjacent to neither endpoint;
            # its own parity stays even (it gains exactly two edges).
            return any(i != a and i != b and
                       (a, i) not in es and (b, i) not in es
                       for i in range(1, n + 1))

        if len(odd) == 4:
            a, b, c, d = odd
            # The two new edges must pair up the four odd nodes exactly.
            return (((a, b) not in es and (c, d) not in es) or
                    ((a, c) not in es and (b, d) not in es) or
                    ((a, d) not in es and (b, c) not in es))

        return False
|
# Copyright 2015 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for Pretty Tensor."""
import collections
import tensorflow as tf
from prettytensor import functions
from prettytensor import layers
from prettytensor import pretty_tensor_class as prettytensor
from prettytensor.pretty_tensor_class import DIM_REST
from prettytensor.pretty_tensor_class import DIM_SAME
from prettytensor.pretty_tensor_class import Phase
from prettytensor.pretty_tensor_class import PROVIDED
@prettytensor.Register
def reshape(input_layer, shape_spec):
  """Reshapes this tensor to the given spec.

  Each entry of shape_spec is resolved as follows:

  1. DIM_SAME copies the corresponding value from the current shape.
  2. DIM_REST absorbs all remaining values of the current shape; only one is
     allowed and it must be the last element.
  3. None or -1 marks an unknown dimension (at most one unknown overall).
  4. Anything else is used as an integer.

  A compact string syntax ('_' for DIM_SAME, '*' for DIM_REST, single digit
  integers) is also accepted, since strings are sequences; integers larger
  than 9 must be passed as part of a real sequence.

  Args:
    input_layer: The Pretty Tensor object, supplied.
    shape_spec: The spec for the new shape.
  Returns:
    A LayerWrapper with the reshaped tensor.
  Raises:
    ValueError: If there are too many unknown dimensions or the shape_spec is
      not valid (e.g. requires an out of range DIM_SAME or has DIM_REST in an
      illegal spot).
  """
  unknowns = 0
  old_shape = input_layer.shape
  new_shape = []
  for i, spec in enumerate(shape_spec):
    if spec == DIM_SAME:
      if i >= len(old_shape):
        raise ValueError('%d exceeds the head_shape' % i)
      if old_shape[i] is None:
        new_shape.append(-1)
        unknowns += 1
      else:
        new_shape.append(old_shape[i])
    elif spec == DIM_REST:
      if i != len(shape_spec) - 1:
        raise ValueError('DIM_REST must be at the end.')
      # Multiply out the remainder of the old shape; any unknown dim makes
      # the whole rest unknown (-1).
      rest = 1
      for dim in old_shape[i:]:
        if dim is None:
          rest = -1
          unknowns += 1
          break
        rest *= dim
      new_shape.append(rest)
    elif spec is None or spec == -1:
      new_shape.append(-1)
      unknowns += 1
    else:
      new_shape.append(int(spec))
  if unknowns > 1:
    raise ValueError('Invalid shape, too many unknowns: %s' % new_shape)
  return input_layer.with_tensor(tf.reshape(input_layer, new_shape))
@prettytensor.Register
def flatten(input_layer, preserve_batch=True):
  """Flattens this.

  If preserve_batch is True, the result is rank 2 with the first (batch)
  dimension untouched; otherwise the result is rank 1.

  Args:
    input_layer: The Pretty Tensor object, supplied.
    preserve_batch: If True (the default), then preserve the first dimension.
  Returns:
    A LayerWrapper with the flattened tensor.
  """
  spec = [DIM_SAME, DIM_REST] if preserve_batch else [DIM_REST]
  return reshape(input_layer, spec)
@prettytensor.Register
def stop_gradient(input_layer):
  """Cuts off the gradient at this point.

  Works on both sequence and regular Pretty Tensors.

  Args:
    input_layer: The input.
  Returns:
    A new Pretty Tensor of the same type with stop_gradient applied.
  """
  if not input_layer.is_sequence():
    return tf.stop_gradient(input_layer)
  stopped = [tf.stop_gradient(item) for item in input_layer.sequence]
  return input_layer.with_sequence(stopped)
@prettytensor.Register(assign_defaults='phase')
def dropout(input_layer, keep_prob, phase=Phase.train, name=PROVIDED):
  """Applies dropout, but only while in the train phase."""
  if phase != Phase.train:
    # Evaluation/inference: pass the layer through untouched.
    return input_layer
  return tf.nn.dropout(input_layer, keep_prob, name=name)
# TODO(eiderman): Give a good name for this function: Maybe InnerProductIsh ?
# pylint: disable=invalid-name
@prettytensor.Register(assign_defaults=('l2loss', 'stddev'))
class diagonal_matrix_mul(prettytensor.VarStoreMethod):
  """Diagonal Matrix Multiplication."""

  def __call__(self, input_layer, init=None, stddev=None, l2loss=None):
    """Performs a diagonal matrix multiplication with a learned vector.

    This creates the parameter vector.

    Args:
      input_layer: The input_layer.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      l2loss: An l2 weight decay to apply.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if the head_shape is not rank 2 or the number of input nodes
        (second dim) is not known.
    """
    size = input_layer.shape[-1]
    if init is None:
      # Derive the initializer from stddev: None -> Xavier, 0 -> zeros,
      # anything else -> truncated normal with that stddev.
      if stddev is None:
        init = layers.xavier_init(size, 0)
      else:
        init = (tf.truncated_normal_initializer(stddev=stddev) if stddev
                else tf.zeros_initializer)
    param = self.variable('weights', [size], init)
    layers.add_l2loss(input_layer.bookkeeper, param, l2loss)
    return input_layer * param
# pylint: enable=invalid-name
# pylint: disable=invalid-name
@prettytensor.Register(assign_defaults=('activation_fn', 'l2loss', 'stddev'))
class fully_connected(prettytensor.VarStoreMethod):

  def __call__(self,
               input_layer,
               size,
               name=PROVIDED,
               activation_fn=None,
               l2loss=None,
               init=None,
               stddev=None,
               bias=True,
               bias_init=0.):
    """Adds the parameters for a fully connected layer and returns a tensor.

    The current head must be a rank 2 Tensor.

    Args:
      input_layer: The Pretty Tensor object, supplied.
      size: The number of neurons.
      name: The name for this operation is also used to create/find the
        parameter variables.
      activation_fn: A tuple of (activation_function, extra_parameters). Any
        function that takes a tensor as its first argument can be used. More
        common functions will have summaries added (e.g. relu).
      l2loss: Set to a value greater than 0 to use L2 regularization to decay
        the weights.
      init: An optional initialization. If not specified, uses Xavier
        initialization.
      stddev: A standard deviation to use in parameter initialization.
      bias: Set to False to not have a bias.
      bias_init: The initial value for the bias.
    Returns:
      A Pretty Tensor handle to the layer.
    Raises:
      ValueError: if the head_shape is not rank 2 or the number of input nodes
        (second dim) is not known.
    """
    input_shape = input_layer.shape
    if len(input_shape) != 2:
      raise ValueError(
          'Cannot perform fully connected on tensor with shape %s' %
          input_layer.shape)
    in_size = input_shape[1]
    if in_size is None:
      raise ValueError('Number of input nodes must be known.')
    books = input_layer.bookkeeper
    if init is None:
      # Derive the initializer from stddev: None -> Xavier, 0 -> zeros,
      # anything else -> truncated normal with that stddev.
      if stddev is None:
        init = layers.xavier_init(in_size, size)
      else:
        init = (tf.truncated_normal_initializer(stddev=stddev) if stddev
                else tf.zeros_initializer)
    elif stddev is not None:
      raise ValueError('Do not set both init and stddev.')
    dtype = input_layer.tensor.dtype
    params = self.variable('weights', [in_size, size], init, dt=dtype)
    y = tf.matmul(input_layer, params)
    layers.add_l2loss(books, params, l2loss)
    if bias:
      y += self.variable('bias', [size], tf.constant_initializer(bias_init),
                         dt=dtype)
    if activation_fn is None:
      return y
    # A bare callable is promoted to a 1-tuple; extra elements are passed as
    # arguments to the activation.
    if not isinstance(activation_fn, collections.Sequence):
      activation_fn = (activation_fn,)
    return layers.apply_activation(books, y, activation_fn[0],
                                   activation_args=activation_fn[1:])
# pylint: enable=invalid-name
@prettytensor.Register
def apply_with_summary(input_layer, operation, *op_args, **op_kwargs):
  """Applies the given operation to this and sets the new head.

  Args:
    input_layer: The input layer for this op.
    operation: An operation that takes a tensor and the supplied args.
    *op_args: Extra arguments for operation.
    **op_kwargs: Keyword arguments for the operation.
  Returns:
    A new layer with operation applied.
  """
  books = input_layer.bookkeeper
  return layers.apply_activation(books, input_layer.tensor, operation,
                                 activation_args=op_args,
                                 activation_kwargs=op_kwargs)
@prettytensor.Register()
def _rapply(input_layer, operation, *op_args, **op_kwargs):
  """Applies `operation` with this layer's tensor as the LAST positional arg.

  Args:
    input_layer: The input layer for this op.
    operation: An operation that takes a tensor and the supplied args.
    *op_args: Extra arguments for operation.
    **op_kwargs: Keyword arguments for the operation.
  Returns:
    A new layer with operation applied.
  """
  all_args = list(op_args) + [input_layer.tensor]
  return input_layer.with_tensor(operation(*all_args, **op_kwargs))
@prettytensor.Register(method_name='apply')
def apply_op(input_layer, operation, *op_args, **op_kwargs):
  """Applies the given operation to this without adding any summaries.

  Args:
    input_layer: The input layer for this op.
    operation: An operation that takes a tensor and the supplied args.
    *op_args: Extra arguments for operation.
    **op_kwargs: Keyword arguments for the operation.
  Returns:
    A new layer with operation applied.
  """
  new_tensor = operation(input_layer.tensor, *op_args, **op_kwargs)
  return input_layer.with_tensor(new_tensor)
@prettytensor.Register
def __getitem__(input_layer, key):  # pylint: disable=invalid-name
  """Indexes into this layer: sequences stay wrapped, tensors are sliced."""
  if not input_layer.is_sequence():
    return input_layer.tensor[key]
  return input_layer.with_tensor(input_layer.sequence[key])
@prettytensor.Register
def join(input_layer, others, include_self=True, join_function=None):
  """Joins the provided PrettyTensors with this using the join function.

  Args:
    input_layer: The input layer for this op.
    others: Sequence of PrettyTensor objects.
    include_self: Whether or not this includes itself or if the value is only
      derived from others.
    join_function: The function to use for joining, must accept a list of
      tensors. Use None for concat on the final dimension.
  Returns:
    self.
  """
  if include_self:
    tensors = [input_layer] + list(others)
  else:
    tensors = others
  return prettytensor.join_pretty_tensors(tensors, input_layer, join_function)
def _check_split_dims(num_splits, split_dim, shape):
if split_dim >= len(shape):
raise ValueError('split_dim out of bounds: %d %s' % (split_dim, shape))
if shape[split_dim] % num_splits != 0:
raise ValueError(
'Failure to split %s tensor at split_dim=%d\nMust divide the split '
'dimension evenly: %d mod %d != 0' %
(shape, split_dim, shape[split_dim], num_splits))
@prettytensor.Register
def unzip(input_layer, split_dim=0, num_splits=2):
  """Unzips the head Tensor along the split_dim into num_splits equal chunks.

  Examples:

  * `[1, 2, 3, 4] -> [1, 3], [2, 4]`
  * `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [3, 3]], [[2, 2], [4, 4]]`

  Args:
    input_layer: The chainable object, supplied.
    split_dim: The dimension to split along. Defaults to batch.
    num_splits: The number of splits.
  Returns:
    A list of PrettyTensors.
  Raises:
    ValueError: If split_dim is out of range or isn't divided evenly by
      num_splits.
  """
  head_shape = input_layer.shape
  _check_split_dims(num_splits, split_dim, head_shape)
  pieces = functions.unzip(input_layer, split_dim, head_shape[split_dim],
                           num_splits)
  return input_layer.with_sequence(pieces)
@prettytensor.Register
def concat(input_layer, concat_dim, other_tensors):
  """Concatenates input PrettyTensor with other_tensors along the given dim.

  The Pretty Tensor passed via input_layer is placed at the front of the list
  of tensors to concat.

  Args:
    input_layer: The input layer.
    concat_dim: The dimension along which to concat.
    other_tensors: The tensors to concatenate with.
  Returns:
    A new PrettyTensor.
  """
  to_concat = [input_layer]
  to_concat.extend(other_tensors)
  # Note: pre-TF-1.0 argument order (dim first, values second).
  return tf.concat(concat_dim, to_concat)
@prettytensor.Register(method_name='slice')
def slice_(input_layer, begin, size):
  """Extracts a slice of shape `size` from the tensor at offset `begin`.

  `begin[i]` is the zero-based offset into dimension i of the input, and
  `size[i]` is the number of elements to take from that dimension. A
  `size[i]` of -1 takes all remaining elements of dimension i, i.e. it is
  equivalent to setting `size[i] = input.dim_size(i) - begin[i]`.

  This operation requires that:

      `0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`

  Examples:

      # 'input' is [[[1, 1, 1], [2, 2, 2]],
      #             [[3, 3, 3], [4, 4, 4]],
      #             [[5, 5, 5], [6, 6, 6]]]
      tf.slice(input, [1, 0, 0], [1, 1, 3]) ==> [[[3, 3, 3]]]
      tf.slice(input, [1, 0, 0], [1, 2, 3]) ==> [[[3, 3, 3],
                                                  [4, 4, 4]]]
      tf.slice(input, [1, 0, 0], [2, 1, 3]) ==> [[[3, 3, 3]],
                                                 [[5, 5, 5]]]

  Args:
    input_layer: A Tensor.
    begin: An int32 or int64 Tensor of length rank(input_layer)
    size: An int32 or int64 Tensor of length rank(input_layer)
  Returns:
    A tensor with the selected slice.
  """
  return tf.slice(input_layer, begin, size)
@prettytensor.Register
def split(input_layer, split_dim=0, num_splits=2):
  """Splits the head Tensor along the split_dim into num_splits equal chunks.

  Examples:

  * `[1, 2, 3, 4] -> [1, 2], [3, 4]`
  * `[[1, 1], [2, 2], [3, 3], [4, 4]] -> [[1, 1], [2, 2]], [[3, 3], [4, 4]]`

  Args:
    input_layer: The chainable object, supplied.
    split_dim: The dimension to split along. Defaults to batch.
    num_splits: The number of splits.
  Returns:
    A list of PrettyTensors.
  Raises:
    ValueError: If split_dim is out of range or isn't divided evenly by
      num_splits.
  """
  head_shape = input_layer.shape
  _check_split_dims(num_splits, split_dim, head_shape)
  # Note: pre-TF-1.0 argument order (dim, num, value).
  pieces = tf.split(split_dim, num_splits, input_layer)
  return input_layer.with_sequence(pieces)
@prettytensor.Register
def squeeze(input_layer, squeeze_dims=None):
  """Removes dimensions of size 1 from the shape of a tensor.

  All singleton dimensions are removed by default; pass squeeze_dims to
  remove only the listed size-1 dimensions.

  Args:
    input_layer: A Tensor of any type to squeeze.
    squeeze_dims: An optional list of ints. Defaults to [].
  Returns:
    The squeezed tensor.
  """
  return tf.squeeze(input_layer, squeeze_dims)
@prettytensor.Register(method_name='map')
def map_(input_layer, fn):
  """Maps the given function across this sequence.

  To map an entire template across the sequence, use the `as_fn` method on
  the template.

  Args:
    input_layer: The input tensor.
    fn: A function of 1 argument that is applied to each item in the sequence.
  Returns:
    A new sequence Pretty Tensor.
  """
  mapped = [fn(item) for item in input_layer]
  return prettytensor.wrap_sequence(mapped)
|
from computer import Computer
def main():
    """Load the program from Input/9.1.in and run the Computer once for each
    input value 1 and 2, printing the outputs of each run."""
    fname = "Input/9.1.in"
    # Materialize the program as a list: under Python 3, map() returns an
    # iterator that cannot be copied with [:] (and xrange() no longer
    # exists).  Also close the file deterministically.
    with open(fname) as f:
        arr = list(map(int, f.readline().split(",")))
    for i in range(1, 3):
        c = Computer(arr[:])  # fresh copy so each run starts unmodified
        ret, outputs, dump = c.run_computer([i])
        print(outputs)
if __name__=="__main__":
main() |
import cv2
import numpy as np
import time
class gridSquare:
    """Corner coordinates of one grid cell, each an (x, y) pair.

    NOTE(review): these are class attributes and therefore shared by all
    instances until assigned on an instance -- confirm intended usage.
    """
    topLeft = np.zeros([2])
    topRight = np.zeros([2])
    bottomLeft = np.zeros([2])
    # Bug fix: the original declared topRight twice and never bottomRight.
    bottomRight = np.zeros([2])
def perp(a):
    """Return *a* rotated 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    rotated = np.empty_like(a)
    rotated[0] = -a[1]
    rotated[1] = a[0]
    return rotated
def intersection(a1, a2, b1, b2):
    """Intersection point of line a1-a2 with line b1-b2.

    Uses the perpendicular-dot-product formulation; returns [inf, inf] when
    the lines are parallel (or coincident).
    """
    dir_a = a2 - a1
    dir_b = b2 - b1
    offset = a1 - b1
    # Perpendicular of dir_a, written inline: (x, y) -> (-y, x).
    normal = np.empty_like(dir_a)
    normal[0] = -dir_a[1]
    normal[1] = dir_a[0]
    denom = np.dot(normal, dir_b)
    if denom == 0:
        return [np.inf, np.inf]
    t = np.dot(normal, offset) / denom
    return t * dir_b + b1
def isOutlier(image, x, y):
    """True when (x, y) lies more than ~10% outside the image bounds.

    *image* is a numpy shape tuple: image[0] (height) bounds y, image[1]
    (width) bounds x.
    """
    limit_y = image[0] + image[0] / 10
    limit_x = image[1] + image[1] / 10
    return abs(y) > limit_y or abs(x) > limit_x
#np.set_printoptions(threshold=np.inf)
start_time = time.time()
# Load the grid photo and detect edges; the Canny thresholds (50/100) were
# presumably tuned for this particular image -- confirm for other inputs.
img = cv2.imread('images\GridClose2.jpg')
edges = cv2.Canny(img,50,100,apertureSize = 3)
cv2.imshow('edges',edges)
# Blank canvas the same size as img, onto which lines/points are drawn later.
lineimg = np.zeros([img.shape[0],img.shape[1],img.shape[2]])
# Hough transform: rho step 2 px, theta step 1 degree, 500 votes minimum.
lines = cv2.HoughLines(edges,2,np.pi/180,500)
# Two detected lines count as duplicates when both rho and theta are within
# these tolerances of each other.
rtolerance = 20
ttolerance = 0.25
shape = lines.shape[0]
i = 0
#------ Similar Line Deletion ------
# O(n^2) de-duplication: for each line i, delete any other line j whose
# (rho, theta) is within tolerance of line i's.
while (i < shape):
    j = 0
    while (j < shape):
        rho = lines[j][0][0]
        theta = lines[j][0][1]
        diffr = abs(lines[i][0][0] - rho)
        difft = abs(lines[i][0][1] - theta)
        # If lines are the same, do nothing (no-op keeps the line itself)
        if (lines[i][0][0] == rho and lines[i][0][1] == theta):
            j = j
        # If lines are similar, delete
        elif (diffr < rtolerance):
            if (difft < ttolerance):
                lines = np.delete(lines, j, 0)
                shape-=1
                j-=1
        j+=1
    i+=1
#------ Sorting lines into horizontal or vertical sections ------
# A line is 'horizontal' when its theta is within 45 degrees of pi/2 (or of
# 3*pi/2); everything else is 'vertical'.
# NOTE(review): the loop stops at lines.shape[0] - 1, so the last detected
# line is never classified -- confirm this is intended.
vert = []
horiz = []
for i in range (0, lines.shape[0] - 1):
    difft = abs((np.pi/2) - lines[i][0][1])
    difftN = abs((np.pi*1.5) - lines[i][0][1])
    if (difft < np.pi/4 or difftN < np.pi/4):
        horiz.append(lines[i][0])
    else:
        vert.append(lines[i][0])
# Reinterpret each (rho, theta) pair as a structured record so np.sort can
# order the lines by their 'x' (rho) field.
vert = np.array(vert)
vert.dtype = [('x', np.float32), ('m', np.float32)]
#print "Unsorted V"
#print vert
vert = np.sort(vert, axis=0, kind='mergesort', order=['x'])
#print "Sorted V"
#print vert
horiz = np.array(horiz)
horiz.dtype = [('x', np.float32), ('m', np.float32)]
#print "Unsorted H"
#print horiz
horiz = np.sort(horiz, axis=0, kind='mergesort', order=['x'])
#print "Sorted H"
#print horiz
vsize = vert.shape[0]
hsize = horiz.shape[0]
lsize = vsize + hsize
# Flatten the sorted vertical + horizontal records back into one plain
# (rho, theta) array, vertical lines first.
lines = np.zeros([vert.shape[0]+horiz.shape[0],2])
for i in range (0, vert.shape[0]):
    lines[i][0] = vert[i]['x']
    lines[i][1] = vert[i]['m']
for i in range (0, horiz.shape[0]):
    lines[i + vert.shape[0]][0] = horiz[i]['x']
    lines[i + vert.shape[0]][1] = horiz[i]['m']
#ntr = np.zeros([SEpoints.shape[0]*SEpoints.shape[0],2])
## Carry the fix through. Switch to 2d array
#------ Calculating Lines ------
# Convert each (rho, theta) into two far-apart endpoints (+/- 2000 px along
# the line direction) and draw the segment in red.
# NOTE(review): the loop skips the last row of `lines` -- confirm intended.
SEpoints = np.zeros([lines.shape[0],lines.shape[1],2])
for i in range (0, lines.shape[0] - 1):
    rho = lines[i][0]
    theta = lines[i][1]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
    y0 = b*rho
    x1 = int(x0 + 2000*(-b))
    y1 = int(y0 + 2000*(a))
    x2 = int(x0 - 2000*(-b))
    y2 = int(y0 - 2000*(a))
    SEpoints[i][0] = ([x1,y1])
    SEpoints[i][1] = ([x2,y2])
    cv2.line(lineimg,(x1,y1),(x2,y2),(0,0,255),2)
intgrid = []
#------ Calculating Intersections ------
## Change to loop based on h and v size to eliminate duplicates
# Intersect every pair of distinct segments; keep finite intersections that
# fall (roughly) within the image and mark them with green dots.
for i in range (0, SEpoints.shape[0] - 1):
    for j in range (0, SEpoints.shape[0] - 1):
        if (np.array_equal(SEpoints[i], SEpoints[j]) == False):
            # NOTE(review): this branch is a no-op -- apparently a leftover
            # anchor for a debugger breakpoint.
            if (len(intgrid) > 520):
                i = i
            ntr = intersection(SEpoints[i][0],SEpoints[i][1],SEpoints[j][0],SEpoints[j][1])
            if (ntr[0] != np.inf and ntr[1] != np.inf):
                if (isOutlier(img.shape,ntr[0],ntr[1]) == False):
                    intgrid.append(ntr)
                    cv2.circle(lineimg,(int(ntr[0]),int(ntr[1])), 5, (0,255,0), -1)
intgrid = np.array(intgrid).astype(int)
#print intgrid
posLim = intgrid.shape[0]/2
# Hard-coded intersection index; presumably selects one specific grid cell
# for this test image -- TODO derive from vsize/hsize instead.
pos = 539
x = vsize - 1
y = hsize - 2
grid = np.zeros([x,y])
#intgrid = np.reshape(intgrid,[hsize,vsize,2])
#intgrid[pos],intgrid[pos+1],intgrid[pos+vsize],intgrid[pos+vsize+1]
#for count in range
contours = np.array( [intgrid[pos],intgrid[pos+1],intgrid[pos+hsize],intgrid[pos+hsize-1]] ) # top left,bottom left,bottom right,top right
img = np.zeros( (200,200) ) # create a single channel 200x200 pixel black image
cv2.fillPoly(lineimg, pts =[contours], color=(255,0,0))
cv2.imshow('Lines',lineimg)
print("--- %s seconds ---" % (time.time() - start_time))
from functools import wraps
def catch_all_exceptions(func):
    """
    Decorator that runs *func* and swallows any ordinary exception it raises.

    Used to abstract the try/except block for best-effort calls whose failure
    must not affect the final status of an action.  The wrapped function's
    return value is intentionally discarded (the wrapper always returns None).

    Fix: the original bare ``except:`` also swallowed ``SystemExit`` and
    ``KeyboardInterrupt``; only ``Exception`` subclasses are caught now, so
    process shutdown still propagates.
    """
    @wraps(func)
    def func_wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            # Best-effort by design: ignore the failure entirely.
            pass
    return func_wrapper
|
# pylint: disable=protected-access
import os
import glob
import radical.utils as ru
from .. import states as s
from .session import fetch_json
_debug = os.environ.get('RP_PROF_DEBUG')
# ------------------------------------------------------------------------------
#
# pilot and unit activities: core hours are derived by multiplying the
# respective time durations with pilot size / unit size. The 'idle'
# utilization and the 'agent' utilization are derived separately.
#
# Note that durations should add up to the `x_total` generations to ensure
# accounting for the complete unit/pilot utilization.
#
# An updated list of events is available at docs/source/events.md
# Maps a utilization category ('provide', 'consume', 'agent') to named time
# spans, each bounded by a start and a stop event/state (see events.md).
PILOT_DURATIONS = {
    'provide' : {
        'total' : [{ru.EVENT: 'bootstrap_0_start'},
                       {ru.EVENT: 'bootstrap_0_stop' }]
    },
    # times between PMGR_ACTIVE and the termination command are not
    # considered pilot specific consumptions. If some resources remain
    # unused during that time, it is either due to inefficiencies of
    # workload management (accounted for in the unit consumption metrics),
    # or the pilot is starving for workload.
    'consume' : {
        'boot' : [{ru.EVENT: 'bootstrap_0_start'},
                       {ru.EVENT: 'sync_rel' }],
        'setup_1' : [{ru.EVENT: 'sync_rel' },
                       {ru.STATE: s.PMGR_ACTIVE }],
        'ignore' : [{ru.STATE: s.PMGR_ACTIVE },
                       {ru.EVENT: 'cmd' ,
                        ru.MSG : 'cancel_pilot' }],
        'term' : [{ru.EVENT: 'cmd' ,
                        ru.MSG : 'cancel_pilot' },
                       {ru.EVENT: 'bootstrap_0_stop' }],
    },
    # FIXME: separate out DVM startup time
    # 'rte' : [{ru.STATE: s.PMGR_ACTIVE },
    # {ru.STATE: s.PMGR_ACTIVE }],
    # 'setup_2' : [{ru.STATE: s.PMGR_ACTIVE },
    # {ru.STATE: s.PMGR_ACTIVE }],
    #
    # resources on agent nodes are consumed for all of the pilot's lifetime
    'agent' : {
        'total' : [{ru.EVENT: 'bootstrap_0_start'},
                       {ru.EVENT: 'bootstrap_0_stop' }]
    }
}
# The set of default unit durations that are available for every unit
# description, default resource configuration, and default scheduler and
# launcher.
# Contiguous spans from scheduling to unschedule for a unit under the default
# scheduler/launcher (no app events, no PRRTE events).
UNIT_DURATIONS_DEFAULT = {
    'consume' : {
        'exec_queue' : [{ru.EVENT: 'schedule_ok' },
                        {ru.STATE: s.AGENT_EXECUTING }],
        'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
                        {ru.EVENT: 'exec_start' }],
        'exec_rp' : [{ru.EVENT: 'exec_start' },
                        {ru.EVENT: 'cu_start' }],
        'exec_sh' : [{ru.EVENT: 'cu_start' },
                        {ru.EVENT: 'cu_exec_start' }],
        'exec_cmd' : [{ru.EVENT: 'cu_exec_start' },
                        {ru.EVENT: 'cu_exec_stop' }],
        'term_sh' : [{ru.EVENT: 'cu_exec_stop' },
                        {ru.EVENT: 'cu_stop' }],
        'term_rp' : [{ru.EVENT: 'cu_stop' },
                        {ru.EVENT: 'exec_stop' }],
        'unschedule' : [{ru.EVENT: 'exec_stop' },
                        {ru.EVENT: 'unschedule_stop' }]
        # # if we have cmd_start / cmd_stop:
        # 'exec_sh' : [{ru.EVENT: 'cu_start' },
        # {ru.EVENT: 'cmd_start' }],
        # 'exec_cmd' : [{ru.EVENT: 'cmd_start' },
        # {ru.EVENT: 'cmd_stop' }],
        # 'term_sh' : [{ru.EVENT: 'cmd_stop' },
        # {ru.EVENT: 'cu_stop' }],
    }
}
# The set of default unit durations augmented with the durations of the app
# events. App events are generated by RADICAL Synapse and by `hello_rp.sh`. The
# latter is useful for testing as a sleep command drop-in.
# Default unit durations plus app_start/app_stop events, which split exec_cmd
# into init_app / exec_cmd / term_app.
UNIT_DURATIONS_APP = {
    'consume' : {
        'exec_queue' : [{ru.EVENT: 'schedule_ok' },
                        {ru.STATE: s.AGENT_EXECUTING }],
        'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
                        {ru.EVENT: 'exec_start' }],
        'exec_rp' : [{ru.EVENT: 'exec_start' },
                        {ru.EVENT: 'cu_start' }],
        'exec_sh' : [{ru.EVENT: 'cu_start' },
                        {ru.EVENT: 'cu_exec_start' }],
        'init_app' : [{ru.EVENT: 'cu_exec_start' },
                        {ru.EVENT: 'app_start' }],
        'exec_cmd' : [{ru.EVENT: 'app_start' },
                        {ru.EVENT: 'app_stop' }],
        'term_app' : [{ru.EVENT: 'app_stop' },
                        {ru.EVENT: 'cu_exec_stop' }],
        'term_sh' : [{ru.EVENT: 'cu_exec_stop' },
                        {ru.EVENT: 'cu_stop' }],
        'term_rp' : [{ru.EVENT: 'cu_stop' },
                        {ru.EVENT: 'exec_stop' }],
        'unschedule' : [{ru.EVENT: 'exec_stop' },
                        {ru.EVENT: 'unschedule_stop' }]
    }
}
# The set of default unit durations with the durations generated when using
# PRRTE as launch method.
# Default unit durations plus the PRRTE launch-method events, which split the
# command execution into prte_phase_1..4 around exec_cmd.
UNIT_DURATIONS_PRTE = {
    'consume' : {
        'exec_queue' : [{ru.EVENT: 'schedule_ok' },
                        {ru.STATE: s.AGENT_EXECUTING }],
        'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
                        {ru.EVENT: 'exec_start' }],
        'exec_rp' : [{ru.EVENT: 'exec_start' },
                        {ru.EVENT: 'cu_start' }],
        'exec_sh' : [{ru.EVENT: 'cu_start' },
                        {ru.EVENT: 'cu_exec_start' }],
        'prte_phase_1': [{ru.EVENT: 'cu_exec_start' },
                        {ru.EVENT: 'prte_init_complete' }],
        'prte_phase_2': [{ru.EVENT: 'prte_init_complete' },
                        {ru.EVENT: 'prte_sending_launch_msg'}],
        'exec_cmd' : [{ru.EVENT: 'prte_sending_launch_msg'},
                        {ru.EVENT: 'prte_iof_complete' }],
        'prte_phase_3': [{ru.EVENT: 'prte_iof_complete' },
                        {ru.EVENT: 'prte_notify_completed' }],
        'prte_phase_4': [{ru.EVENT: 'prte_notify_completed' },
                        {ru.EVENT: 'cu_exec_stop' }],
        'term_sh' : [{ru.EVENT: 'cu_exec_stop' },
                        {ru.EVENT: 'cu_stop' }],
        'term_rp' : [{ru.EVENT: 'cu_stop' },
                        {ru.EVENT: 'exec_stop' }],
        'unschedule' : [{ru.EVENT: 'exec_stop' },
                        {ru.EVENT: 'unschedule_stop' }],
        # # if we have app_start / app_stop:
        # 'prte_phase_2': [{ru.EVENT: 'prte_init_complete' },
        # {ru.EVENT: 'cmd_start' }],
        # 'exec_cmd' : [{ru.EVENT: 'cmd_start' },
        # {ru.EVENT: 'cmd_stop' }],
        # 'prte_phase_3': [{ru.EVENT: 'cmd_stop' },
        # {ru.EVENT: 'prte_notify_completed' }],
    }
}
# The set of default unit durations with the durations generated when using
# PRRTE as launch method and an app that records app events (e.g., RADICAL
# Synapse and `hello_rp.sh`).
# PRRTE durations combined with app events: exec_cmd is further bounded by
# app_start/app_stop, with init_app and term_app around it.
UNIT_DURATIONS_PRTE_APP = {
    'consume' : {
        'exec_queue' : [{ru.EVENT: 'schedule_ok' },
                        {ru.STATE: s.AGENT_EXECUTING }],
        'exec_prep' : [{ru.STATE: s.AGENT_EXECUTING },
                        {ru.EVENT: 'exec_start' }],
        'exec_rp' : [{ru.EVENT: 'exec_start' },
                        {ru.EVENT: 'cu_start' }],
        'exec_sh' : [{ru.EVENT: 'cu_start' },
                        {ru.EVENT: 'cu_exec_start' }],
        'prte_phase_1': [{ru.EVENT: 'cu_exec_start' },
                        {ru.EVENT: 'prte_init_complete' }],
        'prte_phase_2': [{ru.EVENT: 'prte_init_complete' },
                        {ru.EVENT: 'prte_sending_launch_msg'}],
        'init_app' : [{ru.EVENT: 'prte_sending_launch_msg'},
                        {ru.EVENT: 'app_start' }],
        'exec_cmd' : [{ru.EVENT: 'app_start' },
                        {ru.EVENT: 'app_stop' }],
        'term_app' : [{ru.EVENT: 'app_stop' },
                        {ru.EVENT: 'prte_iof_complete' }],
        'prte_phase_3': [{ru.EVENT: 'prte_iof_complete' },
                        {ru.EVENT: 'prte_notify_completed' }],
        'prte_phase_4': [{ru.EVENT: 'prte_notify_completed' },
                        {ru.EVENT: 'cu_exec_stop' }],
        'term_sh' : [{ru.EVENT: 'cu_exec_stop' },
                        {ru.EVENT: 'cu_stop' }],
        'term_rp' : [{ru.EVENT: 'cu_stop' },
                        {ru.EVENT: 'exec_stop' }],
        'unschedule' : [{ru.EVENT: 'exec_stop' },
                        {ru.EVENT: 'unschedule_stop' }]
    }
}
# ----------------------------------------------------------------------------
#
def _convert_sdurations(sdurations):
    '''
    Converts a collection of durations expressed in short form into the same
    collection expressed in long form.

    Definitions:
    - Short form collection: one dictionary of short form durations.
    - Long form collection: one dictionary of long form durations.

    Each duration is a list of timestamps; a timestamp is either a single
    short-form dict (expanded directly) or a list of alternative short-form
    dicts (each expanded, the list structure preserved).

    Args:
        sdurations (dict): a collection of durations in short form
    Return:
        ldurations (dict): a collection of long form durations
    '''
    ldurations = dict()
    for name, timestamps in sdurations.items():
        converted = list()
        for ts in timestamps:
            if isinstance(ts, dict):
                converted.append(_expand_sduration(ts))
            elif isinstance(ts, list):
                converted.append([_expand_sduration(item) for item in ts])
        ldurations[name] = converted
    return ldurations
# ----------------------------------------------------------------------------
#
def _expand_sduration(sduration):
    '''
    Expand a duration expressed in short form to its long form for the
    timestamp types `ru.STATE`, `ru.EVENT` and `ru.MSG`.

    Definitions:
      - short form duration: one dict containing a state, event or msg name;
      - long form duration: one dict containing two keys, one of type
        `ru.EVENT` and one of type `ru.STATE` (or `ru.MSG`).

    Args:
        sduration (dict): a duration in short form
    Returns:
        dict: sduration in long form
    Raises:
        Exception: if the duration is empty, has more than two timestamps,
            or uses an unknown timestamp type.

    Example:
        sduration = {'STATE': s.STATE_NAME}
        lduration = {ru.EVENT: 'state', ru.STATE: s.STATE_NAME}
        sduration = {'EVENT': 'event_name'}
        lduration = {ru.EVENT: 'event_name', ru.STATE: None}
        sduration = {'MSG': 'message_name'}
        lduration = {ru.EVENT: 'cmd', ru.MSG: 'message_name'}
    '''
    tt = list(sduration.keys())

    # Allow durations with both ru.EVENT and ru.STATE: a duration with two
    # keys is assumed to be in long form already and is passed through.
    if len(tt) == 2:
        return sduration
    if len(tt) > 2:
        raise Exception('invalid duration: too many timestamps (%s)' % tt)
    # FIX: an empty duration used to silently return `None`; raise instead.
    if not tt:
        raise Exception('invalid duration: no timestamp given')
    if tt[0] not in ['STATE', 'EVENT', 'MSG']:
        raise Exception('unknown timestamp type: %s' % tt)

    # Expand the single known timestamp.
    k = tt[0]
    v = sduration[k]
    if k == 'STATE':
        return {ru.EVENT: 'state', ru.STATE: v}
    elif k == 'EVENT':
        return {ru.EVENT: v, ru.STATE: None}
    else:  # 'MSG'
        return {ru.EVENT: 'cmd', ru.MSG: v}
# Set of default pilot durations for RADICAL-Analytics (debug granularity).
# All the durations are contiguous.
# NOTE: _init durations are most often 0.
PILOT_DURATIONS_DEBUG_SHORT = {
    'p_pmgr_create'           : [{'STATE': s.NEW                    },
                                 {'STATE': s.PMGR_LAUNCHING_PENDING }],
    'p_pmgr_launching_init'   : [{'STATE': s.PMGR_LAUNCHING_PENDING },
                                 {'STATE': s.PMGR_LAUNCHING         }],
    'p_pmgr_launching'        : [{'STATE': s.PMGR_LAUNCHING         },
                                 {'EVENT': 'staging_in_start'       }],
    'p_pmgr_stage_in'         : [{'EVENT': 'staging_in_start'       },
                                 {'EVENT': 'staging_in_stop'        }],
    'p_pmgr_submission_init'  : [{'EVENT': 'staging_in_stop'        },
                                 {'EVENT': 'submission_start'       }],
    'p_pmgr_submission'       : [{'EVENT': 'submission_start'       },
                                 {'EVENT': 'submission_stop'        }],
    'p_pmgr_scheduling_init'  : [{'EVENT': 'submission_stop'        },
                                 {'STATE': s.PMGR_ACTIVE_PENDING    }],
    # batch system queue time
    'p_pmgr_scheduling'       : [{'STATE': s.PMGR_ACTIVE_PENDING    },
                                 {'EVENT': 'bootstrap_0_start'      }],
    'p_agent_ve_setup_init'   : [{'EVENT': 'bootstrap_0_start'      },
                                 {'EVENT': 've_setup_start'         }],
    'p_agent_ve_setup'        : [{'EVENT': 've_setup_start'         },
                                 {'EVENT': 've_setup_stop'          }],
    'p_agent_ve_activate_init': [{'EVENT': 've_setup_stop'          },
                                 {'EVENT': 've_activate_start'      }],
    'p_agent_ve_activate'     : [{'EVENT': 've_activate_start'      },
                                 {'EVENT': 've_activate_stop'       }],
    'p_agent_install_init'    : [{'EVENT': 've_activate_stop'       },
                                 {'EVENT': 'rp_install_start'       }],
    'p_agent_install'         : [{'EVENT': 'rp_install_start'       },
                                 {'EVENT': 'rp_install_stop'        }],
    'p_agent_launching'       : [{'EVENT': 'rp_install_stop'        },
                                 {'STATE': s.PMGR_ACTIVE            }],
    'p_agent_terminate_init'  : [{'STATE': s.PMGR_ACTIVE            },
                                 {'MSG'  : 'cancel_pilot'           }],
    'p_agent_terminate'       : [{'MSG'  : 'cancel_pilot'           },
                                 {'EVENT': 'bootstrap_0_stop'       }],
    # termination can end in any of the final states
    'p_agent_finalize'        : [{'EVENT': 'bootstrap_0_stop'       },
                                 [{'STATE': s.DONE                  },
                                  {'STATE': s.CANCELED              },
                                  {'STATE': s.FAILED                }]],
    # total pilot runtime
    'p_agent_runtime'         : [{'EVENT': 'bootstrap_0_start'      },
                                 {'EVENT': 'bootstrap_0_stop'       }]
}

PILOT_DURATIONS_DEBUG = _convert_sdurations(PILOT_DURATIONS_DEBUG_SHORT)

# Debug pilot durations tagged with keys that can be used when calculating
# resource utilization.
# TODO: add the 'client' tag to relevant resource utilization methods.
_pdd = PILOT_DURATIONS_DEBUG
PILOT_DURATIONS_DEBUG_RU = {
    'provide' : {
        'p_agent_runtime'         : _pdd['p_agent_runtime']
    },
    'client'  : {
        'p_pmgr_create'           : _pdd['p_pmgr_create'],
        'p_pmgr_launching_init'   : _pdd['p_pmgr_launching_init'],
        'p_pmgr_launching'        : _pdd['p_pmgr_launching'],
        'p_pmgr_stage_in'         : _pdd['p_pmgr_stage_in'],
        'p_pmgr_submission_init'  : _pdd['p_pmgr_submission_init'],
        'p_pmgr_submission'       : _pdd['p_pmgr_submission'],
        'p_pmgr_scheduling_init'  : _pdd['p_pmgr_scheduling_init'],
        'p_pmgr_scheduling'       : _pdd['p_pmgr_scheduling'],
        'p_agent_finalize'        : _pdd['p_agent_finalize']
    },
    'consume' : {
        'p_agent_ve_setup_init'   : _pdd['p_agent_ve_setup_init'],
        'p_agent_ve_setup'        : _pdd['p_agent_ve_setup'],
        'p_agent_ve_activate_init': _pdd['p_agent_ve_activate_init'],
        'p_agent_ve_activate'     : _pdd['p_agent_ve_activate'],
        'p_agent_install_init'    : _pdd['p_agent_install_init'],
        'p_agent_install'         : _pdd['p_agent_install'],
        'p_agent_launching'       : _pdd['p_agent_launching'],
        'p_agent_terminate_init'  : _pdd['p_agent_terminate_init'],
        'p_agent_terminate'       : _pdd['p_agent_terminate']
    },
    'agent'   : {
        'p_agent_runtime'         : _pdd['p_agent_runtime']
    }
}
# Set of default unit durations for RADICAL-Analytics (debug granularity).
# All the durations are contiguous.
UNIT_DURATIONS_DEBUG_SHORT = {
    'u_umgr_create'              : [{'STATE': s.NEW                          },
                                    {'STATE': s.UMGR_SCHEDULING_PENDING      }],
    'u_umgr_schedule_queue'      : [{'STATE': s.UMGR_SCHEDULING_PENDING      },
                                    {'STATE': s.UMGR_SCHEDULING              }],
    'u_umgr_schedule'            : [{'STATE': s.UMGR_SCHEDULING              },
                                    {'STATE': s.UMGR_STAGING_INPUT_PENDING   }],
    # push to mongodb
    'u_umgr_stage_in_queue'      : [{'STATE': s.UMGR_STAGING_INPUT_PENDING   },
                                    {'STATE': s.UMGR_STAGING_INPUT           }],
    # wait in mongodb
    'u_umgr_stage_in'            : [{'STATE': s.UMGR_STAGING_INPUT           },
                                    {'STATE': s.AGENT_STAGING_INPUT_PENDING  }],
    # pull from mongodb
    'u_agent_stage_in_queue'     : [{'STATE': s.AGENT_STAGING_INPUT_PENDING  },
                                    {'STATE': s.AGENT_STAGING_INPUT          }],
    'u_agent_stage_in'           : [{'STATE': s.AGENT_STAGING_INPUT          },
                                    {'STATE': s.AGENT_SCHEDULING_PENDING     }],
    'u_agent_schedule_queue'     : [{'STATE': s.AGENT_SCHEDULING_PENDING     },
                                    {'STATE': s.AGENT_SCHEDULING             }],
    'u_agent_schedule'           : [{'STATE': s.AGENT_SCHEDULING             },
                                    {'STATE': s.AGENT_EXECUTING_PENDING      }],
    'u_agent_execute_queue'      : [{'STATE': s.AGENT_EXECUTING_PENDING      },
                                    {'STATE': s.AGENT_EXECUTING              }],
    'u_agent_execute_prepare'    : [{'STATE': s.AGENT_EXECUTING              },
                                    {'EVENT': 'exec_mkdir'                   }],
    'u_agent_execute_mkdir'      : [{'EVENT': 'exec_mkdir'                   },
                                    {'EVENT': 'exec_mkdir_done'              }],
    'u_agent_execute_layer_start': [{'EVENT': 'exec_mkdir_done'              },
                                    {'EVENT': 'exec_start'                   }],
    # orte, ssh, mpi, ...
    'u_agent_execute_layer'      : [{'EVENT': 'exec_start'                   },
                                    [{'EVENT': 'exec_ok'                     },
                                     {'EVENT': 'exec_fail'                   }]],
    # PROBLEM: discontinuity
    'u_agent_lm_start'           : [{'EVENT': 'cu_start'                     },
                                    {'EVENT': 'cu_pre_start'                 }],
    'u_agent_lm_pre_execute'     : [{'EVENT': 'cu_pre_start'                 },
                                    {'EVENT': 'cu_pre_stop'                  }],
    'u_agent_lm_execute_start'   : [{'EVENT': 'cu_pre_stop'                  },
                                    {'EVENT': 'cu_exec_start'                }],
    'u_agent_lm_execute'         : [{'EVENT': 'cu_exec_start'                },
                                    {'EVENT': 'cu_exec_stop'                 }],
    'u_agent_lm_stop'            : [{'EVENT': 'cu_exec_stop'                 },
                                    {'EVENT': 'cu_stop'                      }],
    'u_agent_stage_out_start'    : [{'EVENT': 'cu_stop'                      },
                                    {'STATE': s.AGENT_STAGING_OUTPUT_PENDING }],
    'u_agent_stage_out_queue'    : [{'STATE': s.AGENT_STAGING_OUTPUT_PENDING },
                                    {'STATE': s.AGENT_STAGING_OUTPUT         }],
    'u_agent_stage_out'          : [{'STATE': s.AGENT_STAGING_OUTPUT         },
                                    {'STATE': s.UMGR_STAGING_OUTPUT_PENDING  }],
    # push/pull mongodb
    'u_agent_push_to_umgr'       : [{'STATE': s.UMGR_STAGING_OUTPUT_PENDING  },
                                    {'STATE': s.UMGR_STAGING_OUTPUT          }],
    # teardown can end in any of the final states
    'u_umgr_destroy'             : [{'STATE': s.UMGR_STAGING_OUTPUT          },
                                    [{'STATE': s.DONE                        },
                                     {'STATE': s.CANCELED                    },
                                     {'STATE': s.FAILED                      }]],
    'u_agent_unschedule'         : [{'EVENT': 'unschedule_start'             },
                                    {'EVENT': 'unschedule_stop'              }]
}

UNIT_DURATIONS_DEBUG = _convert_sdurations(UNIT_DURATIONS_DEBUG_SHORT)

# Debug unit durations tagged with keys that can be used when calculating
# resource utilization.
# TODO: add the 'client' tag to relevant resource utilization methods.
_udd = UNIT_DURATIONS_DEBUG
UNIT_DURATIONS_DEBUG_RU = {
    'client'  : {
        'u_umgr_create'              : _udd['u_umgr_create'],
        'u_umgr_schedule_queue'      : _udd['u_umgr_schedule_queue'],
        'u_umgr_schedule'            : _udd['u_umgr_schedule'],
        'u_umgr_stage_in_queue'      : _udd['u_umgr_stage_in_queue'],
        'u_umgr_stage_in'            : _udd['u_umgr_stage_in'],
        'u_umgr_destroy'             : _udd['u_umgr_destroy'],
        'u_agent_unschedule'         : _udd['u_agent_unschedule']
    },
    'consume' : {
        'u_agent_stage_in_queue'     : _udd['u_agent_stage_in_queue'],
        'u_agent_stage_in'           : _udd['u_agent_stage_in'],
        'u_agent_schedule_queue'     : _udd['u_agent_schedule_queue'],
        'u_agent_schedule'           : _udd['u_agent_schedule'],
        'u_agent_execute_queue'      : _udd['u_agent_execute_queue'],
        'u_agent_execute_prepare'    : _udd['u_agent_execute_prepare'],
        'u_agent_execute_mkdir'      : _udd['u_agent_execute_mkdir'],
        'u_agent_execute_layer_start': _udd['u_agent_execute_layer_start'],
        'u_agent_execute_layer'      : _udd['u_agent_execute_layer'],
        'u_agent_lm_start'           : _udd['u_agent_lm_start'],
        'u_agent_lm_pre_execute'     : _udd['u_agent_lm_pre_execute'],
        'u_agent_lm_execute_start'   : _udd['u_agent_lm_execute_start'],
        'u_agent_lm_execute'         : _udd['u_agent_lm_execute'],
        'u_agent_lm_stop'            : _udd['u_agent_lm_stop'],
        'u_agent_stage_out_start'    : _udd['u_agent_stage_out_start'],
        'u_agent_stage_out_queue'    : _udd['u_agent_stage_out_queue'],
        'u_agent_stage_out'          : _udd['u_agent_stage_out'],
        'u_agent_push_to_umgr'       : _udd['u_agent_push_to_umgr'],
    }
}
# ------------------------------------------------------------------------------
#
def get_hostmap(profile):
    '''
    We abuse the profile combination to also derive a pilot-host map, which
    will tell us on what exact host each pilot has been running.  Every
    'hostname' event in the combined profile maps the entity uid to the host
    name carried in the event message.
    '''
    # FIXME: This should be replaced by proper hostname logging
    #        in `pilot.resource_details`.
    return {entry[ru.UID]: entry[ru.MSG]
            for entry in profile
            if entry[ru.EVENT] == 'hostname'}
# ------------------------------------------------------------------------------
#
def get_hostmap_deprecated(profiles):
    '''
    This method mangles combine_profiles and get_hostmap, and is deprecated.
    At this point it only returns the hostmap.
    '''
    hostmap = dict()  # map pilot IDs to host names

    for pname, prof in profiles.items():

        # skip empty profiles and profiles without a sync message
        if not prof or not prof[0][ru.MSG]:
            continue

        # the sync message carries 'host:ip:...' (five fields)
        host, ip, _, _, _ = prof[0][ru.MSG].split(':')
        host_id = '%s:%s' % (host, ip)

        for row in prof:

            if  'agent.0.prof' in pname        and \
                row[ru.EVENT]  == 'advance'    and \
                row[ru.STATE]  == s.PMGR_ACTIVE:
                hostmap[row[ru.UID]] = host_id
                break

    return hostmap
# ------------------------------------------------------------------------------
#
def get_session_profile(sid, src=None):
    '''
    Read, combine and clean all profiles of session `sid`.  Profiles are
    searched under `src` (defaulting to `$PWD/<sid>`); if that path does not
    exist, they are fetched.  Returns (profile, accuracy, hostmap).
    '''
    if not src:
        src = "%s/%s" % (os.getcwd(), sid)

    if os.path.exists(src):
        # we have profiles locally
        profiles  = glob.glob("%s/*.prof"   % src)
        profiles += glob.glob("%s/*/*.prof" % src)
    else:
        # need to fetch profiles
        from .session import fetch_profiles
        profiles = fetch_profiles(sid=sid, skip_existing=True)

    # filter out some frequent, but uninteresting events
    skip_events = [
              # 'get',
                'publish',
                'schedule_skip',
                'schedule_fail',
                'staging_stderr_start',
                'staging_stderr_stop',
                'staging_stdout_start',
                'staging_stdout_stop',
                'staging_uprof_start',
                'staging_uprof_stop',
                'update_pushed',
    ]
    efilter = {ru.EVENT: skip_events}

    profiles          = ru.read_profiles(profiles, sid, efilter=efilter)
    profile, accuracy = ru.combine_profiles(profiles)
    profile           = ru.clean_profile(profile, sid, s.FINAL, s.CANCELED)

    hostmap = get_hostmap(profile)
    if not hostmap:
        # FIXME: legacy host notation - deprecated
        hostmap = get_hostmap_deprecated(profiles)

    return profile, accuracy, hostmap
# ------------------------------------------------------------------------------
#
def get_session_description(sid, src=None, dburl=None):
    """
    This will return a description which is usable for radical.analytics
    evaluation.  It informs about
      - set of stateful entities
      - state models of those entities
      - event models of those entities (maybe)
      - configuration of the application / module

    If `src` is given, it is interpreted as path to search for session
    information (json dump).  `src` defaults to `$PWD/$sid`.

    if `dburl` is given, its value is used to fetch session information
    from a database.  The dburl value defaults to `RADICAL_PILOT_DBURL`.
    """
    if not src:
        src = "%s/%s" % (os.getcwd(), sid)

    # prefer a local json dump; otherwise fetch it from the database
    if os.path.isfile('%s/%s.json' % (src, sid)):
        json = ru.read_json('%s/%s.json' % (src, sid))
    else:
        ftmp = fetch_json(sid=sid, dburl=dburl, tgt=src, skip_existing=True)
        json = ru.read_json(ftmp)

    # make sure we have uids
    # FIXME v0.47: deprecate
    def fix_json(json):
        # recursively normalize deprecated keys in the json document
        def fix_uids(json):
            if isinstance(json, list):
                for elem in json:
                    fix_uids(elem)
            elif isinstance(json, dict):
                # map deprecated long manager names to their short forms
                if 'unitmanager' in json and 'umgr' not in json:
                    json['umgr'] = json['unitmanager']
                if 'pilotmanager' in json and 'pmgr' not in json:
                    json['pmgr'] = json['pilotmanager']
                # the MongoDB '_id' doubles as uid
                if '_id' in json and 'uid' not in json:
                    json['uid'] = json['_id']
                if 'cfg' not in json:
                    json['cfg'] = dict()
                for v in json.values():
                    fix_uids(v)
        fix_uids(json)
    fix_json(json)

    assert(sid == json['session']['uid']), 'sid inconsistent'

    ret             = dict()
    ret['entities'] = dict()

    # build the entity tree: session -> {pmgr, umgr} -> {pilot, unit}
    tree      = dict()
    tree[sid] = {'uid'      : sid,
                 'etype'    : 'session',
                 'cfg'      : json['session']['cfg'],
                 'has'      : ['umgr', 'pmgr'],
                 'children' : list()
                }

    for pmgr in sorted(json['pmgr'], key=lambda k: k['uid']):
        uid = pmgr['uid']
        tree[sid]['children'].append(uid)
        tree[uid] = {'uid'      : uid,
                     'etype'    : 'pmgr',
                     'cfg'      : pmgr['cfg'],
                     'has'      : ['pilot'],
                     'children' : list()
                    }

    for umgr in sorted(json['umgr'], key=lambda k: k['uid']):
        uid = umgr['uid']
        tree[sid]['children'].append(uid)
        tree[uid] = {'uid'      : uid,
                     'etype'    : 'umgr',
                     'cfg'      : umgr['cfg'],
                     'has'      : ['unit'],
                     'children' : list()
                    }
        # also inject the pilot description, and resource specifically
        tree[uid]['description'] = dict()

    for pilot in sorted(json['pilot'], key=lambda k: k['uid']):
        uid  = pilot['uid']
        pmgr = pilot['pmgr']
        # expose the resource details via the pilot cfg
        pilot['cfg']['resource_details'] = pilot['resource_details']
        tree[pmgr]['children'].append(uid)
        tree[uid] = {'uid'         : uid,
                     'etype'       : 'pilot',
                     'cfg'         : pilot['cfg'],
                     'description' : pilot['description'],
                     'has'         : ['unit'],
                     'children'    : list()
                    }
        # also inject the pilot description, and resource specifically

    for unit in sorted(json['unit'], key=lambda k: k['uid']):
        uid  = unit['uid']
        pid  = unit['pilot']
        umgr = unit['umgr']
        # units are children of both their pilot and their umgr
        tree[pid ]['children'].append(uid)
        tree[umgr]['children'].append(uid)
        tree[uid] = {'uid'         : uid,
                     'etype'       : 'unit',
                     'cfg'         : unit,
                     'description' : unit['description'],
                     'has'         : list(),
                     'children'    : list()
                    }
        # remove duplicate
        del(tree[uid]['cfg']['description'])

    ret['tree'] = tree

    ret['entities']['pilot']   = {'state_model'  : s._pilot_state_values,
                                  'state_values' : s._pilot_state_inv_full,
                                  'event_model'  : dict()}
    ret['entities']['unit']    = {'state_model'  : s._unit_state_values,
                                  'state_values' : s._unit_state_inv_full,
                                  'event_model'  : dict()}
    ret['entities']['session'] = {'state_model'  : None,  # has no states
                                  'state_values' : None,
                                  'event_model'  : dict()}

    ret['config'] = dict()  # session config goes here

    return ret
# ------------------------------------------------------------------------------
#
def get_node_index(node_list, node, cpn, gpn):
    '''
    Return the inclusive [first, last] range of resource indexes occupied by
    `node`, assuming every node in `node_list` contributes `cpn` cores plus
    `gpn` GPUs to a linear, zero-based resource index space.
    '''
    stride = cpn + gpn
    first  = node_list.index(node) * stride
    last   = first + stride - 1
    return [first, last]
# ------------------------------------------------------------------------------
#
def get_duration(thing, dur):
    # Return the (start, stop) timestamps of duration `dur` for entity
    # `thing`, or [None, None] if either end of the duration has no
    # matching timestamps.  `dur` is a 2-element list of event dicts.
    #
    # NOTE: short-form STATE entries are expanded *in place*: the passed
    #       duration dicts (often module-level constants) are mutated.
    #       The mutation is idempotent, so repeated calls are safe.
    for e in dur:
        if ru.STATE in e and ru.EVENT not in e:
            e[ru.EVENT] = 'state'
    # earliest matching start, latest matching stop
    t0 = thing.timestamps(event=dur[0])
    t1 = thing.timestamps(event=dur[1])
    if not len(t0) or not len(t1):
        return [None, None]
    return(t0[0], t1[-1])
# ------------------------------------------------------------------------------
#
def cluster_resources(resources):
    '''
    Cluster resource indexes into continuous stretches.

    `resources` is a list of
      - single indexes (a single core or gpu), and/or
      - [r0, r1] pairs (inclusive ranges of core/gpu indexes).

    Returns a sorted list of [r0, r1] pairs, one per continuous stretch of
    indexes; a lone index i is returned as [i, i].  Duplicates and
    overlapping ranges are merged.

    BUGFIX: the previous implementation dropped the current index when a
    discontinuity was found before a second range element was seen, e.g.
    cluster_resources([1, 3]) returned [[1, 1]] instead of [[1, 1], [3, 3]].
    '''
    # flatten everything into a set of individual indexes (de-duplicates)
    idx = set()
    for r in resources:
        if isinstance(r, int):
            idx.add(r)
        else:
            for i in range(r[0], r[1] + 1):
                idx.add(i)

    ret = list()
    r0  = None  # start of the stretch currently being built
    r1  = None  # end   of the stretch currently being built (None: len 1)

    for i in sorted(idx):

        if r0 is None:
            # start a new stretch
            r0 = i
            continue

        last = r1 if r1 is not None else r0
        if i == last + 1:
            # contiguous: extend the current stretch
            r1 = i
            continue

        # discontinuity: emit the current stretch, start a new one at `i`
        ret.append([r0, last])
        r0 = i
        r1 = None

    # emit the trailing stretch, if any
    if r0 is not None:
        ret.append([r0, r1 if r1 is not None else r0])

    return ret
# ------------------------------------------------------------------------------
#
def _get_pilot_provision(pilot):
    # Compute, for each metric in PILOT_DURATIONS['provide'], the resource
    # boxes [t0, t1, r0, r1] provided by the given pilot: one box per node,
    # spanning all of that node's cores and GPUs for the metric's duration.
    # NOTE(review): PILOT_DURATIONS is a module-level table defined outside
    # this chunk; the result is stored under the key 'total' — presumably
    # 'provide' contains only that one metric (TODO confirm).
    pid = pilot.uid
    cpn = pilot.cfg['resource_details']['rm_info']['cores_per_node']
    gpn = pilot.cfg['resource_details']['rm_info']['gpus_per_node']
    ret = dict()

    nodes, _, _ = _get_nodes(pilot)

    for metric in PILOT_DURATIONS['provide']:

        boxes  = list()
        t0, t1 = get_duration(pilot, PILOT_DURATIONS['provide'][metric])

        if t0 is None:
            # duration not found: fall back to the pilot's full event range
            t0 = pilot.events [0][ru.TIME]
            t1 = pilot.events[-1][ru.TIME]

        for node in nodes:
            r0, r1 = get_node_index(nodes, node, cpn, gpn)
            boxes.append([t0, t1, r0, r1])

        ret['total'] = {pid: boxes}

    return ret
# ------------------------------------------------------------------------------
#
def get_provided_resources(session):
    '''
    For all ra.pilots, return the amount and time of resources provided.

    This computes sets of 4-tuples of the form: [t0, t1, r0, r1] where:
      t0: time, begin of resource provision
      t1: time, end   of resource provision
      r0: int,  index of resources provided (min)
      r1: int,  index of resources provided (max)
    The tuples are formed so that t0 to t1 and r0 to r1 are continuous.
    '''
    provided = dict()

    for pilot in session.get(etype='pilot'):

        # merge this pilot's boxes into the overall per-metric result
        for metric, per_uid in _get_pilot_provision(pilot).items():
            bucket = provided.setdefault(metric, dict())
            for uid, boxes in per_uid.items():
                bucket[uid] = boxes

    return provided
# ------------------------------------------------------------------------------
#
def get_consumed_resources(session):
    '''
    For all ra.pilot or ra.unit entities, return the amount and time of
    resources consumed.  A consumed resource is characterized by:
      - a resource type (we know about cores and gpus)
      - a metric name  (what the resource was used for)
      - a list of 4-tuples of the form: [t0, t1, r0, r1]
          - t0: time, begin of resource consumption
          - t1: time, end   of resource consumption
          - r0: int,  index of resources consumed (min)
          - r1: int,  index of resources consumed (max)
    The tuples are formed so that t0 to t1 and r0 to r1 are continuous.

    An entity can consume different resources under different metrics - but
    the returned consumption specs will never overlap, meaning, that any
    resource is accounted for exactly one metric at any point in time.  The
    returned structure has the following overall form:

        {
          'metric_1' : {
              uid_1 : [[t0, t1, r0, r1],
                       [t2, t3, r2, r3],
                       ...
                      ],
              uid_2 : ...
          },
          'metric_2' : ...
        }
    '''
    log      = ru.Logger('radical.pilot.utils')
    consumed = dict()

    # collect per-entity consumption and merge it per metric / per uid
    for e in session.get(etype=['pilot', 'unit']):
        if   e.etype == 'pilot': data = _get_pilot_consumption(e)
        elif e.etype == 'unit' : data = _get_unit_consumption(session, e)

        for metric in data:
            if metric not in consumed:
                consumed[metric] = dict()
            for uid in data[metric]:
                consumed[metric][uid] = data[metric][uid]

    # we defined two additional metrics, 'warmup' and 'drain', which are
    # defined for all resources of the pilot.  `warmup` is defined as the
    # time from when the pilot becomes active, to the time the resource is
    # first consumed by a unit.  `drain` is the inverse: the time from when
    # any unit last consumed the resource to the time when the pilot begins
    # termination.
    for pilot in session.get(etype='pilot'):

        # NOTE(review): UNIT_DURATIONS_PRTE / UNIT_DURATIONS_DEFAULT and
        # PILOT_DURATIONS are module-level tables defined outside this chunk.
        if pilot.cfg['task_launch_method'] == 'PRTE':
          # print('\nusing prte configuration')
            unit_durations = UNIT_DURATIONS_PRTE
        else:
          # print('\nusing default configuration')
            unit_durations = UNIT_DURATIONS_DEFAULT

        pt = pilot.timestamps
        log.debug('timestamps:')
        for ts in pt():
            log.debug(' %10.2f %-20s %-15s %-15s %-15s %-15s %s',
                      ts[0], ts[1], ts[2], ts[3], ts[4], ts[5], ts[6])

        # the pilot's usable time window is bounded by the 'ignore' duration
        p_min = pt(event=PILOT_DURATIONS['consume']['ignore'][0]) [0]
        p_max = pt(event=PILOT_DURATIONS['consume']['ignore'][1])[-1]
      # p_max = pilot.events[-1][ru.TIME]
        log.debug('pmin, pmax: %10.2f / %10.2f', p_min, p_max)

        pid = pilot.uid
        cpn = pilot.cfg['resource_details']['rm_info']['cores_per_node']
        gpn = pilot.cfg['resource_details']['rm_info']['gpus_per_node']

        nodes, _, pnodes = _get_nodes(pilot)

        # find resource utilization scope for all resources.  We begin
        # filling the resource dict with
        #
        #   resource_id : [t_min=None, t_max=None]
        #
        # and then iterate over all units.  When we find a unit using some
        # resource id, we set or adjust t_min / t_max.
        resources = dict()
        for pnode in pnodes:
            idx = get_node_index(nodes, pnode, cpn, gpn)
            for c in range(idx[0], idx[1] + 1):
                resources[c] = [None, None]

        for unit in session.get(etype='unit'):

            # only consider units which ran on this pilot
            if unit.cfg.get('pilot') != pid:
                continue

            try:
                snodes = unit.cfg['slots']['nodes']
                ut     = unit.timestamps
                u_min  = ut(event=unit_durations['consume']['exec_queue'][0]) [0]
                u_max  = ut(event=unit_durations['consume']['unschedule'][1])[-1]
            except:
                # NOTE(review): bare except — units without slots or the
                # required timestamps are silently skipped (best effort)
                continue

            # widen the [t_min, t_max] window of every resource index the
            # unit's core and gpu maps touch
            for snode in snodes:
                node  = [snode['name'], snode['uid']]
                r0, _ = get_node_index(nodes, node, cpn, gpn)

                for core_map in snode['core_map']:
                    for core in core_map:
                        idx   = r0 + core
                        t_min = resources[idx][0]
                        t_max = resources[idx][1]
                        if t_min is None or t_min > u_min: t_min = u_min
                        if t_max is None or t_max < u_max: t_max = u_max
                        resources[idx] = [t_min, t_max]

                for gpu_map in snode['gpu_map']:
                    for gpu in gpu_map:
                        idx   = r0 + cpn + gpu
                        t_min = resources[idx][0]
                        t_max = resources[idx][1]
                        if t_min is None or t_min > u_min: t_min = u_min
                        if t_max is None or t_max < u_max: t_max = u_max
                        resources[idx] = [t_min, t_max]

        # now sift through resources and find buckets of pairs with same
        # t_min or same t_max
        bucket_min  = dict()
        bucket_max  = dict()
        bucket_none = list()
        for idx in resources:
            t_min = resources[idx][0]
            t_max = resources[idx][1]
            if t_min is None:
                # resource never touched by any unit
                assert(t_max is None)
                bucket_none.append(idx)
            else:
                if t_min not in bucket_min:
                    bucket_min[t_min] = list()
                bucket_min[t_min].append(idx)
                if t_max not in bucket_max:
                    bucket_max[t_max] = list()
                bucket_max[t_max].append(idx)

        boxes_warm  = list()
        boxes_drain = list()
        boxes_idle  = list()

        # now cluster all lists and add the respective boxes
        for t_min in bucket_min:
            for r in cluster_resources(bucket_min[t_min]):
                boxes_warm.append([p_min, t_min, r[0], r[1]])

        for t_max in bucket_max:
            for r in cluster_resources(bucket_max[t_max]):
                boxes_drain.append([t_max, p_max, r[0], r[1]])

        for r in cluster_resources(bucket_none):
            boxes_idle.append([p_min, p_max, r[0], r[1]])

        if 'warm'  not in consumed: consumed['warm']  = dict()
        if 'drain' not in consumed: consumed['drain'] = dict()
        if 'idle'  not in consumed: consumed['idle']  = dict()

        consumed['warm'][pid]  = boxes_warm
        consumed['drain'][pid] = boxes_drain
        consumed['idle'][pid]  = boxes_idle

  # pprint.pprint(consumed)

    return consumed
# ------------------------------------------------------------------------------
#
def _get_nodes(pilot):
pnodes = pilot.cfg['resource_details']['rm_info']['node_list']
agents = pilot.cfg['resource_details']['rm_info'].get('agent_nodes', [])
anodes = list()
nodes = list()
for agent in agents:
anodes.append(agents[agent])
nodes = pnodes + anodes
return nodes, anodes, pnodes
# ------------------------------------------------------------------------------
#
def _get_pilot_consumption(pilot):
    # Pilots consume resources in different ways:
    #
    #   - the pilot needs to bootstrap and initialize before becoming
    #     active, i.e., before it can begin to manage the workload, and
    #     needs to terminate and clean up during shutdown;
    #   - the pilot may block one or more nodes or cores for its own
    #     components (sub-agents), and those components are not available
    #     for workload execution;
    #   - the pilot may perform operations while managing the workload.
    #
    # This method will compute the first two contributions and part of the
    # 3rd.  It will *not* account for those parts of the 3rd which are
    # performed while specific resources are blocked for the affected
    # workload element (task) - that resource consumption is considered to
    # be a consumption *of that task*, which allows us to compute task
    # specific resource utilization overheads.
    pid = pilot.uid
    cpn = pilot.cfg['resource_details']['rm_info']['cores_per_node']
    gpn = pilot.cfg['resource_details']['rm_info']['gpus_per_node']
    ret = dict()

    # Account for agent resources.  Agents use full nodes, i.e., cores and
    # GPUs.  We happen to know that agents use the first nodes in the
    # allocation and their resource tuples thus start at index `0`, but for
    # completeness we ensure that by inspecting the pilot cfg.
    #
    # Duration is for all of the pilot runtime.  This is not precise really,
    # since several bootstrapping phases happen before the agents exist -
    # but we consider the nodes blocked for the sub-agents from the get-go.
    t0, t1 = get_duration(pilot, PILOT_DURATIONS['agent']['total'])
    boxes  = list()

    # Subtract agent nodes from the nodelist, so that we correctly attribute
    # other global pilot metrics to the remaining nodes.
    nodes, anodes, pnodes = _get_nodes(pilot)

    if anodes and t0 is not None:
        for anode in anodes:
            r0, r1 = get_node_index(nodes, anode, cpn, gpn)
            boxes.append([t0, t1, r0, r1])
    ret['agent'] = {pid: boxes}

    # account for all other pilot metrics
    for metric in PILOT_DURATIONS['consume']:

        # 'ignore' only bounds the usable time window, it is not a metric
        if metric == 'ignore':
            continue

        boxes  = list()
        t0, t1 = get_duration(pilot, PILOT_DURATIONS['consume'][metric])

        if t0 is not None:
            for node in pnodes:
                r0, r1 = get_node_index(nodes, node, cpn, gpn)
                boxes.append([t0, t1, r0, r1])

        ret[metric] = {pid: boxes}

    return ret
# ------------------------------------------------------------------------------
#
def _get_unit_consumption(session, unit):
    # Compute the resource boxes [t0, t1, r0, r1] consumed by the given
    # unit, per metric in the applicable unit duration table.  Returns a
    # dict {metric: {uid: [boxes]}}.

    # we need to know what pilot the unit ran on.  If we don't find a
    # designated pilot, no resources were consumed.
    uid = unit.uid
    pid = unit.cfg['pilot']

    if not pid:
        return dict()

    # get the pilot for inspection
    pilot = session.get(uid=pid)
    if isinstance(pilot, list):
        assert(len(pilot) == 1)
        pilot = pilot[0]

    # FIXME: it is inefficient to query those values again and again
    cpn = pilot.cfg['resource_details']['rm_info']['cores_per_node']
    gpn = pilot.cfg['resource_details']['rm_info']['gpus_per_node']

    nodes, _, _ = _get_nodes(pilot)

    # Units consume only those resources they are scheduled on.
    if 'slots' not in unit.cfg:
        return dict()

    # translate the unit's slots into flat resource indexes
    snodes    = unit.cfg['slots']['nodes']
    resources = list()
    for snode in snodes:
        node  = [snode['name'], snode['uid']]
        r0, _ = get_node_index(nodes, node, cpn, gpn)
        for core_map in snode['core_map']:
            for core in core_map:
                resources.append(r0 + core)
        for gpu_map in snode['gpu_map']:
            for gpu in gpu_map:
                resources.append(r0 + cpn + gpu)

    # find continuous stretches of resources to minimize number of boxes
    resources = cluster_resources(resources)

    # we heuristically switch between PRTE event traces and normal (fork)
    # event traces
    # NOTE(review): UNIT_DURATIONS_PRTE / UNIT_DURATIONS_DEFAULT and the
    # `_debug` flag are defined elsewhere in this module.
    if pilot.cfg['task_launch_method'] == 'PRTE':
        unit_durations = UNIT_DURATIONS_PRTE
    else:
        unit_durations = UNIT_DURATIONS_DEFAULT

    if _debug:
        print()

    ret = dict()
    for metric in unit_durations['consume']:
        boxes  = list()
        t0, t1 = get_duration(unit, unit_durations['consume'][metric])

        if t0 is not None:
            if _debug:
                print('%s: %-15s : %10.3f - %10.3f = %10.3f'
                      % (unit.uid, metric, t1, t0, t1 - t0))
            for r in resources:
                boxes.append([t0, t1, r[0], r[1]])
        else:
            # duration not found for this unit: dump debug information
            if _debug:
                print('%s: %-15s : -------------- ' % (unit.uid, metric))
                dur = unit_durations['consume'][metric]
                print(dur)
                for e in dur:
                    if ru.STATE in e and ru.EVENT not in e:
                        e[ru.EVENT] = 'state'
                t0 = unit.timestamps(event=dur[0])
                t1 = unit.timestamps(event=dur[1])
                print(t0)
                print(t1)
                for e in unit.events:
                    print('\t'.join([str(x) for x in e]))
              # sys.exit()

        ret[metric] = {uid: boxes}

    return ret
# ------------------------------------------------------------------------------
|
import os, sys
import logging
import time
import shutil

# verbose logging (DEBUG == level 10) so the showcase tests print details
logging.basicConfig(level=10)
logger = logging.getLogger(__name__)

# make the project root importable when the tests are run from test/
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parentdir)
logger.debug("parentdir: %s" % parentdir)

from _common_test import TestDummyResponse, DummyDiscogsAlbum

from discogstagger.tagger_config import TaggerConfig
from discogstagger.discogsalbum import DiscogsConnector, DiscogsAlbum
class TestDiscogsAlbum(object):
def setUp(self):
self.ogsrelid = "1448190"
# construct config with only default values
self.tagger_config = TaggerConfig(os.path.join(parentdir, "test/empty.conf"))
self.dummy_dir = "/tmp/dummy_test_dir"
if not os.path.exists(self.dummy_dir):
os.makedirs(self.dummy_dir)
def tearDown(self):
self.ogsrelid = None
self.tagger_config = None
if os.path.exists(self.dummy_dir):
shutil.rmtree(self.dummy_dir)
self.dummy_dir = None
def test_download_release(self):
"""
This is not really a test, just a showcase, that the rate-limiting works ;-)
you can call it using nosetest -s --nologcapture test/test_discogs.py
This call will show, that almost certainly some WARN-messages are printed
(except you haven an extremely fast pc).
"""
discogs_connection = DiscogsConnector(self.tagger_config)
start = time.time()
for x in range(1, 12):
discogs_connection.fetch_release(self.ogsrelid)
stop = time.time()
logger.debug('stop - start: %d' % (stop - start))
assert stop - start > 10
def test_download_image_wo_tokens(self):
"""
Test the downloads of images without a token, no download possible
Not really a valid test, just watching, that the auth stuff is working ;-)
"""
if os.path.exists(self.dummy_dir):
shutil.rmtree(self.dummy_dir)
discogs_connection = DiscogsConnector(self.tagger_config)
discogs_connection.fetch_image(os.path.join(self.dummy_dir, 'folder.jpg'), "http://api.discogs.com/image/R-3083-1167766285.jpeg")
assert not os.path.exists(os.path.join(self.dummy_dir, 'folder.jpg'))
def test_download_image_with_tokens(self):
"""
test the download of images with authentification
Because we would like to test this stuff on travis as well, we cannot store the tokens inside the
usual "env" variables (otherwise the test test_download_images_wo_tokens would not work), as well
as not in any config file. We do need to attache them from the travis environment to the tagger_config
for this test to work, you should set the below mentioned environment variables before running the tesst
with nosetests -s test/test_discogs.py
"""
if os.environ.has_key("TRAVIS_DISCOGS_CONSUMER_KEY"):
consumer_key = os.environ.get('TRAVIS_DISCOGS_CONSUMER_KEY')
if os.environ.has_key("TRAVIS_DISCOGS_CONSUMER_SECRET"):
consumer_secret = os.environ.get("TRAVIS_DISCOGS_CONSUMER_SECRET")
config = TaggerConfig(os.path.join(parentdir, "test/empty.conf"))
config.set("discogs", "consumer_key", consumer_key)
config.set("discogs", "consumer_secret", consumer_secret)
logger.debug('consumer_key %s' % consumer_key)
logger.debug('config %s' % config.get("discogs", "consumer_key"))
discogs_connection = DiscogsConnector(config)
discogs_connection.fetch_image(os.path.join(self.dummy_dir, 'folder.jpg'), "http://api.discogs.com/image/R-3083-1167766285.jpeg")
assert os.path.exists(os.path.join(self.dummy_dir, 'folder.jpg'))
os.remove(os.path.join(self.dummy_dir, 'folder.jpg'))
discogs_connection.fetch_image(os.path.join(self.dummy_dir, 'folder.jpg'), "http://api.discogs.com/image/R-367882-1193559996.jpeg")
assert os.path.exists(os.path.join(self.dummy_dir, 'folder.jpg'))
def test_year(self):
"""test the year property of the DiscogsAlbum
"""
dummy_response = TestDummyResponse(self.ogsrelid)
discogs_album = DummyDiscogsAlbum(dummy_response)
discogs_album.release.data["year"] = "2000"
assert discogs_album.year == "2000"
discogs_album.release.data["year"] = "xxxx"
assert discogs_album.year == "1900"
discogs_album.release.data["year"] = None
assert discogs_album.year == "1900"
discogs_album.release.data["year"] = 2000
assert discogs_album.year == "2000"
discogs_album.release.data["year"] = 20
assert discogs_album.year == "1900"
def test_construct_token_file(self):
"""test the construct_token_file in discogsConnector
"""
discogs_connection = DiscogsConnector(self.tagger_config)
filename = discogs_connection.construct_token_file()
assert filename.endswith('.token')
def test_read_token(self):
    """read the token file, if it exists

    First removes any existing token file and checks that read_token()
    then reports empty credentials; afterwards writes a dummy
    "token,secret" file and checks both values are picked up again.
    """
    # NOTE(review): this local config (with skip_auth set) is never used --
    # the connector below is built from self.tagger_config. Presumably the
    # intent was DiscogsConnector(config); confirm before changing.
    config = TaggerConfig(os.path.join(parentdir, "test/empty.conf"))
    config.set("discogs", "skip_auth", True)
    discogs_connection = DiscogsConnector(self.tagger_config)
    filename = discogs_connection.construct_token_file()
    # Start from a clean slate: with no token file there is nothing to read.
    if os.path.exists(filename):
        os.remove(filename)
    access_token, access_secret = discogs_connection.read_token()
    assert not access_token
    assert not access_secret
    # The token file format is a single "token,secret" line.
    with open(filename, 'w') as fh:
        fh.write('{0},{1}'.format("token", "secret"))
    access_token, access_secret = discogs_connection.read_token()
    assert access_token
    assert access_secret
# nose-style test attributes: the runner selects/skips by these flags so
# tests needing network access or configured Discogs credentials can be
# excluded (e.g. with `nosetests -a '!needs_network'`).
test_download_release.needs_network = True
test_download_release.needs_authentication = True
test_download_image_wo_tokens.needs_network = True
test_download_image_with_tokens.needs_network = True
test_download_image_with_tokens.needs_authentication = True
|
# Generated by Django 2.1.1 on 2018-09-15 15:49
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the obsolete Room.event field."""

    dependencies = [
        ('rooms', '0009_room_event'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='room',
            name='event',
        ),
    ]
|
import sys

# Josephus-style selection: n people numbered 1..n stand in a circle and
# every k-th one is removed; print the removal order as "<a, b, ...>".
n, k = map(int, sys.stdin.readline().split(' '))

people = list(range(1, n + 1))
order = []
pos = k - 1
while people:
    order.append(people.pop(pos))
    if people:
        # Advance k-1 places and wrap around the shrinking circle
        # (equivalent to repeatedly subtracting len(people)).
        pos = (pos + k - 1) % len(people)

# Same "<a, b, c>" output as the original manual concatenation
# (including the IndexError on empty input when n == 0).
tail = str(order[-1]) + '>'
print('<' + ''.join(str(p) + ', ' for p in order[:-1]) + tail)
"""imageadmin URL Configuration
"""
from django.urls import path
from . import views
# URL routes for the image administration app. Each entry maps a path to a
# view in views.py; the `name` is used for reverse()/{% url %} lookups.
urlpatterns = [
    path('', views.home, name='home'),
    # Diva viewer pages: all documents, one document by id, or an
    # external manifest given as a URL path segment.
    path('view_all_diva/', views.view_all_diva, name='view_all_diva'),
    path('view_diva/<str:document_id>', views.view_diva, name='view_diva'),
    path('view_ext_diva/<path:manifest_url>', views.view_ext_diva, name='view_ext_diva'),
    path('view_ext_diva/', views.view_ext_diva, name='view_ext_diva'),
    # "show_*" listing pages paired with the action views below.
    path('show_to_archive/', views.show_to_archive, name='show_to_archive'),
    path('show_to_diva/', views.show_to_diva, name='show_to_diva'),
    path('show_diva_redo/', views.show_diva_redo, name='show_diva_redo'),
    path('login/', views.user_login, name='login'),
    path('logout/', views.user_logout, name='logout'),
    path('to_archive/', views.to_archive, name='toarchive'),
    path('to_diva/', views.to_diva, name='todiva'),
    path('diva_redo/', views.diva_redo, name='divaredo'),
    # Poll the outcome of a background task by its id.
    path('view_task_result/<str:task_id>', views.view_task_result, name='view_task_result'),
]
|
#!/usr/bin/env python3
from wtforms import (Form, StringField, PasswordField, BooleanField)
from wtforms.validators import DataRequired
class LoginForm(Form):
    """WTForms login form: username and password are mandatory,
    "remember me" is an optional checkbox."""

    # DataRequired makes the field fail validation when left empty.
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
|
"""
@file filter2D.py
@brief Sample code that shows how to implement your own linear filters by using filter2D function
"""
import sys
import cv2 as cv
import numpy as np
def main(argv):
    """Cycle a normalized box blur of growing kernel size over an image.

    Shows the filtered image every 500 ms with kernel sizes 3, 5, 7, 9,
    11 (then wrapping), until ESC is pressed. Returns 0 on success and
    -1 when the image cannot be loaded.
    """
    window_name = 'filter2D Demo'

    # Load the requested image (default: lena.jpg from the samples).
    image_name = argv[0] if argv else 'lena.jpg'
    src = cv.imread(cv.samples.findFile(image_name), cv.IMREAD_COLOR)
    if src is None:
        print('Error opening image!')
        print('Usage: filter2D.py [image_name -- default lena.jpg] \n')
        return -1

    # ddepth = -1 keeps the output depth equal to the source depth.
    ddepth = -1

    ind = 0
    while True:
        # Normalized box kernel: all ones, scaled so the weights sum to 1.
        kernel_size = 3 + 2 * (ind % 5)
        kernel = np.ones((kernel_size, kernel_size), dtype=np.float32)
        kernel /= kernel_size * kernel_size

        cv.imshow(window_name, cv.filter2D(src, ddepth, kernel))

        # ESC (27) quits; any other key or the 500 ms timeout advances.
        if cv.waitKey(500) == 27:
            break
        ind += 1
    return 0
if __name__ == "__main__":
    # Propagate main()'s status (-1 when the image fails to load) as the
    # process exit code instead of silently discarding the return value.
    sys.exit(main(sys.argv[1:]))
|
"""
This type stub file was generated by pyright.
"""
from .vtkImageReader import vtkImageReader
class vtkBMPReader(vtkImageReader):
    """
    vtkBMPReader - read Windows BMP files
    Superclass: vtkImageReader
    vtkBMPReader is a source object that reads Windows BMP files. This
    includes indexed and 24bit bitmaps Usually, all BMPs are converted to
    24bit RGB, but BMPs may be output as 8bit images with a LookupTable
    if the Allow8BitBMP flag is set.
    BMPReader creates structured point datasets. The dimension of the
    dataset depends upon the number of files read. Reading a single file
    results in a 2D image, while reading more than one file results in a
    3D volume.
    To read a volume, files must be of the form "FileName.<number>"
    (e.g., foo.bmp.0, foo.bmp.1, ...). You must also specify the image
    range. This range specifies the beginning and ending files to read
    (range can be any pair of non-negative numbers).
    The default behavior is to read a single file. In this case, the form
    of the file is simply "FileName" (e.g., foo.bmp).
    @sa
    vtkBMPWriter
    """

    # NOTE: auto-generated type stub -- every body is intentionally elided
    # (...). The C++ signature quoted in each docstring is the reference.

    def Allow8BitBMPOff(self):
        """
        V.Allow8BitBMPOff()
        C++: virtual void Allow8BitBMPOff()
        If this flag is set and the BMP reader encounters an 8bit file,
        the data will be kept as unsigned chars and a lookuptable will be
        exported
        """
        ...

    def Allow8BitBMPOn(self):
        """
        V.Allow8BitBMPOn()
        C++: virtual void Allow8BitBMPOn()
        If this flag is set and the BMP reader encounters an 8bit file,
        the data will be kept as unsigned chars and a lookuptable will be
        exported
        """
        ...

    def CanReadFile(self, string) -> int:
        """
        V.CanReadFile(string) -> int
        C++: int CanReadFile(const char *fname) override;
        Is the given file a BMP file?
        """
        ...

    def GetAllow8BitBMP(self) -> int:
        """
        V.GetAllow8BitBMP() -> int
        C++: virtual vtkTypeBool GetAllow8BitBMP()
        If this flag is set and the BMP reader encounters an 8bit file,
        the data will be kept as unsigned chars and a lookuptable will be
        exported
        """
        ...

    def GetColors(self):
        """
        V.GetColors() -> (int, ...)
        C++: virtual unsigned char *GetColors()
        Returns the color lut.
        """
        ...

    def GetDepth(self) -> int:
        """
        V.GetDepth() -> int
        C++: virtual int GetDepth()
        Returns the depth of the BMP, either 8 or 24.
        """
        ...

    def GetDescriptiveName(self) -> str:
        """
        V.GetDescriptiveName() -> string
        C++: const char *GetDescriptiveName() override;
        Return a descriptive name for the file format that might be
        useful in a GUI.
        """
        ...

    def GetFileExtensions(self) -> str:
        """
        V.GetFileExtensions() -> string
        C++: const char *GetFileExtensions() override;
        Get the file extensions for this format. Returns a string with a
        space separated list of extensions in the format .extension
        """
        ...

    def GetLookupTable(self):
        """
        V.GetLookupTable() -> vtkLookupTable
        C++: virtual vtkLookupTable *GetLookupTable()
        """
        ...

    def GetNumberOfGenerationsFromBase(self, string) -> int:
        """
        V.GetNumberOfGenerationsFromBase(string) -> int
        C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
        override;
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...

    def GetNumberOfGenerationsFromBaseType(self, string) -> int:
        """
        V.GetNumberOfGenerationsFromBaseType(string) -> int
        C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
            const char *type)
        Given a the name of a base class of this class type, return the
        distance of inheritance between this class type and the named
        class (how many generations of inheritance are there between this
        class and the named class). If the named class is not in this
        class's inheritance tree, return a negative value. Valid
        responses will always be nonnegative. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...

    def IsA(self, string) -> int:
        """
        V.IsA(string) -> int
        C++: vtkTypeBool IsA(const char *type) override;
        Return 1 if this class is the same type of (or a subclass of) the
        named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...

    def IsTypeOf(self, string) -> int:
        """
        V.IsTypeOf(string) -> int
        C++: static vtkTypeBool IsTypeOf(const char *type)
        Return 1 if this class type is the same type of (or a subclass
        of) the named class. Returns 0 otherwise. This method works in
        combination with vtkTypeMacro found in vtkSetGet.h.
        """
        ...

    def NewInstance(self):
        """
        V.NewInstance() -> vtkBMPReader
        C++: vtkBMPReader *NewInstance()
        """
        ...

    def SafeDownCast(self, vtkObjectBase):
        """
        V.SafeDownCast(vtkObjectBase) -> vtkBMPReader
        C++: static vtkBMPReader *SafeDownCast(vtkObjectBase *o)
        """
        ...

    def SetAllow8BitBMP(self, p_int):
        """
        V.SetAllow8BitBMP(int)
        C++: virtual void SetAllow8BitBMP(vtkTypeBool _arg)
        If this flag is set and the BMP reader encounters an 8bit file,
        the data will be kept as unsigned chars and a lookuptable will be
        exported
        """
        ...

    def __delattr__(self, *args, **kwargs):
        """ Implement delattr(self, name). """
        ...

    def __getattribute__(self, *args, **kwargs):
        """ Return getattr(self, name). """
        ...

    def __init__(self, *args, **kwargs) -> None:
        ...

    @staticmethod
    def __new__(*args, **kwargs):
        """ Create and return a new object. See help(type) for accurate signature. """
        ...

    def __repr__(self, *args, **kwargs):
        """ Return repr(self). """
        ...

    def __setattr__(self, *args, **kwargs):
        """ Implement setattr(self, name, value). """
        ...

    def __str__(self, *args, **kwargs) -> str:
        """ Return str(self). """
        ...

    # Opaque attribute placeholders emitted by the stub generator.
    __this__ = ...
    __dict__ = ...
    __vtkname__ = ...
|
#!/usr/bin/env python3
# Documentation: https://docs.python.org/3/library/socket.html
# from MasterMenu import Menu
from MasterMenu import Menu
import socket
import json
import sys
import struct
sys.path.append("..")
class MasterPi:
    """
    This class shows the connection through socket programming between
    master pi and reception pi.

    Attributes
    ----------
    HOST
        IP address of the Host device.
    PORT
        Port to listen on (non-privileged ports are > 1023).
    ADDRESS
        Address is the combination of the host ip and port number.

    Methods
    -------
    main()
        Lets master pi act as a listener for the reception pi.
        Maintains connectivity between master pi and reception pi using
        particular sockets.
    """

    # Empty string means to listen on all IP's on the machine, also works with IPv6.
    HOST = ""
    # Note "0.0.0.0" also works but only with IPv4.
    PORT = 65000  # Port to listen on (non-privileged ports are > 1023).
    ADDRESS = (HOST, PORT)

    def main(self):
        """Accept Reception Pi connections forever, one menu session each.

        Blocks indefinitely; this loop has no exit condition.
        """
        menu = Menu()
        socket_utils = Socket_utils()
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(self.ADDRESS)
            s.listen()
            print("Listening on {}...".format(self.ADDRESS))
            # Serve one client at a time: accept, run a session, repeat.
            while True:
                print("Waiting for Reception Pi...")
                conn, addr = s.accept()
                with conn:
                    print("Connected to {}".format(addr))
                    print()
                    # First message from the client -- presumably the
                    # logged-in user's record (confirm against the
                    # Reception Pi sender) -- drives the menu session.
                    user = socket_utils.recvJson(conn)
                    menu.runMenu(user)
                    # Tell the Reception Pi the session is over.
                    socket_utils.sendJson(conn, {"logout": True})
class Socket_utils:
    """
    Length-prefixed JSON framing over a TCP socket.

    Each message is a 4-byte big-endian signed length followed by the
    UTF-8 encoded JSON payload, so whole JSON objects can be exchanged
    between the master pi and the reception pi.

    Methods
    -------
    sendJson(socket, object)
        Serialize *object* as JSON and send it with a length prefix.
    recvJson(socket)
        Receive one length-prefixed JSON message and return the object.
    """

    def sendJson(self, socket, object):
        """Serialize *object* to JSON and send it over *socket*."""
        jsonString = json.dumps(object)
        data = jsonString.encode("utf-8")
        # 4-byte big-endian signed length prefix frames the message.
        jsonLength = struct.pack("!i", len(data))
        socket.sendall(jsonLength)
        socket.sendall(data)

    def recvJson(self, socket):
        """Receive one length-prefixed JSON message from *socket*.

        Raises ConnectionError if the peer closes mid-message.
        """
        # BUGFIX: recv(4) may legally return fewer than 4 bytes on a
        # stream socket; loop until the whole length prefix has arrived.
        header = bytearray()
        while len(header) < 4:
            chunk = socket.recv(4 - len(header))
            if not chunk:
                raise ConnectionError("socket closed while reading length prefix")
            header.extend(chunk)
        jsonLength = struct.unpack("!i", bytes(header))[0]
        # Reference: https://stackoverflow.com/a/15964489/9798310
        buffer = bytearray(jsonLength)
        view = memoryview(buffer)
        while jsonLength:
            nbytes = socket.recv_into(view, jsonLength)
            # BUGFIX: recv_into returning 0 means the peer closed; the old
            # code would spin forever here.
            if nbytes == 0:
                raise ConnectionError("socket closed mid-message")
            view = view[nbytes:]
            jsonLength -= nbytes
        jsonString = buffer.decode("utf-8")
        return json.loads(jsonString)
# Execute program: start the listener only when run as a script, not on import.
if __name__ == "__main__":
    MasterPi().main()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 16 20:34:25 2019
@author: aleksandr
"""
import graphics as gr
# Single shared drawing surface used by fractal_rectangle below.
window = gr.GraphWin('wondow', 300, 300)


def fractal_rectangle(A, B, C, D, deep=10):
    """Draw the quadrilateral A-B-C-D in the global window.

    NOTE(review): despite the name and the `deep` parameter, there is no
    recursive call -- `deep` is only checked once, so exactly one
    rectangle is drawn. Presumably the recursive step (computing an
    inner, rotated rectangle and calling fractal_rectangle(..., deep - 1))
    was never added; confirm the intended behavior before relying on this.
    """
    if deep < 1:
        return
    # Draw the four edges of the quadrilateral.
    gr.Line(gr.Point(*A), gr.Point(*B)).draw(window)
    gr.Line(gr.Point(*B), gr.Point(*C)).draw(window)
    gr.Line(gr.Point(*C), gr.Point(*D)).draw(window)
    gr.Line(gr.Point(*D), gr.Point(*A)).draw(window)


fractal_rectangle((10, 10), (50, 10), (50, 50), (10, 50))
#!/usr/bin/env python
"""
Demonstrate how to do two plots on the same axes with different left
right scales.
The trick is to use *2 different axes*. Turn the axes rectangular
frame off on the 2nd axes to keep it from obscuring the first.
Manually set the tick locs and labels as desired. You can use
separate matplotlib.ticker formatters and locators as desired since
the two axes are independent.
This is achieved in the following example by calling the Axes.twinx()
method, which performs this work. See the source of twinx() in
axes.py for an example of how to do it for different x scales. (Hint:
use the xaxis instance and call tick_bottom and tick_top in place of
tick_left and tick_right.)
The twinx and twiny methods are also exposed as pyplot functions.
"""
import numpy as np
import matplotlib.pyplot as plt
# First axes: exponential curve plotted in blue against the left scale.
fig = plt.figure()
ax1 = fig.add_subplot(111)
t = np.arange(0.01, 10.0, 0.01)
s1 = np.exp(t)
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('time (s)')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('exp', color='b')
for tl in ax1.get_yticklabels():
    tl.set_color('b')
# twinx() creates a second axes sharing the same x-axis, with its own
# independent y-scale on the right; sine curve plotted in red there.
ax2 = ax1.twinx()
s2 = np.sin(2*np.pi*t)
ax2.plot(t, s2, 'r.')
ax2.set_ylabel('sin', color='r')
for tl in ax2.get_yticklabels():
    tl.set_color('r')
plt.show()
|
# Adafruit NeoPixel library port to the rpi_ws281x library.
# Author: Tony DiCola (tony@tonydicola.com), Jeremy Garff (jer@jers.net)
import atexit
import numpy as np
import _rpi_ws281x as ws
def Color(red, green, blue, white=0):
    """Pack red/green/blue (and optional white) into one integer.

    Each component should be a value 0-255 where 0 is the lowest
    intensity and 255 is the highest. Layout: white in bits 24-31,
    green in 16-23, red in 8-15, blue in 0-7.
    """
    packed = white
    for component in (green, red, blue):
        packed = (packed << 8) | component
    return packed
class _LED_Data(object):
    """Wrapper class which makes a SWIG LED color data array look and feel like
    a Python list of integers.

    This port is matrix-oriented: when *size* is a (width, height) tuple,
    2D (x, y) indexing and full-slice access to the whole panel work.

    NOTE(review): a plain int index matches neither the slice nor the
    tuple branch below, so 1D access like data[n] returns None on read
    and is silently ignored on write -- confirm whether strip-style
    indexing was intentionally dropped in this port.
    """
    def __init__(self, channel, size):
        # size: total pixel count, or a (width, height) tuple for a matrix.
        self.size = size
        if isinstance(size, tuple):
            self.width, self.height = self.size
        self.channel = channel

    def __getitem__(self, index):
        """Read one pixel (tuple index) or the whole panel (full slice)."""
        def get_pixel(x, y):
            pos = self.map_pixel(x, y)
            return ws.ws2811_led_get(self.channel, pos)
        if isinstance(index, slice):
            try:
                # TODO: Only full slice available
                rows = []
                for x in range(self.width):
                    col = []
                    for y in range(self.height):
                        col.append(get_pixel(x, y))
                    rows.append(col)
                return np.array(rows)
            except (Exception,) as error:
                print('LED wrapper failed with {}'.format(error))
        if isinstance(index, tuple):
            try:
                # TODO: Only single index available
                x, y = index
                return get_pixel(x, y)
            except (Exception,) as error:
                print('LED wrapper failed with {}'.format(error))

    def __setitem__(self, index, value):
        """Write one pixel (tuple index) or the whole panel (full slice)."""
        def set_pixel(x, y, value):
            pos = self.map_pixel(x, y)
            ws.ws2811_led_set(self.channel, pos, int(value))
        if isinstance(index, slice):
            try:
                # TODO: Only full slice available
                for x in range(self.width):
                    for y in range(self.height):
                        set_pixel(x, y, value[x, y])
            except (Exception,) as error:
                print('LED wrapper failed with {}'.format(error))
        if isinstance(index, tuple):
            try:
                # TODO: Only single index available
                x, y = index
                set_pixel(x, y, value)
            except (Exception,) as error:
                print('LED wrapper failed with {}'.format(error))

    def map_pixel(self, x, y):
        """
        Convert coordinates to stream for led strip
        Note: the current C code treats the led as a strip and not a matrix

        NOTE(review): the x % 2 branch suggests a serpentine (zig-zag)
        column wiring, but the -1 base offset and the +1 in the else
        branch look suspicious -- verify against the actual panel wiring
        before trusting this mapping.
        """
        pos = (x * self.height) - 1
        if pos < 0:
            pos = 0
        if x % 2:
            pos += self.height - y
        elif x == 0:
            pos += y
        else:
            pos += y + 1
        if pos < 0:
            pos = 0
        return pos
class Adafruit_NeoPixel(object):
    def __init__(self, num, pin, freq_hz=800000, dma=10, invert=False,
                 brightness=255, channel=0, strip_type=ws.WS2811_STRIP_RGB):
        """Class to represent a NeoPixel/WS281x LED display. Num should be the
        number of pixels in the display -- either an int, or a
        (width, height) tuple for a matrix layout -- and pin should be the
        GPIO pin connected to the display signal line (must be a PWM pin
        like 18!). Optional parameters are freq, the frequency of the
        display signal in hertz (default 800khz), dma, the DMA channel to
        use (default 10), invert, a boolean specifying if the signal line
        should be inverted (default False), and channel, the PWM channel
        to use (defaults to 0).
        """
        # Create ws2811_t structure and fill in parameters.
        if isinstance(num, tuple):
            self.size = num
            self.width = num[0]
            self.height = num[1]
            num = num[0] * num[1]
        else:
            # BUGFIX: self.size was previously assigned only for tuple
            # input, so constructing with a plain pixel count raised
            # AttributeError at the _LED_Data(...) call below.
            self.size = num
        self._leds = ws.new_ws2811_t()
        # Initialize the channels to zero
        for channum in range(2):
            chan = ws.ws2811_channel_get(self._leds, channum)
            ws.ws2811_channel_t_count_set(chan, 0)
            ws.ws2811_channel_t_gpionum_set(chan, 0)
            ws.ws2811_channel_t_invert_set(chan, 0)
            ws.ws2811_channel_t_brightness_set(chan, 0)
        # Initialize the channel in use
        self._channel = ws.ws2811_channel_get(self._leds, channel)
        ws.ws2811_channel_t_count_set(self._channel, num)
        ws.ws2811_channel_t_gpionum_set(self._channel, pin)
        ws.ws2811_channel_t_invert_set(self._channel, 0 if not invert else 1)
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)
        ws.ws2811_channel_t_strip_type_set(self._channel, strip_type)
        # Initialize the controller
        ws.ws2811_t_freq_set(self._leds, freq_hz)
        ws.ws2811_t_dmanum_set(self._leds, dma)
        # Grab the led data array.
        self._led_data = _LED_Data(self._channel, self.size)
        # Substitute for __del__, traps an exit condition and cleans up properly
        atexit.register(self._cleanup)

    def _cleanup(self):
        # Clean up memory used by the library when not needed anymore.
        if self._leds is not None:
            ws.delete_ws2811_t(self._leds)
            self._leds = None
            self._channel = None

    def begin(self):
        """Initialize library, must be called once before other functions are
        called.
        """
        resp = ws.ws2811_init(self._leds)
        if resp != ws.WS2811_SUCCESS:
            message = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, message))

    def show(self):
        """Update the display with the data from the LED buffer."""
        resp = ws.ws2811_render(self._leds)
        if resp != ws.WS2811_SUCCESS:
            message = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, message))

    def setPixelColor(self, n, color):
        """Set LED at position n to the provided 24-bit color value (in RGB order).
        """
        self._led_data[n] = color

    def setPixelColorRGB(self, n, red, green, blue, white = 0):
        """Set LED at position n to the provided red, green, and blue color.
        Each color component should be a value from 0 to 255 (where 0 is the
        lowest intensity and 255 is the highest intensity).
        """
        self.setPixelColor(n, Color(red, green, blue, white))

    def setBrightness(self, brightness):
        """Scale each LED in the buffer by the provided brightness. A brightness
        of 0 is the darkest and 255 is the brightest.
        """
        ws.ws2811_channel_t_brightness_set(self._channel, brightness)

    def getBrightness(self):
        """Get the brightness value for each LED in the buffer. A brightness
        of 0 is the darkest and 255 is the brightest.
        """
        return ws.ws2811_channel_t_brightness_get(self._channel)

    def getPixels(self):
        """Return an object which allows access to the LED display data as if
        it were a sequence of 24-bit RGB values.
        """
        return self._led_data

    def numPixels(self):
        """Return the number of pixels in the display."""
        return ws.ws2811_channel_t_count_get(self._channel)

    def getPixelColor(self, n):
        """Get the 24-bit RGB color value for the LED at position n."""
        return self._led_data[n]

    def clear(self):
        """Zero the whole buffer and push the change to the display."""
        self._led_data[:] = np.zeros(self.size)
        self.show()
|
#!/usr/bin/python
import sys
def making_change(amount, denominations):
    """Count the distinct ways to make *amount* from *denominations*.

    Order does not matter (1+5 and 5+1 count as one way). Returns 1 for
    a zero amount and 0 when the amount cannot be made.

    The previous version recomputed overlapping subproblems recursively
    (exponential time); this one memoizes on (remaining, index), giving
    O(amount * len(denominations)) states while returning identical
    results (including for duplicate denominations).
    """
    # Preserve the original guard behavior for the trivial cases.
    if amount == 0:
        return 1
    if not denominations and amount > 0:
        return 0
    if amount < min(denominations):
        return 0

    from functools import lru_cache

    denoms = tuple(sorted(denominations, reverse=True))

    @lru_cache(maxsize=None)
    def _ways(remaining, i):
        # Ways to form `remaining` using denoms[i:], each unlimited times.
        if remaining == 0:
            return 1
        if remaining < 0 or i == len(denoms):
            return 0
        # Either use denoms[i] once more, or drop it entirely.
        return _ways(remaining - denoms[i], i) + _ways(remaining, i + 1)

    return _ways(amount, 0)
# (Removed a stray module-level call `making_change(5, [1, 5, 10])` whose
# result was discarded -- it ran on every import for no effect.)
if __name__ == "__main__":
    # Test out your implementation from the command line
    # with `python making_change.py [amount]` with different amounts
    if len(sys.argv) > 1:
        denominations = [1, 5, 10, 25, 50]
        amount = int(sys.argv[1])
        print("There are {ways} ways to make {amount} cents.".format(ways=making_change(amount, denominations), amount=amount))
    else:
        print("Usage: making_change.py [amount]")
# Generated by Django 3.0.1 on 2020-01-24 16:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated initial migration for the lights app.

    Creates LightBulb, LightBulbSettings, LightBulbRelay and LedLight.
    Settings and relay rows link one-to-one to a LightBulb; LightBulb
    and LedLight each belong to a user via a cascading foreign key.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='LightBulb',
            fields=[
                # UUID primary keys for the externally-addressed devices.
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=64)),
                ('address', models.CharField(blank=True, default='', max_length=64, null=True)),
                ('port_number', models.CharField(default='-', max_length=4)),
                ('state', models.CharField(default='-', max_length=1000)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lightbulb', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='LightBulbSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reset', models.IntegerField(blank=True, choices=[(1, 'Reset')], null=True)),
                ('default_state', models.CharField(choices=[('off', 'Off'), ('on', 'On')], default='off', max_length=8)),
                ('auto_off', models.IntegerField(blank=True, default=0, null=True)),
                ('state', models.CharField(default='-', max_length=1000)),
                ('schedule', models.BooleanField(default=False)),
                ('schedule_rules', models.CharField(blank=True, default='', help_text='Format: 0900-on,0930-off', max_length=1000, null=True)),
                ('lightbulb', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='lightbulb_settings', to='lights.LightBulb')),
            ],
        ),
        migrations.CreateModel(
            name='LightBulbRelay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('set_state', models.CharField(choices=[('off', 'Off'), ('on', 'On'), ('toggle', 'Toggle')], default='off', max_length=8)),
                ('timer', models.IntegerField(blank=True, default=0, null=True)),
                ('state', models.CharField(default='-', max_length=1000)),
                ('lightbulb', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='lightbulb_relay', to='lights.LightBulb')),
            ],
        ),
        migrations.CreateModel(
            name='LedLight',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=64)),
                ('address', models.CharField(blank=True, default='', max_length=64, null=True)),
                ('port_number', models.CharField(default='-', max_length=4)),
                ('state', models.CharField(choices=[('-', 'inactive'), ('0', 'Off'), ('1', 'On'), ('2', 'blink')], default='-', max_length=1)),
                ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ledlight', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['state', 'owner'],
            },
        ),
    ]
|
"""
http://www.diveintopython3.net/xml.html
"""
from xml.dom import minidom
import os
# Locate staff.xml next to this script so the demo works from any cwd.
xmlfile=os.path.dirname(os.path.abspath(__file__))+'/staff.xml'
doc = minidom.parse(xmlfile)  # parse the whole file into a DOM tree
# doc.getElementsByTagName returns NodeList
name = doc.getElementsByTagName("name")[0]
# Four equivalent ways of formatting the same output:
#print("Node Name : %s" % name.nodeName) #1 %-formatting
print("Node Name : ",name.nodeName)#2 print with multiple args
print("Node Name : {}".format(name.nodeName))#3 str.format
print(f"Node Name : {name.nodeName}")#4 f-string
print("Name Data :",name.firstChild.data)
print("Name FirstChild :",name.firstChild.data)
staffs = doc.getElementsByTagName("staff")
# Print each <staff> element's id attribute plus the text content of its
# first <nickname> and <salary> children.
for staff in staffs:
    sid = staff.getAttribute("id")
    nickname = staff.getElementsByTagName("nickname")[0]
    salary = staff.getElementsByTagName("salary")[0]
    print("id:%s, nickname:%s, salary:%s" %
          (sid, nickname.firstChild.data, salary.firstChild.data))
|
'''
1277. Count Square Submatrices with All Ones
Medium
2575
40
Add to List
Share
Given a m * n matrix of ones and zeros, return how many square submatrices have all ones.
Example 1:
Input: matrix =
[
[0,1,1,1],
[1,1,1,1],
[0,1,1,1]
]
Output: 15
Explanation:
There are 10 squares of side 1.
There are 4 squares of side 2.
There is 1 square of side 3.
Total number of squares = 10 + 4 + 1 = 15.
Example 2:
Input: matrix =
[
[1,0,1],
[1,1,0],
[1,1,0]
]
Output: 7
Explanation:
There are 6 squares of side 1.
There is 1 square of side 2.
Total number of squares = 6 + 1 = 7.
Constraints:
1 <= arr.length <= 300
1 <= arr[0].length <= 300
0 <= arr[i][j] <= 1
'''
class Solution:
    def countSquares(self, matrix: "List[List[int]]") -> int:
        """Count square submatrices consisting entirely of ones.

        Classic DP, done in place: after the sweep, matrix[i][j] holds
        the side of the largest all-ones square whose bottom-right corner
        is (i, j); each such cell contributes that many squares, so the
        answer is the sum of all cells. O(m*n) time, O(1) extra space.
        NOTE: mutates the input matrix.

        BUGFIX: the annotation previously referenced `List` without
        importing it from `typing`, which raises NameError at definition
        time outside the LeetCode harness; it is now a lazy (string)
        annotation.
        """
        for i in range(1, len(matrix)):
            for j in range(1, len(matrix[0])):
                if matrix[i][j]:
                    # Extend the smallest of the three neighboring squares.
                    matrix[i][j] += min(matrix[i - 1][j - 1],
                                        matrix[i - 1][j],
                                        matrix[i][j - 1])
        return sum(sum(row) for row in matrix)
|
# Generated by Django 2.0.6 on 2018-07-15 15:41
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename Course.or_banner to is_banner."""

    dependencies = [
        ('course', '0007_auto_20180715_2337'),
    ]

    operations = [
        migrations.RenameField(
            model_name='course',
            old_name='or_banner',
            new_name='is_banner',
        ),
    ]
|
txt = "H\te\tl\tl\to"
print(txt)
print(txt.expandtabs())
print(txt.expandtabs(2))
print(txt.expandtabs(4))
print(txt.expandtabs(8))
print(txt.expandtabs(10))
txt = "H\te\tl\tl\to"
x = txt.expandtabs(2)
print(x)
txt = "Hello, welcome to my world."
x = txt.endswith("my world.", 5, 11)
print(x)
txt = "Hello, welcome to my world."
x = txt.endswith("my world.")
print(x)
txt = "Hello, welcome to my world."
x = txt.endswith(".")
print(x)
txt = "My name is Ståle"
print(txt.encode(encoding="ascii",errors="backslashreplace"))
print(txt.encode(encoding="ascii",errors="ignore"))
print(txt.encode(encoding="ascii",errors="namereplace"))
print(txt.encode(encoding="ascii",errors="replace"))
print(txt.encode(encoding="ascii",errors="xmlcharrefreplace"))
#print(txt.encode(encoding="ascii",errors="strict"))
txt = "My name is Ståle"
x = txt.encode()
print(x)
txt = "I love apples, apple are my favorite fruit"
x = txt.count("apple", 10, 24)
print(x)
txt = "I love apples, apple are my favorite fruit"
x = txt.count("apple")
print(x)
txt = "banana"
x = txt.center(20, "O")
print(x)
txt = "banana"
x = txt.center(20)
print(x)
txt = "Hello, And Welcome To My World!"
x = txt.casefold()
print(x)
txt = "hello, and welcome to my world."
x = txt.capitalize()
print (x)
#A backslash followed by an 'x' and a hex number represents a hex value:
txt = "\x48\x65\x6c\x6c\x6f"
print(txt)
#A backslash followed by three integers will result in a octal value:
txt = "\110\145\154\154\157"
print(txt)
#This example erases one character (backspace):
txt = "Hello \bWorld!"
print(txt)
txt = "Hello\tWorld!"
print(txt)
txt = "Hello\rWorld!"
print(txt)
txt = "Hello\nWorld!"
print(txt)
txt = "This will insert one \\ (backslash)."
print(txt)
txt = 'It\'s alright.'
print(txt)
txt = "We are the so-called \"Vikings\" from the north."
print(txt)
quantity = 3
itemno = 567
price = 49.95
myorder = "I want to pay {2} dollars for {0} pieces of item {1}."
print(myorder.format(quantity, itemno, price))
quantity = 3
itemno = 567
price = 49.95
myorder = "I want {} pieces of item {} for {} dollars."
print(myorder.format(quantity, itemno, price))
age = 36
txt = "My name is John, and I am {}"
print(txt.format(age))
a = "Hello"
b = "World"
c = a + b
print(c)
txt = "The rain in Spain stays mainly in the plain"
x = "ain" not in txt
print(x)
txt = "The rain in Spain stays mainly in the plain"
x = "ain" in txt
print(x)
a = "Hello, World!"
print(a.split(",")) # returns ['Hello', ' World!']
a = "Hello, World!"
print(a.replace("H", "J"))
a = "Hello, World!"
print(a.upper())
a = "Hello, World!"
print(a.lower())
a = " Hello, World! "
print(a.strip()) # returns "Hello, World!"
a = "Hello, World!"
print(len(a))
b = "Hello, World!"
print(b[-5:-2])
b = "Hello, World!"
print(b[:5])
b = "Hello, World!"
print(b[1:])
b = "Hello, World!"
print(b[2:5])
a = "Hello, World!"
print(a[1])
a = '''Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua.'''
print(a)
a = """Lorem ipsum dolor sit amet,
consectetur adipiscing elit,
sed do eiusmod tempor incididunt
ut labore et dolore magna aliqua."""
print(a)
a = "Hello"
print(a)
#You can use double or single quotes:
print("Hello")
print('Hello')
|
# Print every palindromic word found in words.txt (one word per line).
with open('words.txt', 'r') as file:
    words = file.readlines()

for line in words:
    word = line.strip('\n')
    # A palindrome reads the same forwards and backwards.
    if word == word[::-1]:
        print(word)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.