seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
15731112153 | import cv2
from cv2 import *
import numpy as np
from .cv2pynq import *
from pynq.lib.video import *
__version__ = 0.3
c = cv2pynq()
video = c.ol.video #cv2pynq uses the pynq video library and the Pynq-Z2 video subsystem
def Sobel(src, ddepth, dx, dy, dst=None, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT):
    """dst = cv.Sobel( src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]] )
    Executes the Sobel operator on hardware if input parameters fit to hardware constraints.
    Otherwise the OpenCV Sobel function is called."""
    # Hardware path only for: plain 8-bit grayscale frames within the overlay's
    # resolution limits, default scale/delta/border, and a pure first-order
    # x- or y-gradient with same-depth output.
    hw_ok = (
        ksize in (3, 5)
        and scale == 1 and delta == 0
        and borderType == cv2.BORDER_DEFAULT
        and src.dtype == np.uint8 and src.ndim == 2
        and 0 < src.shape[0] <= cv2pynq.MAX_HEIGHT
        and 0 < src.shape[1] <= cv2pynq.MAX_WIDTH
        and ddepth == -1 and (dx, dy) in ((1, 0), (0, 1))
    )
    if hw_ok:
        return c.Sobel(src, ddepth, dx, dy, dst, ksize)
    # Software fallback: delegate to stock OpenCV.
    return cv2.Sobel(src, ddepth, dx, dy, dst, ksize, scale, delta, borderType)
def Laplacian(src, ddepth, dst=None, ksize=1, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT):
    """dst = cv.Laplacian( src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]] )
    Executes the Laplacian operator on hardware if input parameters fit to hardware constraints.
    Otherwise the OpenCV Laplacian function is called."""
    # Hardware path only for: plain 8-bit grayscale frames within the overlay's
    # resolution limits, default scale/delta/border and same-depth output.
    hw_ok = (
        ksize in (1, 3, 5)
        and scale == 1 and delta == 0
        and borderType == cv2.BORDER_DEFAULT
        and src.dtype == np.uint8 and src.ndim == 2
        and 0 < src.shape[0] <= cv2pynq.MAX_HEIGHT
        and 0 < src.shape[1] <= cv2pynq.MAX_WIDTH
        and ddepth == -1
    )
    if hw_ok:
        return c.Laplacian(src, ddepth, dst, ksize)
    # Software fallback: delegate to stock OpenCV.
    return cv2.Laplacian(src, ddepth, dst, ksize, scale, delta, borderType)
def close():
'''this function should be called after using the cv2pynq library.
It cleans up the internal state and frees the used CMA-buffers.
'''
c.close() | JinChen-tw/PYNQ-Z2 | E_Elements_labs/cv2pynq/__init__.py | __init__.py | py | 2,147 | python | en | code | 9 | github-code | 13 |
21800376232 | # Time Limit per Test: 1 seconds
# Memory Limit per Test: 256 megabytes
# Using: PyPy 3-64
# Solution Link: https://codeforces.com/contest/1775/submission/189010130
'''
Question Link: https://codeforces.com/contest/1775/problem/C
Petya and his friend, robot Petya++, like to solve exciting math problems.
One day Petya++ came up with the numbers 𝑛 and 𝑥 and wrote the following equality on the board:
𝑛 & (𝑛+1) & … & 𝑚=𝑥,
where & denotes the bitwise AND operation. Then he suggested his friend Petya find such a minimal 𝑚 (𝑚≥𝑛) that the equality on the board holds.
Unfortunately, Petya couldn't solve this problem in his head and decided to ask for computer help. He quickly wrote a program and found the answer.
Can you solve this difficult problem?
'''
'''
Sample Input:
5
10 8
10 10
10 42
20 16
1000000000000000000 0
Sample Output:
12
10
-1
24
1152921504606846976
'''
import sys
input = sys.stdin.readline  # fast stdin reads; intentionally shadows builtin input
rounds = int(input())  # number of test cases
for ii in range(rounds):
    out=0
    start,goal=map(int,input().split())  # find minimal m >= start with start & ... & m == goal
    if goal==start:
        # m = start already satisfies the equality
        print(start)
    elif goal>start:
        # AND-ing more numbers can never produce a larger value
        print(-1)
    else:
        num1=bin(start)[2:]
        num2=bin(goal)[2:]
        if len(num1)>len(num2):
            # left-pad the goal so both bit strings have equal length
            num2='0'*(len(num1)-len(num2))+num2
        first=False
        diff=-1  # most significant position where start has 1 but goal has 0
        same=-1  # last position before diff where both strings have a 1
        for l in range(len(num1)):
            if num1[l]=='1' and num2[l]=='0':
                if first==False:
                    first=True
                    diff=l
                    break
            if num1[l]=='1' and num2[l]=='1':
                same=l
        nope=False
        # any 1-bit of the goal at or below position diff marks the case impossible
        for d in range(diff,len(num2)):
            if num2[d]=='1':
                print(-1)
                nope=True
                break
        if not nope and diff==0:
            # the leading bit must be cleared: answer is the next power of two
            print(2**len(num1))
            continue
        if not nope and same==diff-1 and same>-1:
            # a shared 1-bit immediately above the bit to clear: reported impossible
            print(-1)
            nope=True
            continue
        if not nope:
            # keep start's 1-bits above position diff, then round position diff
            # up to the next boundary so the AND run clears the remaining bits
            out=0
            for y in range(diff):
                if num1[y]=='1':
                    out+=(2**(len(num1)-y-1))
            out+=(2**(len(num1)-diff))
            print(out)
| Squirtleee/AlgoPractice | Solutions/Interesting Sequence.py | Interesting Sequence.py | py | 1,942 | python | en | code | 0 | github-code | 13 |
12156328110 | import pymongo
import datetime
import customers
def getAllFilms(client):
    """Return (films, 200) listing every film document in the rentals
    collection sorted by _id ascending, or (error message, 500) on failure."""
    try:
        movies_collection = client["rentals"]
        cursor = movies_collection.find().sort("_id", 1)
        films = []
        for doc in cursor:
            films.append({
                "Title": doc["Title"],
                "Category": doc["Category"],
                "id": doc["_id"],
                "Rating": doc["Rating"],
                "Description": doc["Description"],
                "Rental Duration": doc["Rental Duration"],
            })
        return (films, 200)
    except Exception as e:
        print(e)
        return ("Could not retrieve film list", 500)
def getFilmWithID(client, filmID):
    """Fetch one film by id, together with the customers renting it.

    Args:
        client: Mongo handle exposing the "rentals" collection
            (presumably a pymongo Database -- TODO confirm against caller).
        filmID: film id; coerced with int(), so digit strings are accepted.

    Returns:
        ({"info": film fields, "renters": [...]}, 200) on success,
        ("Movie not found", 404) if no document matches,
        ("Could not retrieve film data", 500) on any other failure.
    """
    movies_collection = client["rentals"]
    try:
        movie = movies_collection.find_one({"_id": int(filmID)})
        if(movie is None):
            return("Movie not found", 404)
        # cross-module helper: customers currently renting this title
        renters = customers._getRentersForFilmTitle(client, movie["Title"])
        m_obj = {"Title" : movie["Title"], "Category" : movie["Category"], "id" : movie["_id"],
            "Rating" : movie["Rating"], "Description" : movie["Description"],
            "Rental Duration" : movie["Rental Duration"]}
        return({"info" : m_obj, "renters": renters}, 200)
    except Exception as e:
        print(e)
        return("Could not retrieve film data", 500)
| Ekhemlin/flask_mongo_assesment | backend/films.py | films.py | py | 1,283 | python | en | code | 0 | github-code | 13 |
43594177885 | import requests
from pprint import pprint
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
log = logging.getLogger('root')
# CONSTANT
COINDESK_URL = 'https://api.coindesk.com/v1/bpi/currentprice.json'
def main():
    """Prompt for a bitcoin amount, convert it to USD, and print the result."""
    amount = get_user_bitcoin_value()
    usd_value = get_conversion_to_dollars(amount)
    display(amount, usd_value)
def display(bitcoin_num, conversion_value):
    """Print the conversion result, or an error notice when conversion failed
    (conversion_value is None)."""
    if conversion_value is None:
        print('Error in converting. Please try again later')
    else:
        print(f'With your {bitcoin_num:.2f} of bitcoin, you will be able to get {conversion_value}USD.')
def get_user_bitcoin_value():
    """Prompt until the user enters a valid positive decimal bitcoin amount.

    Returns:
        float: the validated amount (> 0).
    """
    while True:
        try:
            bitcoin_num = float(input('How many bitcoin do you have?\t'))
            if bitcoin_num <= 0:
                raise ValueError('Invalid input. Please enter value that is more than 0')
            return bitcoin_num
        except ValueError:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit; only invalid numeric input should trigger a re-prompt.
            print('Invalid input. Please enter data in decimal form only.')
def get_conversion_rate():
    """Return the current USD-per-bitcoin rate from CoinDesk, or None when the
    exchange data could not be fetched."""
    payload = get_current_exchange_rate()
    return payload['bpi']['USD']['rate_float'] if payload is not None else None
def get_conversion_to_dollars(bitcoin_num):
    """Convert a bitcoin amount to USD using the live rate; None on failure."""
    rate = get_conversion_rate()
    if rate is None:
        return None
    return bitcoin_num * rate
def get_current_exchange_rate():
    """Fetch the CoinDesk current-price JSON document.

    Returns:
        dict parsed from the API response, or None on any network/HTTP/parse
        failure.
    """
    try:
        response = requests.get(COINDESK_URL)
        response.raise_for_status()
        data = response.json()
        return data
    except Exception as e:
        # log.error, not log.debug: the module configures logging at INFO level,
        # so the previous debug call was silently dropped and failures vanished.
        log.error(f'Error occurred while requesting data from \'{COINDESK_URL}\'. More detail: {e}')
        return None
if __name__ == '__main__':
main()
| xd3262nd/lab-8 | bitcoin.py | bitcoin.py | py | 1,802 | python | en | code | 0 | github-code | 13 |
34809869814 | import codecs
import json
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
IMAGE_SIZE = 64
feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64, default_value=-1),
'data': tf.io.FixedLenFeature([], tf.string)
}
def json_labels_read_from_file(file_path):
    """Load a JSON label mapping from `file_path` (UTF-8), echo it, return it."""
    with codecs.open(file_path, "r", "utf-8") as fp:
        labels = json.load(fp)
    print("读取出的数据为:{}".format(labels))
    return labels
def read_and_decode(example_string):
    '''
    Parse one serialized Example from a TFRecord file into (image, label).

    The raw bytes are decoded to uint8, reshaped to
    (IMAGE_SIZE, IMAGE_SIZE, 3), and normalized to [-0.5, 0.5] float32;
    the label is cast to int32.
    '''
    feature_dict = tf.io.parse_single_example(example_string, feature_description)
    image = feature_dict['data']
    label = feature_dict['label']
    image = tf.io.decode_raw(image, tf.uint8)
    print(image.shape)  # NOTE(review): leftover debug print
    image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 3])
    print(image.shape)  # NOTE(review): leftover debug print
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5  # normalize to [-0.5, 0.5]
    label = tf.cast(label, dtype='int32')
    return image, label
def load_dataset(file_name):
    """Open a TFRecord file and map every record through read_and_decode."""
    return tf.data.TFRecordDataset(file_name).map(read_and_decode)
# Unify input image size: pad to a square, then scale to the target dimensions.
def resize_image(image, height=IMAGE_SIZE, width=IMAGE_SIZE):
    """Pad `image` with black borders into a square, then resize it to
    (height, width) so aspect ratio is preserved before scaling."""
    h, w, _ = image.shape
    longest_edge = max(h, w)
    pad_top = pad_bottom = pad_left = pad_right = 0
    # distribute the missing pixels evenly on the short side
    if h < longest_edge:
        extra = longest_edge - h
        pad_top = extra // 2
        pad_bottom = extra - pad_top
    elif w < longest_edge:
        extra = longest_edge - w
        pad_left = extra // 2
        pad_right = extra - pad_left
    # fill color for the added border
    BLACK = [0, 0, 0]
    squared = cv2.copyMakeBorder(image, pad_top, pad_bottom, pad_left, pad_right,
                                 cv2.BORDER_CONSTANT, value=BLACK)
    return cv2.resize(squared, (height, width))
def check_dataset(dataset):
    """Show the first 16 test images with their label names in a 4x4 grid.

    NOTE(review): relies on the module-level `classes` mapping and assumes
    `dataset` exposes `test_images` (normalized to [-0.5, 0.5]) and one-hot
    `test_labels`; it also denormalizes `test_images` in place.
    """
    dataset.test_images[0]  # NOTE(review): no-op access, likely leftover debug
    print(dataset.test_labels[0])
    for i in range(16):
        print(i // 4)
        plt.subplot(4, 4, i + 1)
        # undo the (1/255 - 0.5) normalization for display
        dataset.test_images[i] = (dataset.test_images[i] + 0.5) * 255
        image = tf.cast(dataset.test_images[i], tf.uint8)
        label = dataset.test_labels[i]
        print(label)
        label = label.tolist()
        # index of the maximum entry = class id
        label_index = label.index(max(label))
        print(label_index)
        name = classes.get(str(label_index))
        plt.title(name)
        plt.imshow(image)
    plt.show()
if __name__ == '__main__':
    # Smoke test: print the TF version, load the label map, open the TFRecords.
    print(tf.__version__)
    classes = json_labels_read_from_file('dataset/labels.json')
    print(classes.get('0'))
    dataset = load_dataset('dataset/train.tfrecords')
    #for image, label in dataset:
        #print(image.numpy(), label.numpy())
        #print(label.numpy())
| 1984xunhuan/face_classify | load_dataset.py | load_dataset.py | py | 2,912 | python | en | code | 3 | github-code | 13 |
35451305782 | # -*- coding:utf-8 -*-
# REG_PATTERN
# Regexes for extracting numeric ids from bilibili URL paths and for matching
# the API endpoints this spider cares about.
REG_PATTERN = {
    'video': r'/video/av(\d+)',            # video page, captures the av id
    'article': r'/read/cv(\d+)',           # article page, captures the cv id
    'user': r'/(\d+)/',                    # user space, captures the uid
    'tag': r'/tag/(\d+)',                  # tag page, captures the tag id
    'online': r'/x/web-interface/online',  # online-count API endpoint
    'reply': r'/reply'                     # comment/reply endpoint
}
# Bilibili category maps
CATEGORY_MAP = {
"douga": {
"name": "动画",
"code": "douga",
"id": 1
},
"anime": {
"name": "番剧",
"code": "anime",
"id": 2
},
"guochuang": {
"name": "国创",
"code": "guochuang",
"id": 3
},
"music": {
"name": "音乐",
"code": "music",
"id": 4
},
"dance": {
"name": "跳舞",
"code": "dance",
"id": 5
},
"game": {
"name": "游戏",
"code": "game",
"id": 6
},
"technology": {
"name": "科技",
"code": "technology",
"id": 7
},
"life": {
"name": "生活",
"code": "life",
"id": 8
},
"kichiku": {
"name": "鬼畜",
"code": "kichiku",
"id": 9
},
"fashion": {
"name": "时尚",
"code": "fashion",
"id": 10
},
"ad": {
"name": "广告",
"code": "ad",
"id": 11
},
"ent": {
"name": "娱乐",
"code": "ent",
"id": 12
},
"cinephile": {
"name": "影视",
"code": "cinephile",
"id": 13
},
"documentary": {
"name": "纪录片",
"code": "documentary",
"id": 14
},
"movie": {
"name": "电影",
"code": "movie",
"id": 15
}
} | SatoKoi/BilibiliSpider | BilibiliSpider/map/defaults.py | defaults.py | py | 1,654 | python | en | code | 15 | github-code | 13 |
32293934235 | import os
import sys
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import init_ops
from tensorflow.contrib.layers.python.layers import regularizers
module_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")
if module_path not in sys.path:
sys.path.append(module_path)
from datasets.batch_generator import datasets
slim = tf.contrib.slim
tf.reset_default_graph()
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
# Contants
image_channels = 3
time_frames_to_consider = 4
time_frames_to_predict = 4
interval=4 # frames to jump !
heigth_train= 64
width_train= 64
custom_test_size=[160,240]
heigth_test, width_test = custom_test_size
# regularizer !
l2_val = 0.00005
# Adam optimizer !
adam_learning_rate = 0.0004
# Tensorboard images to show
batch_size = 8
number_of_images_to_show = 4
assert number_of_images_to_show <= batch_size, "images to show should be less !"
timesteps=16
file_path = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(file_path, "../../data/")
log_dir_file_path = os.path.join(file_path, "../../logs/")
model_save_file_path = os.path.join(file_path, "../../checkpoint/")
output_video_save_file_path = os.path.join(file_path, "../../output/")
iterations = "iterations/"
best = "best/"
checkpoint_iterations = 100
best_model_iterations = 100
test_model_iterations = 25
best_loss = float("inf")
heigth, width = heigth_train, width_train
channels = 3
assert timesteps>=time_frames_to_consider and timesteps>=time_frames_to_predict, "time steps must be greater !"
#==================== COPIED CODE ===============================================
#
# TENSORBOARD VISUALIZATION FOR SHARPNESS AND (Peak Signal to Noise Ratio){PSNR}
#=================================================================================
def log10(t):
    """
    Calculates the base-10 log of each element in t.
    @param t: The tensor from which to calculate the base-10 log.
    @return: A tensor with the base-10 log of each element in t.
    """
    # change of base: log10(x) = ln(x) / ln(10)
    ln_t = tf.log(t)
    ln_10 = tf.log(tf.constant(10, dtype=ln_t.dtype))
    return ln_t / ln_10
def psnr_error(gen_frames, gt_frames):
    """
    Computes the Peak Signal to Noise Ratio error between the generated images and the ground
    truth images.
    @param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
                       generator model.
    @param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
                      each frame in gen_frames.
    @return: A scalar tensor. The mean Peak Signal to Noise Ratio error over each frame in the
             batch.
    """
    shape = tf.shape(gen_frames)
    num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
    square_diff = tf.square(gt_frames - gen_frames)
    # PSNR = 10 * log10(peak^2 / MSE); the peak signal is taken as 1 here,
    # i.e. frames are expected in a unit range.
    # NOTE(review): the decoder's final activation is tanh ([-1, 1]); confirm
    # the intended scaling.
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(square_diff, [1, 2, 3])))
    return tf.reduce_mean(batch_errors)
def sharp_diff_error(gen_frames, gt_frames):
    """
    Computes the Sharpness Difference error between the generated images and the ground truth
    images.
    @param gen_frames: A tensor of shape [batch_size, height, width, 3]. The frames generated by the
                       generator model.
    @param gt_frames: A tensor of shape [batch_size, height, width, 3]. The ground-truth frames for
                      each frame in gen_frames.
    @return: A scalar tensor. The Sharpness Difference error over each frame in the batch.
    """
    shape = tf.shape(gen_frames)
    num_pixels = tf.to_float(shape[1] * shape[2] * shape[3])
    # gradient difference
    # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
    # TODO: Could this be simplified with one filter [[-1, 2], [0, -1]]?
    # identity blocks keep the three color channels independent
    pos = tf.constant(np.identity(3), dtype=tf.float32)
    neg = -1 * pos
    filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
    filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
    strides = [1, 1, 1, 1]  # stride of (1, 1)
    padding = 'SAME'
    # absolute spatial gradients of generated and ground-truth frames
    gen_dx = tf.abs(tf.nn.conv2d(gen_frames, filter_x, strides, padding=padding))
    gen_dy = tf.abs(tf.nn.conv2d(gen_frames, filter_y, strides, padding=padding))
    gt_dx = tf.abs(tf.nn.conv2d(gt_frames, filter_x, strides, padding=padding))
    gt_dy = tf.abs(tf.nn.conv2d(gt_frames, filter_y, strides, padding=padding))
    gen_grad_sum = gen_dx + gen_dy
    gt_grad_sum = gt_dx + gt_dy
    grad_diff = tf.abs(gt_grad_sum - gen_grad_sum)
    # same PSNR-style scaling as psnr_error, applied to the gradient difference
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * tf.reduce_sum(grad_diff, [1, 2, 3])))
    return tf.reduce_mean(batch_errors)
## =================== COPIED CODE ENDS ======================
def l2_loss(generated_frames, expected_frames):
    """Mean (over scales) of the element-wise L2 loss between the generated
    and expected frame lists."""
    per_scale = [tf.nn.l2_loss(tf.subtract(gen, exp))
                 for gen, exp in zip(generated_frames, expected_frames)]
    return tf.reduce_mean(tf.stack(per_scale))
def gdl_loss(generated_frames, expected_frames, alpha=2):
    """
    Gradient Difference Loss: sum over scales of the mismatch between the
    spatial gradients (left/down differences) of generated vs. expected frames.

    @param generated_frames: list of [batch, height, width, 3] tensors.
    @param expected_frames: list of matching ground-truth tensors.
    @param alpha: exponent applied to each gradient difference.
    @return: scalar tensor, mean over scales of the summed gradient differences.
    """
    scale_losses = []
    # `range` instead of the Python-2-only `xrange`: the rest of this file is
    # Python-3 compatible and `xrange` raises NameError there.
    for i in range(len(generated_frames)):
        # create filters [-1, 1] and [[1],[-1]] for diffing to the left and down respectively.
        pos = tf.constant(np.identity(3), dtype=tf.float32)
        neg = -1 * pos
        filter_x = tf.expand_dims(tf.stack([neg, pos]), 0)  # [-1, 1]
        filter_y = tf.stack([tf.expand_dims(pos, 0), tf.expand_dims(neg, 0)])  # [[1],[-1]]
        strides = [1, 1, 1, 1]  # stride of (1, 1)
        padding = 'SAME'
        gen_dx = tf.abs(tf.nn.conv2d(generated_frames[i], filter_x, strides, padding=padding))
        gen_dy = tf.abs(tf.nn.conv2d(generated_frames[i], filter_y, strides, padding=padding))
        gt_dx = tf.abs(tf.nn.conv2d(expected_frames[i], filter_x, strides, padding=padding))
        gt_dy = tf.abs(tf.nn.conv2d(expected_frames[i], filter_y, strides, padding=padding))
        grad_diff_x = tf.abs(gt_dx - gen_dx)
        grad_diff_y = tf.abs(gt_dy - gen_dy)
        scale_losses.append(tf.reduce_sum((grad_diff_x ** alpha + grad_diff_y ** alpha)))
    # condense into one tensor and avg
    return tf.reduce_mean(tf.stack(scale_losses))
def total_loss(generated_frames, expected_frames, lambda_gdl=1.0, lambda_l2=1.0):
    """Weighted sum of the gradient-difference and L2 losses."""
    gdl_term = lambda_gdl * gdl_loss(generated_frames, expected_frames)
    l2_term = lambda_l2 * l2_loss(generated_frames, expected_frames)
    return gdl_term + l2_term
#===================================================================
# Model
#===================================================================
class SkipAutoEncoder:
    """Convolutional autoencoder with skip connections for next-frame prediction.

    Builds two weight-sharing graphs: one at training resolution and one at the
    (larger) test resolution. The input is the last `time_frames_to_consider`
    frames stacked along the channel axis; the output is the predicted next
    frame. Loss = GDL + L2 against the ground-truth frame, optimized with Adam.
    """
    def __init__(self, heigth_train, width_train, heigth_test, width_test):
        self.heigth_train = heigth_train
        self.width_train = width_train
        self.heigth_test = heigth_test
        self.width_test = width_test
        # per-layer encoder configuration (feature maps / kernels / strides)
        self.feature_maps = [32,64,128,256,512]
        self.kernel_size = [3,3,3,3,3]
        self.stride_size = [1,2,2,2,2]
        assert len(self.kernel_size) == len(self.feature_maps)==len(self.stride_size), "lens must be equal"
        # Placeholders for inputs and outputs ... !
        self.input_train = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_train, self.width_train, time_frames_to_consider * image_channels])
        self.output_train = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_train, self.width_train, image_channels])
        self.input_test = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_test, self.width_test, time_frames_to_consider * image_channels])
        self.output_test = tf.placeholder(dtype=tf.float32, shape=[None, self.heigth_test, self.width_test, image_channels])
        self.model_output = self.create_graph(self.input_train, self.output_train,reuse=None)
        # reuse graph at time of test !
        self.model_output_test = self.create_graph(self.input_test, self.output_test,reuse=True)
        self.loss()
        self.tf_summary()
    def conv_layer(self, conv_input,reuse):
        """Encoder: stack of (strided) convs.

        Returns (bottleneck tensor, list of each layer's output for skips)."""
        layers_for_skip = []
        net = conv_input
        with tf.variable_scope('conv_autoencoder',reuse=reuse):
            for i, (each_feat_map, each_kernel_size, each_stride) in enumerate(zip(self.feature_maps, self.kernel_size, self.stride_size)):
                net = slim.conv2d(net, each_feat_map, [each_kernel_size, each_kernel_size], stride=each_stride,
                                  scope='conv_'+str(i), weights_initializer=trunc_normal(0.01),
                                  weights_regularizer=regularizers.l2_regularizer(l2_val))
                layers_for_skip.append(net)
        return net, layers_for_skip
    def deconv_layer(self, deconv_input, layers_to_skip, reuse):
        """Decoder: transposed convs mirroring the encoder; before every layer
        but the first, the matching encoder output is concatenated on the
        channel axis (skip connection). Final layer maps to image channels
        through tanh."""
        # mirror the encoder configuration (deepest layer excluded; last layer
        # emits image_channels with stride 1)
        feature_maps = self.feature_maps[:-1][::-1] + [image_channels,]
        kernel_size = self.kernel_size[:-1][::-1] + [3,]
        stride_size = self.stride_size[1:][::-1] + [1,]
        assert len(kernel_size) == len(feature_maps)==len(stride_size), "lens must be equal"
        # encoder outputs in reverse order, bottleneck dropped
        layers_to_skip_d = layers_to_skip[:-1][::-1]
        net = deconv_input
        with tf.variable_scope('deconv_autoencoder',reuse=reuse):
            for i, (each_feat_map, each_kernel_size, each_stride) in enumerate(zip(feature_maps, kernel_size, stride_size)):
                activation = tf.nn.relu
                if i==(len(stride_size)-1):
                    # last layer !
                    activation = tf.nn.tanh
                if i>0:
                    # not first layer !
                    net = tf.concat([net,layers_to_skip_d[i-1]],axis=3)
                net = slim.conv2d_transpose(net, each_feat_map, [each_kernel_size, each_kernel_size], stride=each_stride,
                                            activation_fn = activation,
                                            scope='deconv_'+str(i), weights_initializer=trunc_normal(0.01),
                                            weights_regularizer=regularizers.l2_regularizer(l2_val))
        return net
    def create_graph(self, input_data, ground_truths, reuse):
        """Encoder followed by skip-connected decoder.

        `ground_truths` is accepted but unused here (kept for call symmetry)."""
        encoded_op, layers_to_skip = self.conv_layer(input_data, reuse=reuse)
        return self.deconv_layer(encoded_op, layers_to_skip,reuse=reuse)
    def loss(self):
        """Build the combined GDL+L2 loss and the Adam training op."""
        # gdl and l2 loss !
        self.combined_loss = total_loss([self.model_output], [self.output_train])
        self.optimizer = tf.train.AdamOptimizer(adam_learning_rate)
        global_step = tf.Variable(0,name="global_step_var",trainable=False)
        self.step = self.optimizer.minimize(self.combined_loss, global_step=global_step)
    def tf_summary(self):
        """Create merged train/val/test summary ops: losses, PSNR, sharpness
        difference, and sample input/output images."""
        train_loss = tf.summary.scalar("train_loss", self.combined_loss)
        val_loss = tf.summary.scalar("val_loss", self.combined_loss)
        with tf.variable_scope('image_measures'):
            psnr_error_train = psnr_error(self.model_output, self.output_train)
            psnr_error_train_s = tf.summary.scalar("train_psnr",psnr_error_train)
            psnr_error_val_s = tf.summary.scalar("val_psnr",psnr_error_train)
            sharpdiff_error_train = sharp_diff_error(self.model_output,self.output_train)
            sharpdiff_error_train_s = tf.summary.scalar("train_shardiff",sharpdiff_error_train)
            sharpdiff_error_val_s = tf.summary.scalar("val_shardiff",sharpdiff_error_train)
            images_to_show_train = []
            images_to_show_val = []
            images_to_show_train.append(tf.summary.image('train_output', self.model_output,
                                                         number_of_images_to_show))
            images_to_show_train.append(tf.summary.image('train_ground_truth', self.output_train,
                                                         number_of_images_to_show))
            images_to_show_val.append(tf.summary.image('val_output', self.model_output,
                                                       number_of_images_to_show))
            images_to_show_val.append(tf.summary.image('val_ground_truth', self.output_train,
                                                       number_of_images_to_show))
            psnr_error_test = psnr_error(self.model_output_test, self.output_test)
            psnr_error_test_s = tf.summary.scalar("test_psnr",psnr_error_test)
            sharpdiff_error_test = sharp_diff_error(self.model_output_test,self.output_test)
            sharpdiff_error_test_s = tf.summary.scalar("test_shardiff",sharpdiff_error_test)
            images_to_show_test = []
            images_to_show_test.append(tf.summary.image('test_output', self.model_output_test,
                                                        number_of_images_to_show))
            images_to_show_test.append(tf.summary.image('test_ground', self.output_test,
                                                        number_of_images_to_show))
            self.train_summary_merged = tf.summary.merge([train_loss, psnr_error_train_s, sharpdiff_error_train_s]+images_to_show_train)
            self.test_summary_merged = tf.summary.merge([psnr_error_test_s, sharpdiff_error_test_s]+images_to_show_test)
            self.val_summary_merged = tf.summary.merge([val_loss, psnr_error_val_s, sharpdiff_error_val_s]+images_to_show_val)
# ======================== MODEL ENDS ========================
def log_directory_creation(sess):
    """Reset the TensorBoard log directory and prepare checkpoint/output dirs.

    If the checkpoint directory already exists, the "iterations" checkpoint is
    restored into `sess`; otherwise fresh checkpoint folders are created.
    NOTE(review): existing logs are always deleted on startup.
    """
    if tf.gfile.Exists(log_dir_file_path):
        tf.gfile.DeleteRecursively(log_dir_file_path)
    tf.gfile.MakeDirs(log_dir_file_path)
    # model save directory
    if os.path.exists(model_save_file_path):
        x_folder = iterations
        print ("loading model from ",x_folder)
        restore_model_session(sess, x_folder + "skip_autoencoder_model")
    else:
        os.makedirs(model_save_file_path + iterations)
        os.makedirs(model_save_file_path + best)
    # output dir creation
    if not os.path.exists(output_video_save_file_path):
        os.makedirs(output_video_save_file_path)
def save_model_session(sess, file_name):
    """Persist the session's variables under the checkpoint directory.

    Args:
        sess: active tf.Session whose variables are saved.
        file_name: path suffix appended to model_save_file_path.
    """
    saver = tf.train.Saver()
    # the returned checkpoint path was previously bound to an unused local
    saver.save(sess, model_save_file_path + file_name)
def restore_model_session(sess, file_name):
    """Restore session variables from a previously saved checkpoint."""
    restorer = tf.train.Saver()
    restorer.restore(sess, model_save_file_path + file_name)
    print ("graph loaded!")
def is_correct_batch_shape(X_batch, y_batch, info="train",heigth=heigth, width=width):
    """True when both batches are non-None and shaped
    (batch, timesteps, heigth, width, channels); otherwise warn and return False.
    `info` labels the batch in the warning ("train", "val", "test")."""
    expected = (timesteps, heigth, width, channels)
    bad_batch = (X_batch is None or y_batch is None
                 or X_batch.shape[1:] != expected
                 or y_batch.shape[1:] != expected)
    if bad_batch:
        print ("Warning: skipping this " + info + " batch because of shape")
        return False
    return True
def images_to_channels(X_batch):
    """
    Reorder (Batch, TimeSteps, H, W, C) into (Batch, H, W, C * TimeSteps):
    time is moved to the last axis and merged with channels, which is the
    input layout the multi-scale architecture expects.
    """
    moved = X_batch.transpose(0, 2, 3, 4, 1)              # (B, H, W, C, T)
    return moved.reshape(list(moved.shape[:-2]) + [-1])   # merge C and T
def remove_oldest_image_add_new_image(X_batch,y_batch):
    """
    Slide the channel-stacked frame window one step during recursive
    prediction: drop the oldest frame's channels from the front and append
    the newly predicted frame's channels at the back.
    """
    trimmed = X_batch[:, :, :, channels:]
    return np.append(trimmed, y_batch, axis=3)
def validation(sess, model, data, val_writer, val_step):
    """Evaluate the model on the validation split with recursive multi-step
    prediction: each predicted frame is fed back as input for the next step.

    Returns:
        (updated val_step, mean combined loss) -- loss is inf when no valid
        batch was processed.
    """
    loss = []
    for X_batch, y_batch, _ in data.val_next_batch():
        if not is_correct_batch_shape(X_batch, y_batch, "val"):
            print ("validation batch is skipping ... ")
            continue
        X_input = X_batch[:,:time_frames_to_consider]
        X_input = images_to_channels(X_input)
        # ground truth ... for loss calculation ... !
        output_train = X_batch[:,time_frames_to_consider,:,:,:]
        Y_output = np.zeros((len(X_input),time_frames_to_predict,heigth,width,channels))
        for each_time_step in range(time_frames_to_predict):
            # gen predict on real data => predicated
            y_current_step, combined_loss, val_summary_merged = sess.run([model.model_output, model.combined_loss,model.val_summary_merged], feed_dict={
                    model.input_train : X_input,
                    model.output_train : output_train})
            loss.append(combined_loss)
            val_writer.add_summary(val_summary_merged, val_step)
            val_step += 1
            Y_output[:,each_time_step,:,:,:] = y_current_step
            # slide the window: drop the oldest frame, append the prediction
            X_input = remove_oldest_image_add_new_image(X_input,y_current_step)
            output_train = X_batch[:,time_frames_to_predict+each_time_step+1,:,:,:]
    if len(loss)==0:
        return (val_step, float("inf"))
    return (val_step, sum(loss)/float(len(loss)))
def test(sess, model, data, test_writer, test_step, is_store_output=False):
    """Run recursive multi-step prediction on the custom-size test clips.

    Logs a test summary per predicted frame; when `is_store_output` is True,
    also writes generated/expected videos and GIFs to disk.

    Returns:
        the updated test_step counter.
    """
    for X_batch, y_batch, file_names in data.get_custom_test_data():
        if not is_correct_batch_shape(X_batch, y_batch, "test",heigth=custom_test_size[0], width=custom_test_size[1]):
            print ("test batch is skipping ... ")
            continue
        X_input = X_batch[:,:time_frames_to_consider]
        X_input = images_to_channels(X_input)
        # ground truth ... for loss calculation ... !
        output_train = X_batch[:,time_frames_to_consider,:,:,:]
        # store output ...
        Y_output = np.zeros((len(X_batch),time_frames_to_predict,custom_test_size[0],custom_test_size[1],channels))
        for each_time_step in range(time_frames_to_predict):
            # gen predict on real data => predicated
            y_current_step, test_summary_merged = sess.run([model.model_output_test, model.test_summary_merged], feed_dict={
                    model.input_test : X_input,
                    model.output_test : output_train})
            test_writer.add_summary(test_summary_merged, test_step)
            test_step += 1
            Y_output[:,each_time_step,:,:,:] = y_current_step
            # slide the window: drop the oldest frame, append the prediction
            X_input = remove_oldest_image_add_new_image(X_input,y_current_step)
            output_train = X_batch[:,time_frames_to_predict+each_time_step+1,:,:,:]
        if is_store_output:
            # save with filnames
            expected_frames = X_batch[:,time_frames_to_consider:time_frames_to_consider+time_frames_to_predict,:,:,:]
            # image post processing is happening inside of store ...
            # store
            store_file_names_gen = data.frame_ext.generate_output_video(Y_output, file_names, ext_add_to_file_name="_generated_large")
            store_file_names_exp = data.frame_ext.generate_output_video(expected_frames, file_names, ext_add_to_file_name="_expected_large")
            speed = 1
            data.frame_ext.generate_gif_videos(store_file_names_gen,speed=speed)
            data.frame_ext.generate_gif_videos(store_file_names_exp,speed=speed)
    return test_step
def test_wrapper():
    """Standalone inference entry point: build the model, restore the latest
    checkpoint (inside log_directory_creation) and run the test pass, saving
    the generated output videos to disk."""
    with tf.Session() as sess:
        model = SkipAutoEncoder(heigth_train, width_train, heigth_test, width_test)
        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)
        # clear logs !
        log_directory_creation(sess)
        # summary !
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test", sess.graph)
        global_step = 0
        test_count_iter = 0
        val_loss_seen = float("inf")
        # data read iterator
        data = datasets(batch_size=batch_size, height=heigth, width=width,
                        custom_test_size=custom_test_size,time_frame=timesteps, interval=interval)
        test_count_iter = test(sess, model, data, test_writer, test_count_iter, is_store_output=True)
def train():
    """Main training loop.

    Iterates over training batches indefinitely, optimizing the model on each
    sliding window of frames inside a clip; periodically checkpoints, runs
    validation (keeping the best model by validation loss) and runs the test
    pass. Summaries go to the train/val/test TensorBoard writers.
    """
    global best_loss
    with tf.Session() as sess:
        model = SkipAutoEncoder(heigth_train, width_train, heigth_test, width_test)
        # Initialize the variables (i.e. assign their default value)
        init = tf.global_variables_initializer()
        sess.run(init)
        # clear logs !
        log_directory_creation(sess)
        # summary !
        train_writer = tf.summary.FileWriter(log_dir_file_path + "train", sess.graph)
        test_writer = tf.summary.FileWriter(log_dir_file_path + "test", sess.graph)
        val_writer = tf.summary.FileWriter(log_dir_file_path + "val", sess.graph)
        global_step = 0
        train_count_iter = 0
        val_count_iter = 0
        test_count_iter = 0
        val_loss_seen = float("inf")
        while True:
            try:
                # data read iterator
                data = datasets(batch_size=batch_size, height=heigth, width=width,
                                custom_test_size=custom_test_size,time_frame=timesteps, interval=interval)
                for X_batch, y_batch, _ in data.train_next_batch():
                    # print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
                    if not is_correct_batch_shape(X_batch, y_batch, "train"):
                        # global step not increased !
                        continue
                    # one optimization step per sliding window inside the clip
                    for each_timesteps in range(time_frames_to_consider, timesteps-time_frames_to_consider):
                        input_train = X_batch[:, each_timesteps-time_frames_to_consider:each_timesteps, :,:,:]
                        input_train = images_to_channels(input_train)
                        output_train = X_batch[:,each_timesteps,:,:,:]
                        _, train_summary = sess.run([model.step ,model.train_summary_merged], feed_dict={
                            model.input_train : input_train,
                            model.output_train : output_train})
                        train_writer.add_summary(train_summary, train_count_iter)
                        train_count_iter += 1
                    if global_step % checkpoint_iterations == 0:
                        save_model_session(sess, iterations + "skip_autoencoder_model")
                    if global_step % best_model_iterations == 0:
                        val_count_iter, curr_loss = validation(sess, model, data, val_writer, val_count_iter)
                        if curr_loss < val_loss_seen:
                            val_loss_seen = curr_loss
                            save_model_session(sess, best + "skip_autoencoder_model")
                    if global_step % test_model_iterations == 0:
                        test_count_iter = test(sess, model, data, test_writer, test_count_iter)
                    print ("Iteration ", global_step, " best_loss ", val_loss_seen)
                    global_step += 1
            except Exception as e:
                # Was a bare `except:`, which also swallowed KeyboardInterrupt/
                # SystemExit (making the loop unstoppable) and hid the failure
                # reason; now Ctrl-C works and the error is reported.
                print ("error occur ... skipping ... !", e)
        # NOTE: unreachable while the loop above runs forever
        train_writer.close()
        test_writer.close()
        val_writer.close()
def main():
    """Script entry point: run the training loop."""
    train()

if __name__ == '__main__':
    main()
| kabrapratik28/DeepVideos | model/model_skip_autoencoder.py | model_skip_autoencoder.py | py | 23,767 | python | en | code | 4 | github-code | 13 |
73729121296 | import cv2
import numpy as np
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
# Class names indexed by the label value used in the segmentation masks.
CLASS_NAMES = ("None",
               "Road",
               "Sign",
               "Car",
               "Pedestrian")
# Per-class color components, indexed by class id.
colorB = [0, 0, 255, 255, 69]
colorG = [0, 0, 255, 0, 47]
colorR = [0, 255, 0, 0, 142]
N_CLASSES = len(colorB)
CLASS_COLOR = list()
for i in range(0, N_CLASSES):
    # NOTE(review): entries are stored [R, G, B] (the source lists are named
    # B/G/R) -- confirm the intended channel order against the display code.
    CLASS_COLOR.append([colorR[i], colorG[i], colorB[i]])
COLORS = np.array(CLASS_COLOR, dtype="float32")
def give_color_to_seg_img(seg, n_classes):
    """Map a label image (H, W) or (H, W, C) to a float color image using the
    module-level COLORS palette (components scaled to [0, 1])."""
    if len(seg.shape) == 3:
        # multi-channel mask: labels live in the first channel
        seg = seg[:, :, 0]
    colored = np.zeros((seg.shape[0], seg.shape[1], 3)).astype('float')
    palette = COLORS
    for cls in range(n_classes):
        mask = (seg == cls)
        for ch in range(3):
            colored[:, :, ch] += mask * (palette[cls][ch] / 255.0)
    return colored
def NormalizeImageArr(path, H, W):
    """Read a color image from `path`, resize it and scale pixels to [0, 1].

    NOTE(review): cv2.resize takes dsize as (width, height), so here H is used
    as the target width and W as the height. Harmless while both are equal
    (IMAGE_SIZE), but confirm the argument order before using non-square sizes.
    """
    NORM_FACTOR = 255
    img = cv2.imread(path, 1)
    img = cv2.resize(img, (H, W), interpolation=cv2.INTER_NEAREST)
    img = img.astype(np.float32)
    img = img/NORM_FACTOR
    return img
def LoadSegmentationArr(path , nClasses, width ,height, train_is=True):
    """Load a ground-truth segmentation mask.

    When train_is is True, returns a one-hot float array of shape
    (height, width, nClasses) built from the mask's first channel; otherwise
    returns the raw single-channel mask resized to (width, height).
    """
    seg_labels = np.zeros((height, width, nClasses))
    if train_is:
        img = cv2.imread(path, 1)
        img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)
        img = img[:, : , 0]
        for c in range(nClasses):
            # binary plane per class id
            seg_labels[: , : , c ] = (img == c ).astype(int)
        return seg_labels
    else:
        img = cv2.imread(path, 0)
        img = cv2.resize(img, (width, height), interpolation=cv2.INTER_NEAREST)
        return img
def IoU(Yi, y_predi):
    """Print per-class IoU statistics and return the mean IoU.

    IoU per class = TP / (TP + FP + FN).

    Fixes:
    - classes absent from both ground truth and prediction previously
      raised ZeroDivisionError; they now score 0.0;
    - the computed mean IoU is now returned (previously None), which is
      backward compatible for callers that ignored the return value.

    Args:
        Yi: ground-truth label array of integer class ids.
        y_predi: predicted label array, same shape as Yi.

    Returns:
        float: mean IoU over classes 0..max(Yi).
    """
    IoUs = []
    Nclass = int(np.max(Yi)) + 1
    print(Nclass)
    for c in range(Nclass):
        TP = np.sum((Yi == c) & (y_predi == c))
        FP = np.sum((Yi != c) & (y_predi == c))
        FN = np.sum((Yi == c) & (y_predi != c))
        denom = float(TP + FP + FN)
        IoU = TP / denom if denom else 0.0  # guard: class absent everywhere
        print("class (%2d) %12.12s: #TP=%7.0f, #FP=%7.0f, #FN=%7.0f, IoU=%4.3f" % (c, CLASS_NAMES[c], TP, FP, FN, IoU))
        IoUs.append(IoU)
    mIoU = np.mean(IoUs)
    print("_________________")
    print("Mean IoU: {:4.3f}".format(mIoU))
    return mIoU
# Visualize the model performance
def visualize_model_performance(X_test, y_pred1_i, y_test1_i, n_classes):
    """Save a side-by-side figure (input / prediction / ground truth) for
    every test sample to output/output_<i>.png and display it.

    Assumes X_test pixel values lie in [-1, 1] — TODO confirm against the
    preprocessing pipeline.
    """
    for i in range(len(X_test)):
        rgb = (X_test[i] + 1) * (255.0 / 2)  # undo [-1, 1] normalization
        predicted = y_pred1_i[i]
        truth = y_test1_i[i]
        fig = plt.figure(figsize=(10, 30))
        panels = [
            (rgb / 255.0, "original"),
            (give_color_to_seg_img(predicted, n_classes), "predicted class"),
            (give_color_to_seg_img(truth, n_classes), "true class"),
        ]
        for col, (image, title) in enumerate(panels, start=1):
            ax = fig.add_subplot(1, 3, col)
            ax.imshow(image)
            ax.set_title(title)
        plt.savefig("output/output_" + str(i) + ".png")
        plt.show()
| madara-tribe/SetupSystems | ML/segmentation/IOUScore/utils.py | utils.py | py | 3,251 | python | en | code | 0 | github-code | 13 |
7290416706 | #!/usr/bin/env python3
import xml.etree.cElementTree as ET
import re
from matplotlib import pyplot as plt
import numpy as np
from svg.path import parse_path
from functools import lru_cache
# Parse the SVG once at import time; namespaces are stripped from every tag
# so elements can be addressed with plain names (e.g. 'g', 'path', 'tspan').
it = ET.iterparse('RS1096.svg')
for _, el in it:
    if '}' in el.tag:
        el.tag = el.tag.split('}', 1)[1]  # strip all namespaces
root = it.root
# Child -> parent lookup table (ElementTree has no parent pointers).
# Fix: Element.getiterator() was removed in Python 3.9; iter() is the
# documented replacement with identical traversal order.
parent_map = dict((c, p) for p in root.iter() for c in p)
def ancestor_has_transform(element):
    """Return True if any element on the path from `element` up to the root
    carries a 'transform' attribute.

    Note: the walk tests membership in parent_map first, so the root
    element's own attributes are never inspected (matching the original
    behavior).
    """
    node = element
    while node in parent_map:
        if node.get('transform') is not None:
            return True
        node = parent_map[node]
    return False
def get_matrix(element):
    """Return the net 3x3 homogeneous transform matrix of an SVG element,
    composing its own 'transform' attribute with every ancestor transform.

    Only 'matrix', 'translate' and 'scale' transforms are handled; rotate
    and skew attributes would fall through and leave the identity matrix.
    """
    matrix = np.matrix([ [1,0,0],[0,1,0],[0,0,1] ])
    if 'transform' in element.attrib.keys():
        matrix_text = element.attrib['transform']
        # Pull every signed decimal number out of the transform string.
        matrix_values = [float(val) for val in re.findall(r'[-+]?[0-9]*\.?[0-9]+', matrix_text)]
        if matrix_text.startswith('matrix'):
            # SVG matrix(a b c d e f) maps to [[a c e], [b d f], [0 0 1]].
            matrix = np.matrix([ [matrix_values[0], matrix_values[2], matrix_values[4]] , \
                                 [matrix_values[1], matrix_values[3], matrix_values[5]] , \
                                 [0, 0, 1] ])
        if matrix_text.startswith('translate'):
            matrix = np.matrix([ [1, 0, matrix_values[0]] , \
                                 [0, 1, matrix_values[1]] , \
                                 [0, 0, 1] ])
        if matrix_text.startswith('scale'):
            matrix = np.matrix([ [matrix_values[0], 0, 0], \
                                 [0, matrix_values[1], 0], \
                                 [0, 0, 1] ])
    # Recurse upwards: ancestor transforms apply on the left (outermost first).
    if ancestor_has_transform(element):
        matrix = get_matrix(parent_map[element]) * matrix
    return matrix
class Patch(object):
    """One SVG <path> element: caches its transformed corner points and
    exposes centroid/distance helpers used to match patches to labels."""
    def __init__(self, element):
        self.element = element
        self.id = element.attrib['id']
        # Set once a label has claimed this patch (see Electrode._get_patch).
        self.matched = False
        # Stroke color parsed from the style attribute; defaults to black.
        self.color = '000000'
        if 'style' in element.attrib:
            style = element.attrib[ 'style' ]
            match = re.search( r'stroke\s*:\s*#(?P<color>\w+)\s*;', style )
            if match:
                self.color = match.group( 'color' )
        # Blue-stroked patches sit at z = -4.5; everything else at z = 0.
        self.z = -4.5 if self.color == '0000ff' else 0.0
        #print ('Creating patch for', self.id)
    def get_centroid(self):
        """Return the arithmetic mean (x, y) of the patch's corners."""
        x,y = 0,0
        corners = self.get_corners()
        for pos in corners:
            x, y = x + pos[0], y + pos[1]
        return (x/len(corners), y/len(corners))
    def distance_to(self, position):
        """Return the Euclidean distance from the centroid to `position`."""
        x,y = self.get_centroid()
        return np.sqrt( (x-position[0])**2 + (y-position[1])**2 )
    # NOTE(review): lru_cache on an instance method keeps every Patch alive
    # for the process lifetime — acceptable for this one-shot script.
    @lru_cache(maxsize=None)
    def get_corners(self):
        """Return the path's segment start points transformed by the
        parent chain's net matrix; consecutive near-duplicates
        (squared distance < 0.01) are dropped.

        NOTE(review): the transform is taken from the parent element, so the
        path's own 'transform' attribute (if any) is not applied — confirm
        that is intentional.
        """
        transform = get_matrix( parent_map[self.element] )
        corners = []
        for p in parse_path( self.element.attrib['d']):
            coords = np.matrix([ [1,0,p.start.real],[0,1,p.start.imag],[0,0,1] ])
            res = transform*coords
            if len(corners):
                dx = corners[-1][0] - res[0,2]
                dy = corners[-1][1] - res[1,2]
                if dx*dx + dy*dy < 0.01:
                    continue
            corners.append( (res[0,2], res[1,2]) )
        return corners
    def get_boundaries( self, minx, maxx, miny, maxy ):
        # NOTE(review): this method looks unfinished — pt_inside is computed
        # but never used and nothing is returned.
        corners = self.get_corners()
        prevx, prevy = corners[0]
        inside = prevx > minx and prevx < maxx and \
                 prevy > miny and prevy < maxy
        for pt in corners[1:]:
            x, y = pt
            pt_inside = x > minx and x < maxx and y > miny and y < maxy
class Electrode(object):
    """An electrode label in the SVG plus the patches nearest to each of
    its label occurrences."""
    # Shared pool of every <g>/<path> patch. Built at class-definition time
    # (side effect of importing this module); each Patch is claimed at most
    # once via its `matched` flag.
    all_patches = [Patch(p) for p in root.iterfind('.//g/path')]
    def __init__(self, name):
        self.name = name
        # Claim the closest unmatched patch for every label occurrence.
        self.patches = [self._get_patch(i)
                        for i in range(len(self.get_label_positions()))]
    # NOTE(review): lru_cache on an instance method keeps Electrode objects
    # alive for the process lifetime — fine for this one-shot script.
    @lru_cache(maxsize=None)
    def get_label_positions(self):
        """Return the transformed (x, y) centers of every <tspan> whose text
        equals this electrode's name."""
        label_positions = []
        for element in root.iterfind('.//tspan'):
            if not element.text == self.name: continue
            x = [float(num) for num in element.attrib['x'].split()] #values are for each letter
            y = [float(num) for num in element.attrib['y'].split()]
            x_avg, y_avg = sum(x)/len(x) , sum(y)/len(y)
            final = get_matrix(parent_map[element]) * np.matrix([ [1,0,x_avg],[0,1,y_avg],[0,0,1] ])
            label_positions.append( (final[0,2], final[1,2]) )
        return label_positions
    def _get_patch(self, index):
        "Return patches closest to each of the electrodes"
        delta = float('inf')
        nearest = None
        for patch in Electrode.all_patches:
            if patch.matched: continue
            d = patch.distance_to( self.get_label_positions()[index] )
            if d < delta:
                delta = d
                nearest = patch
        nearest.matched = True
        return nearest
    # Scratch dict used only by the commented-out overlap check below.
    all_verts = {}
    def __repr__( self ):
        """Serialize as '<name> <npatches>' followed by one line per patch
        listing corner coordinates scaled by 100/9 with the patch's z.

        NOTE(review): points.reverse() mutates the list cached by
        get_corners()'s lru_cache, so repeated repr() calls flip the corner
        winding each time.
        """
        npoints = [ len( p.get_corners() ) for p in self.patches ]
        s = '{} {}\n'.format( self.name, len(npoints) )
        patch_strings = []
        for patch, patch_npoints in zip( self.patches, npoints ):
            points = patch.get_corners()
            #for p in points[:-1]:
            #for k, v in self.all_verts.items():
            #if abs( p[0] - v[0] ) < 0.05 and abs( p[1] - v[1] ) < 0.05:
            #print( "OVERLAP: {}-{}".format( k, self.name ) )
            #self.all_verts[ "{}:{}:{}".format( self.name, p[0], p[1] ) ] = p
            points.reverse()
            patch_strings.append( '{} {}'.format( patch_npoints,
                                  ' '.join( '{} {} {}'.format(
                                      p[0]*100./9, p[1]*100./9, patch.z )
                                      for p in points ) ) )
        return s + '\n'.join( patch_strings )
def main():
    """Build an Electrode for every distinct label in the SVG, print each
    electrode's patch geometry, and plot labels and patch outlines."""
    electrode_names = {elem.text for elem in root.iterfind('.//tspan')}
    electrodes = [Electrode(name) for name in electrode_names]
    xpos, ypos = [], []
    for electrode in electrodes:
        for px, py in electrode.get_label_positions():
            xpos.append(px)
            ypos.append(py)
        print(electrode)
    # Label positions as blue dots.
    plt.plot(xpos, ypos, 'b.')
    # Patch centroids (red crosses) and outlines (green lines).
    for patch in Electrode.all_patches:
        cx, cy = patch.get_centroid()
        plt.plot(cx, cy, 'rx')
        corners = patch.get_corners()
        plt.plot([c[0] for c in corners], [c[1] for c in corners], 'g-')
    plt.show()
if __name__ == "__main__":
    main()
| wenlintan/musiqcWashington | PyPlay/trap_fe/geometry/hoa-geometry.py | hoa-geometry.py | py | 7,028 | python | en | code | 0 | github-code | 13 |
2355560597 | # 344 - Reverse String
# https://leetcode.com/problems/reverse-string/
class Solution:
    # list the methods to be run against the test cases
    implementations = ["reverse_string"]
    def reverse_string(self, s: list[str]) -> list[str]:
        """Reverse `s` in place by swapping mirrored elements, and return it.

        Time: O(n) — exactly n // 2 swaps.
        Space: O(1) — constant extra storage.
        """
        n = len(s)
        for left in range(n // 2):
            right = n - 1 - left
            s[left], s[right] = s[right], s[left]
        return s
# =============================== DRIVER CODE ================================
if __name__ == "__main__":
    # PrintTests is a project-local helper that runs each implementation
    # listed in Solution.implementations against the cases below.
    from class_print_tests import PrintTests as PT
    # enter test cases: ['description', [inputs], expected_result]
    test_cases = [
        ["Example 1", [["h", "e", "l", "l", "o"]], ["o", "l", "l", "e", "h"]],
        [
            "Example 2",
            [["H", "a", "n", "n", "a", "h"]],
            ["h", "a", "n", "n", "a", "H"],
        ],
        ["Single Element", [["a"]], ["a"]],
        ["Two Elements", [["a", "b"]], ["b", "a"]],
    ]
    # run test cases and print results using PrintTests class
    pt = PT(Solution(), test_cases)
    pt.run()
| andrewt110216/algorithms-and-data-structures | leetcode/p0344_solution.py | p0344_solution.py | py | 1,481 | python | en | code | 0 | github-code | 13 |
73662999056 | # -*- encoding: utf-8 -*-
import os
import warnings
# Public API for `from ... import *`.
# NOTE(review): only two of this module's helpers are exported; the
# check_true/check_false/check_none/check_for_bool/get_max_index/get_most
# functions below are importable but not star-exported — confirm intent.
__all__ = [
    'check_pid',
    'warn_if_not_float'
]
def warn_if_not_float(X, estimator='This algorithm'):
    """Warn when `X` is not a floating-point array.

    `estimator` may be a name string or an object (its class name is used
    in the warning). Returns True if a warning was issued (input is not
    float) and False otherwise, for easier input validation.
    """
    name = estimator if isinstance(estimator, str) else estimator.__class__.__name__
    if X.dtype.kind == 'f':
        return False
    warnings.warn("%s assumes floating point values as input, "
                  "got %s" % (name, X.dtype))
    return True
def check_pid(pid):
    """Return True if a process with the given unix pid exists.

    Sends signal 0, which performs existence/permission checking without
    delivering an actual signal.
    """
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    return True
def check_true(p):
    """Return True if `p` represents boolean truth ("True"/"true"/1/True)."""
    return p in ("True", "true", 1, True)
def check_false(p):
    """Return True if `p` represents boolean falsehood ("False"/"false"/0/False)."""
    return p in ("False", "false", 0, False)
def check_none(p):
    """Return True if `p` represents None ("None"/"none"/None)."""
    return p in ("None", "none", None)
def check_for_bool(p):
    """Coerce a bool-like value to a real bool; raise ValueError otherwise."""
    if check_true(p):
        return True
    if check_false(p):
        return False
    raise ValueError("%s is not a bool" % str(p))
def get_max_index(num_list, topk=3):
    """Return the indices of the `topk` largest values, biggest first.

    Tie-breaking follows a stable ascending value sort read backwards:
    among equal values, the later index comes first.
    """
    order = sorted(enumerate(num_list), key=lambda pair: pair[1])
    order.reverse()
    return [index for index, _ in order[:topk]]
def get_most(num_list):
    """Return the most frequent element of `num_list`.

    Replaces the previous O(n^2) implementation (list.count inside the
    loop) with a single O(n) Counter pass. Ties are broken in favor of the
    element encountered first, matching the old behavior. An empty list
    raises IndexError (previously UnboundLocalError).
    """
    from collections import Counter  # local import keeps module-level deps unchanged
    return Counter(num_list).most_common(1)[0][0]
14508410119 | import unittest
import os
from pathlib import Path
def sys_path_init():
    """Append the project root (for tests/main.py) and the tests folder's
    parent (for test.sh) to sys.path so project packages resolve."""
    import sys
    here = Path(os.path.realpath(__file__))
    # Three levels up first (tests/main.py layout), then two (test.sh layout).
    for ancestor in (here.parent.parent.parent, here.parent.parent):
        sys.path.append(str(ancestor.absolute()))
def db_init():
    """Create every project table: import each model module from the
    project-local `sprint` package and run SQLAlchemy metadata.create_all
    against the shared `database.engine`."""
    print('============ DB Initialize Start ============')
    # Imports are local so the module can be imported without a configured DB.
    from sprint.board import model as board_model
    from sprint.category import model as category_model
    from sprint.comment import model as comment_model
    from sprint.team import model as team_model
    from sprint.user import model as user_model
    from database import engine
    models = [
        board_model,
        category_model,
        comment_model,
        user_model,
        team_model,
    ]
    for model in models:
        model.Base.metadata.create_all(engine)
    print('============ DB Initialize Complete ============')
    print()
def print_errors(elements):
    """Print each failed test's identifier and traceback text, with a blank
    line after every entry."""
    for entry in elements:
        print(entry[0])  # test identifier (class.function name)
        print(entry[1])  # traceback text
        print()
def print_progress_line(first, second, third):
    """Print a progress banner with the three fields padded to 5/7/8 chars."""
    banner = "============ {:5s} {:7s} {:8s} ============".format(first, second, third)
    print(banner)
def run_test_by_folder_name(folder_name: str, test_name: str, module_strings):
    """Load `<folder_name>.<module>` for every entry in `module_strings`,
    run them all as one unittest suite, and print a result summary plus
    any errors and failures."""
    print_progress_line(test_name, 'Test', 'Start')
    testSuite = unittest.TestSuite()
    # Qualify each module with its folder, e.g. 'service.user'.
    module_strings = [folder_name + '.' + model_str for model_str in module_strings]
    [__import__(model_str) for model_str in module_strings]
    suites = [unittest.TestLoader().loadTestsFromName(model_str) for model_str in module_strings]
    [testSuite.addTest(suite) for suite in suites]
    result = unittest.TestResult()
    testSuite.run(result)
    print_progress_line(test_name, 'Result', 'Count')
    print(result)
    if len(result.errors) != 0:
        print_progress_line(test_name, 'Error', 'List')
        print_errors(result.errors)
    if len(result.failures) != 0:
        print_progress_line(test_name, 'Failure', 'List')
        print_errors(result.failures)
    print()
def service_test():
    """Run the CRUD test modules under the 'service' folder."""
    # List the CRUD test module names in creation order.
    modules = ['user', 'team', 'board', 'category', 'comment']
    run_test_by_folder_name('service', 'CRUD', modules)
def api_test():
    """Placeholder: API tests are not wired up yet."""
    pass
    # List the API test module names in creation order.
    # module_strings = ['auth',
    #                 ]
    # run_test_by_folder_name('api', 'API', module_strings)
def init_dummy_data():
    """Full test bootstrap: fix sys.path, create the tables, then run the
    service and API suites in order."""
    for step in (sys_path_init, db_init, service_test, api_test):
        step()
if __name__ == "__main__":
    init_dummy_data()
| soo4767/sprint-backend-v2 | tests/main.py | main.py | py | 2,781 | python | en | code | 0 | github-code | 13 |
31509237214 | import unittest
from unittest import TestCase
from enum import Enum
from functools import wraps
from typing import (
Callable,
Optional,
List,
Any
)
class TestNumber(TestCase):
    """Unit tests for Number.get_int_sqrt (Number is defined below)."""
    def setUp(self) -> None:
        # Inputs that should fail validation vs. valid naturals.
        self.incorrect_values: List[Any] =\
            ['3', 1.2, -2, 'asf', []]
        self.correct_values: List[int] =\
            [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # def test_incorrect_values(self):
    #     for value in self.incorrect_values:
    #         self.assertRaises(ValueError, Number, value)
    def test_int_sqrt(self):
        # NOTE(review): this only prints the results — no assertion is made,
        # so the test cannot fail on a wrong square root.
        for value in self.correct_values:
            number = Number(value)
            print(number.get_int_sqrt())
class ErrorMessages(str, Enum):
    """User-facing validation messages raised by Number.set_value."""
    INCORRECT_TYPE = 'Incorrect type of value'
    NEGATIVE_VALUE = 'Number must be positive or zero'
def value_exception_handler(method: Callable):
    """Decorator for two-argument methods ``(self, value)``: swallow
    exceptions raised by `method`, printing a generic notice and returning
    None in that case.

    Bug fix: the original bare ``except:`` also trapped SystemExit and
    KeyboardInterrupt; catching Exception lets those propagate.
    """
    @wraps(wrapped=method)
    def wrapper(self: 'Number', value: Any):
        try:
            return method(self, value)
        except Exception:
            print('Incorrect value')
    return wrapper
class Number:
    """Wrapper around a validated non-negative int with an integer
    square-root operation."""

    def __init__(self, value: Any) -> None:
        self._value: Optional[int] = None
        self.set_value(value)

    def get_value(self) -> int:
        """Return the stored value (None if validation failed)."""
        return self._value

    @value_exception_handler
    def set_value(self, value: Any) -> None:
        """Validate and store `value`; it must be a non-negative int."""
        if not isinstance(value, int):
            raise ValueError(ErrorMessages.INCORRECT_TYPE.value)
        if value < 0:
            raise ValueError(ErrorMessages.NEGATIVE_VALUE.value)
        self._value = value

    def get_int_sqrt(self) -> int:
        """Binary-search floor(sqrt(value)); exact roots return directly."""
        target = self._value
        lo, hi = 1, target
        while lo <= hi:
            mid = (lo + hi) // 2
            square = mid * mid
            if square == target:
                return mid
            if square < target:
                lo = mid + 1
            else:
                hi = mid - 1
        return hi
class Solution:
    """LeetCode adapter: delegate integer square root to Number."""
    def mySqrt(self, x: int) -> int:
        return Number(x).get_int_sqrt()
if __name__ == '__main__':
    unittest.main()
| madjar-code/LeetCode-Solutions | Binary Search/Sqrt(x).py | Sqrt(x).py | py | 2,242 | python | en | code | 0 | github-code | 13 |
13939558511 | import sqlite3
from sqlite3 import Error
import PySimpleGUI as gui
gui.theme('DarkAmber') # color
# Window layout: a title row, two labeled input rows, and Ok/Cancel buttons.
# inside window
layout = [  [gui.Text('Tables')],
            [gui.Text('Search'), gui.InputText()],
            [gui.Text('Command'), gui.InputText()],
            [gui.Button('Ok'), gui.Button('Cancel')] ]
# Create the Window
window = gui.Window('EFT info', layout)
# Event Loop to process "events" and get the "values" of the inputs
while True:
    event, values = window.read()
    if event == gui.WIN_CLOSED or event == 'Cancel': # if user closes window or clicks cancel
        break
    # NOTE(review): values[0] is the 'Search' field; the 'Command' field
    # (values[1]) and the sqlite3 import above are never used — the DB
    # query logic appears not to be implemented yet.
    print('Displaying ', values[0])
window.close()
71473315537 | import numpy as np
from onnx.reference.op_run import OpRun
class CenterCropPad(OpRun):
    """Reference implementation of the ONNX CenterCropPad operator: for each
    requested axis, center-crop the input when the target size is smaller
    than the current size, or center-pad with zeros when it is larger."""

    def _run(self, input_data, shape, axes=None):  # type: ignore
        # Falsy `axes` (None or empty) falls back to the node attribute.
        axes = axes or self.axes  # type: ignore
        input_rank = len(input_data.shape)
        if axes is None:
            axes = list(range(input_rank))
        else:
            # Normalize negative axes to [0, rank).
            axes = [axis if axis >= 0 else axis + input_rank for axis in axes]
        pad_slices = [slice(0, s) for s in input_data.shape]
        crop_slices = [slice(0, s) for s in input_data.shape]
        new_shape = list(input_data.shape)
        for a, sh in zip(axes, shape):
            dim = input_data.shape[a]
            # Bug fix: the comparison was `sh == a` (target size vs. axis
            # index), which silently skipped the resize whenever the target
            # size happened to equal the axis number. A target equal to the
            # current size is the genuine no-op case.
            if sh == dim:
                continue
            new_shape[a] = sh
            if sh < dim:
                # Crop: keep the centered window of length sh (the extra
                # element of an odd difference is removed from the end).
                d = dim - sh
                half = d // 2
                crop_slices[a] = (
                    slice(half, dim - half) if d % 2 == 0 else slice(half, dim - half - 1)
                )
            else:
                # Pad: place the input centered inside the larger output
                # (the extra element of an odd difference goes to the end).
                d = sh - dim
                half = d // 2
                pad_slices[a] = (
                    slice(half, sh - half) if d % 2 == 0 else slice(half, sh - half - 1)
                )
        res = np.zeros(tuple(new_shape), dtype=input_data.dtype)
        res[tuple(pad_slices)] = input_data[tuple(crop_slices)]
        return (res,)
| onnx/onnx | onnx/reference/ops/op_center_crop_pad.py | op_center_crop_pad.py | py | 1,525 | python | en | code | 15,924 | github-code | 13 |
25139376309 | """Test of the hypercube function that is used to to assign positions
to the channels.
"""
import math
from typing import Dict, Tuple
import pytest
import torch
from topography.core.distance import hypercube
expected_grids: Dict[Tuple[int, int], Dict[bool, torch.Tensor]] = {
(5, 1): {
True: torch.tensor([[0], [1], [2], [3], [4]]),
False: torch.tensor([[0], [0.25], [0.5], [0.75], [1.0]]),
},
(9, 2): {
True: torch.tensor(
[
[0, 0],
[1, 0],
[2, 0],
[0, 1],
[1, 1],
[2, 1],
[0, 2],
[1, 2],
[2, 2],
]
),
False: torch.tensor(
[
[0.0, 0.0],
[0.5, 0.0],
[1.0, 0.0],
[0.0, 0.5],
[0.5, 0.5],
[1.0, 0.5],
[0.0, 1.0],
[0.5, 1.0],
[1.0, 1.0],
]
),
},
(7, 2): {
True: torch.tensor(
[[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1], [0, 2]]
),
False: torch.tensor(
[
[0.0, 0.0],
[0.5, 0.0],
[1.0, 0.0],
[0.0, 0.5],
[0.5, 0.5],
[1.0, 0.5],
[0.0, 1.0],
]
),
},
}
@pytest.mark.parametrize(
"num_points,dimension,integer_positions",
[
(5, 1, True),
(5, 1, False),
(9, 2, True),
(9, 2, False),
(7, 2, True),
(7, 2, False),
],
)
def test_simple_grid(num_points, dimension, integer_positions):
grid = hypercube(num_points, dimension, integer_positions=integer_positions)
expected_grid = expected_grids[(num_points, dimension)][integer_positions]
assert torch.equal(grid, expected_grid)
@pytest.mark.parametrize("dimension", [1, 2, 3])
def test_hypercube(dimension):
num_axis = 10
num_points = num_axis**dimension
coords = hypercube(num_points, dimension)
assert coords.shape == (num_points, dimension)
other_num_points = num_axis**dimension - num_axis + 1
other_coords = hypercube(other_num_points, dimension)
assert torch.equal(coords[:other_num_points], other_coords)
@pytest.mark.parametrize("num_points", [2, 11, 20, 64, 256, 512, 1000, 111111])
def test_explicit_expression(num_points):
dimension = 2
num_axis = int(math.ceil(num_points ** (1 / dimension)))
ref = hypercube(num_points, dimension)
ref_integer = hypercube(num_points, dimension, integer_positions=True)
assert torch.allclose(ref, ref_integer / (num_axis - 1))
explicit = torch.zeros((num_points, dimension))
for idx in range(num_points):
explicit[idx][0] = idx % num_axis
explicit[idx][1] = idx // num_axis
assert torch.allclose(ref, explicit / (num_axis - 1))
| bootphon/topography | tests/test_hypercube.py | test_hypercube.py | py | 2,960 | python | en | code | 4 | github-code | 13 |
6538058478 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
duration = .1 # Duration in seconds
signal_frequency = 600
sample_frequency = 1000
# timesteps
t = np.arange(0, duration*sample_frequency)/sample_frequency
# the signal
x = np.sin(2*np.pi*signal_frequency*t)
# number of samples
n = x.size
# hanning window
h = 0.5 - 0.5 * np.cos(2 * np.pi * np.arange(n)/(n-1))
# h = 1 - np.abs(n/2 - np.arange(n))/(n/2)
# DFT
X = np.fft.fft(x)
X_db = 20 * np.log10(2 * np.abs(X)/n)
# DFT with hanning window
Xh = np.fft.fft(x *h)
Xh_db = 20 * np.log10(2 * np.abs(Xh/n))
# create the x axis of frequencies
f = np.arange(0, n) * sample_frequency/n
plt.subplot(2,1,1)
plt.plot(t, x, label='signal')
plt.plot(t, x * h, label='signal * hanning')
plt.legend()
plt.subplot(2,1,2)
plt.plot(f, X_db, label='fft')
plt.plot(f, Xh_db, label='fft w/ hanning')
plt.legend()
# plt.grid()
plt.show() | IzzyBrand/ledvis | testing/hanning_test.py | hanning_test.py | py | 907 | python | en | code | 40 | github-code | 13 |
26973434535 | import sys
def sumMaxRange(array):
totalIndex = len(array)
sum = max = array[0]
for index in range(1, len(array)):
if sum < 0:
sum = array[index]
else:
sum += array[index]
if sum > max:
max = sum
return max
def main(argv):
arrayExercise = [31, -41, 59, 26, -53, 58, 97, -93, -23, 84]
sum = sumMaxRange(arrayExercise)
print(sum)
if __name__ == "__main__":
main(sys.argv) | laerciovacca/BitsOfBytes | sources/python/exercise.py | exercise.py | py | 400 | python | en | code | 0 | github-code | 13 |
33558395733 |
import sparknlp
from pyspark.ml import PipelineModel
spark = sparknlp.start(m1=True)
import streamlit as st
@st.cache(allow_output_mutation=True)
def load_pipeline(name):
return PipelineModel.load(name)
@st.cache(allow_output_mutation=True)
def process_text(model_name, text):
pipeline = load_pipeline(model_name)
data = spark.createDataFrame([[text]]).toDF("tweet")
return pipeline.transform(data).first()["category"][0]["result"]
# Init model
process_text("model", "init")
text_input = st.text_input("Enter any tweet 👇")
if text_input:
st.write("This tweet is: ")
result = process_text('model', text_input)
if result == 'fake':
st.error("Fake")
else:
st.success("Real") | hannnnk1231/Covid-19-Fake-News-Detector | demo.py | demo.py | py | 734 | python | en | code | 1 | github-code | 13 |
41470194233 | import cv2
from cv2 import blur
#use of blur to remove noise and augment dataset for noisy images
img = cv2.imread('F:\Edge ai\images\\balloons_noisy.png')
blurIMG = cv2.blur(img,(5,5))
cv2.imshow("OG img",img)
cv2.imshow("Blurred img",blurIMG)
cv2.waitKey(0)
| kunal118/Edge-ai | Class 3/blur.py | blur.py | py | 267 | python | en | code | 0 | github-code | 13 |
4321498591 | from financepy.utils.global_types import OptionTypes
from financepy.models.sabr import SABR
from financepy.models.sabr import vol_function_sabr
import numpy as np
def test_SABR():
nu = 0.21
f = 0.043
k = 0.050
t = 2.0
alpha = 0.2
beta = 0.5
rho = -0.8
params = np.array([alpha, beta, rho, nu])
vol = vol_function_sabr(params, f, k, t)
assert round(vol, 4) == 0.8971
alpha = 0.3
beta = 1.0
rho = 0.0
params = np.array([alpha, beta, rho, nu])
vol = vol_function_sabr(params, f, k, t)
assert round(vol, 4) == 0.3028
alpha = 0.1
beta = 2.0
rho = 0.8
params = np.array([alpha, beta, rho, nu])
vol = vol_function_sabr(params, f, k, t)
assert round(vol, 4) == 0.0148
def test_SABR_Calibration():
alpha = 0.28
beta = 0.5
rho = -0.09
nu = 0.1
strikeVol = 0.1
f = 0.043
k = 0.050
r = 0.03
texp = 2.0
call_optionType = OptionTypes.EUROPEAN_CALL
put_optionType = OptionTypes.EUROPEAN_PUT
df = np.exp(-r * texp)
# Make SABR equivalent to lognormal (Black) model
# (i.e. alpha = 0, beta = 1, rho = 0, nu = 0, shift = 0)
modelSABR_01 = SABR(0.0, 1.0, 0.0, 0.0)
modelSABR_01.set_alpha_from_black_vol(strikeVol, f, k, texp)
impliedLognormalVol = modelSABR_01.black_vol(f, k, texp)
impliedATMLognormalVol = modelSABR_01.black_vol(k, k, texp)
impliedLognormalSmile = impliedLognormalVol - impliedATMLognormalVol
assert impliedLognormalSmile == 0.0, "In lognormal model, smile should be flat"
calibrationError = round(strikeVol - impliedLognormalVol, 6)
assert calibrationError == 0.0
# Volatility: pure SABR dynamics
modelSABR_02 = SABR(alpha, beta, rho, nu)
modelSABR_02.set_alpha_from_black_vol(strikeVol, f, k, texp)
impliedLognormalVol = modelSABR_02.black_vol(f, k, texp)
impliedATMLognormalVol = modelSABR_02.black_vol(k, k, texp)
impliedLognormalSmile = impliedLognormalVol - impliedATMLognormalVol
calibrationError = round(strikeVol - impliedLognormalVol, 6)
assert calibrationError == 0.0
# Valuation: pure SABR dynamics
valueCall = modelSABR_02.value(f, k, texp, df, call_optionType)
valuePut = modelSABR_02.value(f, k, texp, df, put_optionType)
assert round(valueCall - valuePut, 12) == round(df*(f - k), 12), \
"The method called 'value()' doesn't comply with Call-Put parity"
| domokane/FinancePy | tests/test_FinModelSABR.py | test_FinModelSABR.py | py | 2,419 | python | en | code | 1,701 | github-code | 13 |
19189801083 | #!/usr/bin/python3.7
# -*-coding:Utf-8 -*
# version 1.0-alpha
import datetime, time
import os, shutil, sys, glob
from os import path
varld4 = 0
varactu = 0
home = os.environ['HOME']
chemindst = 0
dstf = 0
def moove_question():
print("\nQue voulez-vous déplacer ? :\n")
print("1 - De(s) fichier(s).")
print("2 - De(s) dossier(s).")
print("3 - Revenir au menu.\n")
try:
choix2 = int(input('\nChoix :'))
if choix2 == 1:
moove_files()
elif choix2 == 2:
moove_dir()
elif choix2 == 3:
menu()
else:
moove_question()
except ValueError:
print("refaire")
moove_question()
'''choix2 = int(input('\n choix :'))
if choix2 == 1:
moove()
elif choix2 == 2:
moove_dir()
elif choix2 == 3:
menu()
else:
moove_question()'''
def verif_saisie(ld):
actuelle = input("\nEcrire le(s) dossier(s) concerné(s) [écrire plus de 3 caractères et écrire les majuscules]:").split(", ")
#print(actuelle)
actuelle = [x.strip() for x in actuelle]
'''correspondant à
actuelle2 = []
for x in actuelle:
actuelle2.append(x.strip())'''
ld4 = {}
for saisie in actuelle:
ld4.update({saisie: []})
for saisie in actuelle:
for value in ld.values():
#print(value)
for elem in value:
#elemtest = elem
#print(f'{saisie} {elem} {elemtest} {value}')
if saisie in elem:#split() retourne sous forme de liste dans liste in liste = not
print(f'{saisie} in {elem}')
ld4[f'{saisie}'].append(elem)
else:
print(f'{saisie} not in {elem}')
reponselen = []
for key, value in ld4.items():
#print(f'{key} {value}')
if len(value) == 0:
print(f'Nous navons trouvés aucune documents avec votre saisie {key}')
reponselen.append(key)
else:
print(f'Voici les documents sélectionner pour votre saisie {key} ')
print(f'{value}')
if len(reponselen) == 1:
for elem in reponselen:
print(f'Pour votre saisie incorrect {elem}, refaites une saisie.\n ')
verif_saisie(ld)
elif len(reponselen) > 1:
reponseselect = ""
while reponseselect != "oui" and reponseselect != "non":
print(f' Souhaitez-vous refaire une reselection de dossiers dû aux saisies suivantes qui nont rien donné ? (oui ou non) : ')
for elem in reponselen:
print(elem)
reponseselect = input("\nChoix :")
if reponseselect == "oui":
verif_saisie(ld)
elif reponseselect == "non":
print("Annulation de la demande de reselection de dossiers")
else:
print("Mauvaise réponse, refaites")
elif len(reponselen) == 0:
print("Pas d'erreur de saisie .")
else:
print("problème quelque part au cas où)")
global varld4
varld4 = ld4
global varactu
varactu = actuelle
def verif_saisie_f(lf):
actuelle = input("\nEcrire le(s) fichier(s) concerné(s) [écrire plus de 3 caractères et écrire les majuscules]:").split(", ") #utiliser ", " comme délimiteur et sont ignoré dans la liste
#print(actuelle)
actuelle = [x.strip() for x in actuelle] # création d'une nouvelle liste actuelle pour que chaque élément présent dans actuelle aie un .strip() pour supprimer les espaces étant au début et à la fin de la chaine de caractère (si l'user saisie un espace)
'''correspondant à
actuelle2 = []
for x in actuelle:
actuelle2.append(x.strip())'''
lf4 = {}
for saisie in actuelle:
lf4.update({saisie: []})
for saisie in actuelle:
for value in lf.values():
#print(value)
for elem in value:
#elemtest = elem
#print(f'{saisie} {elem} {elemtest} {value}')
if saisie in elem:
print(f'{saisie} in {elem}')
lf4[f'{saisie}'].append(elem) #ajoute dans le dictionnaire que l'elem en tant que valeur de la clé saisie
else:
print(f'{saisie} not in {elem}')
reponselen = []
for key, value in lf4.items(): #lf4 permet de savoir si on a trouvé des fichiers pour notre saisie
#print(f'{key} {value}')
if len(value) == 0:
print(f'Nous navons trouvés aucune fichier(s) avec votre saisie {key}')
reponselen.append(key)
else:
print(f'Voici le(s) fichier(s) sélectionné pour votre saisie {key} ')
print(f'{value}')
if len(reponselen) == 1:
for elem in reponselen:
print(f'Pour votre saisie incorrect {elem}, refaites une saisie.\n ')
verif_saisie_f(lf) # on appel à nouveau la fonction afin de refaire la saisie
elif len(reponselen) > 1:
reponseselect = ""
while reponseselect != "oui" and reponseselect != "non":
print(f' Souhaitez-vous refaire une reselection de fichiers dû aux saisies suivantes qui nont rien donné ? (oui ou non) : ')
for elem in reponselen:
print(elem)
reponseselect = input("\nChoix :")
if reponseselect == "oui":
verif_saisie(lf)
elif reponseselect == "non":
print("Annulation de la demande de reselection de fichiers.")
else:
print("Mauvaise réponse, refaites")
elif len(reponselen) == 0:
print("Pas d'erreur de saisie .")
else:
print("problème quelque part au cas où)")
global varld4
varld4 = lf4
global varactu
varactu = actuelle
def search_dir_dst():
dossier = input("\nEcrire le nom du dossier de destination auquel vous souhaitez déplacer : ")
chemin = []
for home2, dirs, files in os.walk(home, topdown=True): #permet de parcourir tous les éléments présent dans home
#os.walk a 3 tuples : dirtpath, dirnames, filesnames
#print(home2)#chemin vers le path
#print(dirs)#liste avec nom des sous répertoires de dirtpath
#print(files)#liste avec nom des fichier hors répertoires dans dirtpath
for element in dirs:
if dossier.lower() in element.lower(): #compare la saisie de dossier à l'élément parcouru
dst = os.path.join(home2, element) #on forme le chemin du dossier
chemin.append(dst) #on l'ajoute dans la liste chemin ,
if len(chemin) > 1: # si dans la liste chemin, + de 1 chemin donc plusieur dossiers
print("Il existe plusieurs dossiers à ce nom :\n")
for lechemin in chemin: #affiche tous les chemins présent dans la liste
print(f'{lechemin}\n')
elif len(chemin) == 1: # si il nya que 1 seul chemin dans la liste
print("Il existe un dossier à ce nom là :")
for lechemin in chemin:
print(lechemin)
else:
print(f'\nAucun dossier nexiste pour {dossier}.\n')
dstfinal = "" #initialiser la variable
while dstfinal != "1" and dstfinal !="2" and dstfinal != "3":
print("Souhaitez-vous :")
print("1 - Récrire le dossier de destination.")
print('2 - Afficher une arborescence de votre répertoire personnel.')
print("3 - Retourner au menu")
dstfinal = input('Choix :')
if dstfinal == "1":
search_dir_dst()
elif dstfinal == "2":
print_tree()
search_dir_dst()
elif dstfinal == "3":
menu()
else:
print("Mauvais choix. Refaire.\n")
global chemindst
chemindst = chemin #on donne à la variable globale la valeur de chemin, pour que l'on puisse utilisé le "resulat" de chemin dans les autre fonctions
def verif_dir_dst(dst,source,chemin):
dst = input("Choisir le dossier (copier/coller le chemin correspondant) :")
ldst = []
erreur = ""
#print(type(dst))
for value in chemindst: #on utilise la variable globale dans lequel on a insérer les valeurs de la fonction précédente
print(value)
print(chemindst)
if dst == value:
print(f'{dst} in {value}')
ldst.append(value) #on rajoute dans ldst , value
#
else:
#print(f'{dst} not in {value}')
#print("erreur")
erreur = 0
print(value)
print(type(value))
global dstf
dstf = dst #pour la réutiliser dans d'autre fonctions
if len(ldst) == 1: #si ldst q'une valeur alors correspond à notre saisie, permet surtout de vérifier la saisie si elle existe bien
print(f'\nVoici le dossier de destination que vous avez sélectionné pour votre saisie "{dst}" ')
for elem in ldst:
print(elem)
elif len(ldst) == 0: # si ldst 0 valuer, alors aucun dossier trouvé avec la saisie de l'user
print(f'\nNous navons aucun document correspondant à votre saisie {dst}')
reponseselect = ""
while reponseselect != "1" and reponseselect != "2":
print(f'\nSouhaitez-vous :')
print("1 - Refaire saisie")
print("2 - Afficher arborescence répertoire personnelle")
reponseselect = input("Choix :")
if reponseselect == "1":
if len(chemin) > 1: #réaffiche les dossiers existant à noms
print("Il existe plusieurs dossiers à ce nom :\n")
for lechemin in chemin:
print(f'{lechemin}\n')
elif len(chemin) == 1:
print("Il existe un dossier à ce nom là :")
for lechemin in chemin:
print(lechemin)
verif_dir_dst(dst,source,chemindst) #refait appel à la fonction pour recommencer la saisie
elif reponseselect == "2":
print_tree()
verif_dir_dst(dst,source,chemindst)
else:
print("Mauvaise réponse, refaites")
else:
print("problème quelque part")
def menu_fin():
choix3 = ""
while choix3!="1" and choix3!="2":
print("\n Que voulez-vous faire ? :\n")
print("1 - Revenir au menu.")
print("2 - Fermer le programme.")
choix3 = input('\n choix :')
if choix3 == "1":
menu()
elif choix3 == "2":
exit
else:
print("erreur saisie")
def print_tree():
    """Optionally show the user's home directory as a tree (external `tree` command).

    First asks oui/non; on "oui", offers directories-only, directories+files,
    or cancel. Every prompt repeats until a valid answer is given.
    """
    printtree = ""
    while printtree not in ("oui", "non"):
        printtree = input("Souhaitez-vous peut-être voir quels dossiers existent sur votre répertoire personnel à travers une arborescence ? oui ou non : ")
        choixtree = ""
        if printtree == 'oui':
            while choixtree not in ("1", "2", "3"):
                print("\nQue voulez-vous faire ?: ")
                print("1 - Afficher un arbre avec que la liste des dossiers. ")
                print("2 - Afficher un arbre avec la liste des dossiers et la liste des fichiers.")
                print("3 - Ne rien faire car j'ai changé d'avis.")
                choixtree = input("\nChoix :")
                if choixtree == "1":
                    os.system("tree $HOME -d ")      # directories only
                elif choixtree == "2":
                    os.system("tree $HOME")          # directories and files
                elif choixtree == "3":
                    print("Annulation de la demande d'affichage de l'arborescence.")
                else:
                    print("Erreur de choix, refaire.\n")
        elif printtree == "non":
            print("Non affichage de l'arborescence.")
        else:
            print("Erreur de choix. Refaire.\n")
def search_dir():
    """Interactively search $HOME for folder names typed by the user.

    Builds a dict {typed name: [matching folder names]} by walking the home
    directory, then reports how many folders match each input; when an input
    matches nothing it offers the tree display and recurses to retry.

    NOTE(review): relies on a module-level `home`; `src`/`dst`/`dir2` are
    never used and `source` collects paths that are not returned -- confirm
    against the rest of the file.
    """
    src = ""
    dst = ""
    c = []
    #c = input('écrire le nom du fichier :').split(",")
    #c = "maison.txt"
    c = input('Ecrire le(s) nom(s) de(s) dossier(s) que vous sohaitez rechercher sur votre répertoire personnel:').split(",")
    #path.exists()
    ld = {}
    dir2 = []
    source = {}
    presence2 = []
    #chemin = glob.glob(f'/home/porteur/**/*{c}*', recursive=True)
    for saisie in c:
        ld.update({saisie: []})  # one result bucket per typed name
    #print(chemin)
    for saisie in c:
        for home2, dirs, files in os.walk(home, topdown=True): # os.walk yields (dirpath, dirnames, filenames)
            #print(home2)  # path being visited
            #print(dirs)   # sub-directory names of dirpath
            #print(files)  # file names directly under dirpath
            #files2 = files.copy()
            for dossiers in dirs:
                presence2.append(dossiers)
            for dossier in dirs: # compare the input against every sub-directory name
                if saisie.lower().strip() not in dossier.lower().strip(): # case-insensitive substring test
                    presence2.remove(dossier)
                    #print(fichier2)
                else:
                    src = os.path.join(home2, dossier)
                    source[dossier] = src
                    ld[saisie].append(dossier) # record the match under its input name
                    #lf2[f'{test22}'].append(fichier2)
                    #test22 += 1
    #print(lf)
    #print(premier)
    '''for elemlf in lf:
        print(elemlf)'''
    for key, value in ld.items():
        if len(value) > 1:
            print(f'\nIl existe plusieurs dossiers correspondant à {key} : {value}, {len(value)}')
        elif len(value) == 1:
            print(f'Il existe un dossier correspondant à {key} : {value}, {len(value)}')
        elif len(value) == 0:
            print(f'Il nexiste pas de dossiers correspondant à {key}, {len(value)}')
            print_tree()
            search_dir() # retry from scratch
        else:
            print(f'problème') # NOTE(review): unreachable, len() is never negative
def nodirs(nodir, event):
    """Handle the case where one or more searched folders were not found.

    nodir: one "nodir" marker per input that matched nothing.
    event: "+dirs"/"1dir" markers for inputs that did match.
    When at least one folder WAS found the user may retry, continue with the
    found folders, or go back to the menu; otherwise only retry or menu.
    """
    if len(nodir) > 0:
        print("\nPour le dossier / l'un des dossiers que vous souhaitiez rechercher, nous n'avons rien trouvé sur ce poste y correspondant.")
        print_tree()
        RouP = ""
        while RouP !="1" and RouP != "2":
            print(event.count ("+dirs"))
            print(event.count("1dir"))
            if (event.count ("+dirs") > 0) or (event.count("1dir") > 0): # some folders do exist
                print("\nSouhaitez-vous:")
                print("1 - Refaire à nouveau une recherche.")
                print("2 - Poursuivre avec les éléments déjà trouvé.")
                print("3 - Revenir au menu")
                RouP = input("Choix :")
                print("\n")
                if RouP == "1":
                    moove_dir()
                elif RouP == "2":
                    print("\nPoursuite de la démarche pour le déplacement. ")
                elif RouP == "3":
                    menu()
                    # BUG FIX: "3" was not in the loop condition, so the prompt
                    # reappeared once menu() returned; leave the loop instead.
                    break
                else:
                    print("Erreur de choix.\n")
            else: # nothing at all was found: only retry or menu make sense
                print("\nSouhaitez-vous:")
                print("1 - Refaire à nouveau une recherche.")
                print("2 - Revenir au menu")
                RouP = input("Choix :")
                print("\n")
                if RouP == "1":
                    moove_dir()
                elif RouP == "2":
                    menu()
                else:
                    print("Erreurs de choix. \n")
    else:
        print('Recherche de correspondance par rapport à votre saisie terminé.')
def nofiles(nofile, event):
    """Handle the case where one or more searched files were not found.

    nofile: one "nofile" marker per input that matched nothing.
    event: "+files"/"1file" markers for inputs that did match (see moove_files).
    When at least one file WAS found the user may retry, continue with the
    found files, or go back to the menu; otherwise only retry or menu.
    """
    if len(nofile) > 0:
        print("\nPour le(s) fichier(s) que vous souhaitiez rechercher, nous n'avons rien trouvé sur ce poste y correspondant.")
        print_tree()
        RouP = ""
        while RouP !="1" and RouP != "2":
            # BUG FIX: the original tested "+dirs", which moove_files() never
            # appends (it appends "+files"), so this branch was unreachable.
            if (event.count("+files") > 0) or (event.count("1file") > 0):
                print("\nSouhaitez-vous:")
                print("1 - Refaire à nouveau une recherche.")
                print("2 - Poursuivre avec les éléments déjà trouvé.")
                print("3 - Revenir au menu")
                RouP = input("Choix :")
                print("\n")
                if RouP == "1":
                    # BUG FIX: this handler serves the *file* workflow; the
                    # original restarted the directory workflow (moove_dir).
                    moove_files()
                elif RouP == "2":
                    print("\nPoursuite de la démarche pour le déplacement. ")
                elif RouP == "3":
                    menu()
                    # BUG FIX: "3" was not in the loop condition, so the prompt
                    # reappeared once menu() returned; leave the loop instead.
                    break
                else:
                    print("Erreur de choix.\n")
            else: # nothing at all was found: only retry or menu make sense
                print("\nSouhaitez-vous:")
                print("1 - Refaire à nouveau une recherche.")
                print("2 - Revenir au menu")
                RouP = input("Choix :")
                print("\n")
                if RouP == "1":
                    moove_files()
                elif RouP == "2":
                    menu()
                else:
                    print("Erreurs de choix. \n")
    else:
        print('Recherche de correspondance par rapport à votre saisie terminé.')
def moove_dir():
    """Interactively search folders under $HOME and move the chosen ones.

    Flow: ask for folder names -> walk $HOME collecting matches -> report and
    delegate the "nothing found" case to nodirs() -> optionally print paths ->
    verif_saisie() to pick which folders to move -> search_dir_dst() /
    verif_dir_dst() to pick the destination -> shutil.move each selection.

    NOTE(review): depends on the globals home, chemindst, varld4, varactu and
    dstf set elsewhere in the file -- confirm against the helpers.
    """
    src = ""
    dst = ""
    c = []
    #c = input('écrire le nom du fichier :').split(",")
    #c = "maison.txt"
    c = input('Ecrire le(s) nom(s) de(s) dossier(s) que vous souhaitez rechercher sur votre répertoire personnel:').split(", ")
    #path.exists()
    ld = {}
    dir2 = []
    source = {}
    presence2 = []
    #chemin = glob.glob(f'/home/porteur/**/*{c}*', recursive=True)
    for saisie in c:
        ld.update({saisie: []})  # one result bucket per typed name
    #print(chemin)
    for saisie in c:
        for home2, dirs, files in os.walk(home, topdown=True): # os.walk yields (dirpath, dirnames, filenames)
            #print(home2)  # path being visited
            #print(dirs)   # sub-directory names of dirpath
            #print(files)  # file names directly under dirpath
            #files2 = files.copy()
            for dossiers in dirs:
                presence2.append(dossiers)
            for dossier in dirs: # compare the input against every sub-directory name
                if saisie.lower().strip() not in dossier.lower().strip(): # case-insensitive substring test
                    presence2.remove(dossier)
                    #print(fichier2)
                else:
                    src = os.path.join(home2, dossier)
                    source[dossier] = src # remember the folder's full path
                    ld[saisie].append(dossier)
                    #lf2[f'{test22}'].append(fichier2)
                    #test22 += 1
    #print(lf)
    #print(premier)
    '''for elemlf in lf:
        print(elemlf)'''
    nodir = [] # markers for inputs that matched nothing
    event = [] # markers for inputs that did match ("+dirs" / "1dir")
    for key, value in ld.items():
        if len(value) > 1:
            print(f'Il existe plusieurs dossiers correspondant à {key} : {value}, {len(value)}')
            event.append("+dirs")
        elif len(value) == 1:
            print(f'Il existe un dossier correspondant à {key} : {value}, {len(value)}')
            event.append("1dir")
        elif len(value) == 0:
            print(f'Il nexiste pas de dossiers correspondant à {key}, {len(value)}')
            nodir.append("nodir")
        else:
            print(f'problème') # NOTE(review): unreachable, len() is never negative
    print(event)
    nodirs(nodir, event) # handle the "some inputs matched nothing" case
    choix5 = ""
    while choix5 != "oui" and choix5 != "non":
        print("\nAfficher les chemins du(es) dossier(s) trouvés ? (oui ou non) : \n")
        choix5 = input("Choix :")
        if choix5 == "oui":
            for key, value in source.items():
                print(f'\nChemin du dossier {key} : {value}\n')
        elif choix5 == "non":
            print("\nEtape de l'affichage des chemins annulé.\n")
        else:
            print("Erreur de choix.\n")
    verif_saisie(ld) # let the user pick which of the found folders to move
    #print(ld)
    '''
    ld34 = {}
    presence34 = []
    for saisie in actuelle:
        for home2, dirs, files in os.walk(home, topdown=True):#os.walk a 3 tuples : dirtpath, dirnames, filesnames
            #print(home2)#chemin vers le path
            #print(dirs)#liste avec nom des sous répertoires de dirtpath
            #print(files)#liste avec nom des fichier hors répertoires dans dirtpath
            #files2 = files.copy()
            for dossier4 in dirs:
                presence34.append(dossier4)
            #print(files2)
            for dossier2 in dirs: #pour comparé avec plusieurs élément de la list files
                if saisie.strip() not in dossier2.strip():#renvoie une COPIE de la chaine de caractère masi en minuscule
                    presence34.remove(dossier2)
                    #print(fichier2)
                else:
                    #test = os.path.join(home2, fichier2)
                    #source[fichier2] = src
                    ld34[saisie].append(dossier2)
                    #lf2[f'{test22}'].append(fichier2)
                    #test22 += 1
    #print(lf34)
    for key, value in ld34.items():
        if len(ld34[key]) > 0:
            print(f'\nUn dossier existe bien pour la saisie {key} : {value}\n')
        else:
            print(f'Soucis avec la saisie de {key}')'''
    print("Avant d'écrire le dossier de destination pour le déplacement.")
    print_tree()
    search_dir_dst()
    verif_dir_dst(dst,source,chemindst) # pick/validate the destination (sets the global dstf)
    print("\nListe totale des dossiers à déplacer : ")
    for key, value in varld4.items(): # global filled by verif_saisie
        print(f'{value}')
    repdir = ""
    while repdir != "oui" and repdir != "non":
        repdir = input(f'Êtes-vous sûr de vouloir déplacer le(s) dossier(s) suivant(s) à {dstf} ? (oui ou non):')
        if repdir == 'oui':
            for saisie in varactu:
                for key,value in source.items():
                    if saisie.strip() in key.strip():
                        shutil.move(value, dstf)
                        print(f'\nLe dossier {key} a été déplacé de {value} à {dstf}')
        elif repdir == "non":
            print("\nAnnulation de la demande de déplacement de fichiers.")
        else:
            print("erreur")
    menu_fin()
#mettre .lower pour la casse en minuscule
def moove_files():
    """Interactively search files under $HOME and move the chosen ones.

    Flow: ask for file names -> walk $HOME collecting matches -> report and
    delegate the "nothing found" case to nofiles() -> optionally print paths ->
    verif_saisie_f() to pick which files to move -> search_dir_dst() /
    verif_dir_dst() to pick the destination -> shutil.move each selection.

    NOTE(review): depends on the globals chemindst, varld4, varactu and dstf
    set by the helper functions -- confirm against the rest of the file.
    """
    src = ""
    dst = ""
    home = os.environ['HOME']
    c = []
    #c = input('écrire le nom du fichier :').split(",")
    #c = "maison.txt"
    c = input('\nEcrire le(s) nom(s) du(es) fichier(s) que vous souhaitez rechercher sur votre répertoire personnel :').split(", ")
    #path.exists()
    lf = {}
    source = {}
    presence2 = []
    #chemin = glob.glob(f'/home/porteur/**/*{c}*', recursive=True)
    for saisie in c:
        lf.update({saisie: []})  # one result bucket per typed name
    #print(chemin)
    for saisie in c:
        for home2, dirs, files in os.walk(home, topdown=True): # os.walk yields (dirpath, dirnames, filenames)
            #print(home2)  # path being visited
            #print(dirs)   # sub-directory names of dirpath
            #print(files)  # file names directly under dirpath
            for fichier in files:
                presence2.append(fichier) # collect every file seen
            for fichier2 in files: # compare each file name with the typed name
                if saisie.lower().strip() not in fichier2.lower().strip(): # case-insensitive substring test
                    presence2.remove(fichier2) # drop files whose name does not contain the input
                    #print(fichier2)
                else:
                    src = os.path.join(home2, fichier2) # build the file's full path
                    source[fichier2] = src # remember it for the move step
                    lf[saisie].append(fichier2) # record the match under its input name
                    #lf2[f'{test22}'].append(fichier2)
                    #test22 += 1
    #print(lf)
    #print(premier)
    '''for elemlf in lf:
        print(elemlf)'''
    nofile = [] # markers for inputs that matched nothing
    event = []  # markers for inputs that did match ("+files" / "1file")
    for key, value in lf.items(): # walk the results per typed name
        if len(value) > 1: # more than one file for this input
            print(f'\nIl existe plusieurs fichiers correspondant à {key} : {value}, {len(value)}')
            event.append("+files")
        elif len(value) == 1: # exactly one file
            print(f'\nIl existe un fichier correspondant à {key} : {value}, {len(value)}')
            event.append("1file")
        elif len(value) == 0: # nothing found for this input
            print(f'\nIl nexiste pas de fichier correspondant à {key}, {len(value)}')
            nofile.append("nofile")
        else:
            print(f'problème') # NOTE(review): unreachable, len() is never negative
    #print(event)
    nofiles(nofile, event) # handle the "some inputs matched nothing" case
    choix5 =""
    while choix5 != "oui" and choix5 != "non":
        print("\nAfficher les chemins du(es) fichier(s) trouvés ? (oui ou non) : \n")
        choix5 = input("Choix :")
        if choix5 == "oui":
            for key, value in source.items(): # print the full path of every match
                print(f'\nChemin du fichier {key} : {value}\n')
        elif choix5 == "non":
            print("\nEtape de l'affichage des chemins annulé.\n")
        else:
            print("Erreur de choix.\n")
    verif_saisie_f(lf) # let the user pick which of the found files to move
    print("Avant d'écrire le dossier de destination pour le déplacement.")
    print_tree()
    search_dir_dst()
    verif_dir_dst(dst,source,chemindst) # pick/validate the destination (sets the global dstf)
    print("\nListe totale de(s) fichier(s) à déplacer : ")
    for key, value in varld4.items(): # global filled by verif_saisie_f
        print(f'{value}')
    repfile = ""
    while repfile != "oui" and repfile != "non":
        repfile = input(f'Êtes-vous sûr de vouloir déplacer le(s) fichier(s) suivant(s) à {dstf} ? (oui ou non):')
        if repfile == 'oui':
            for saisie in varactu:
                print(varactu)
                for key,value in source.items():
                    if saisie.strip() in key.strip(): # strip() removes surrounding spaces before matching
                        shutil.move(value, dstf)
                        print(f'\nLe fichier {key} a été déplacé de {value} à {dstf}')
        elif repfile == "non":
            print("\nAnnulation de la demande de déplacement de fichiers.")
        else:
            print("erreur")
    menu_fin()
def remove():
    """Interactively search files under $HOME and delete the chosen ones.

    Flow: ask for file names -> walk $HOME collecting matches -> report ->
    ask which files to delete -> second walk to verify that confirmation ->
    on "oui", os.remove() every stored path whose name matches.
    """
    src = ""
    dst = ""
    print("bonjour")
    home = os.environ['HOME']
    c = []
    #c = input('écrire le nom du fichier :').split(",")
    #c = "maison.txt"
    c = input('écrire les noms des fichiers à supprimer :').split(", ") # names to search for, ", " separated
    #path.exists()
    lf = {}
    source = {}
    presence2 = []
    #chemin = glob.glob(f'/home/porteur/**/*{c}*', recursive=True)
    for saisie in c:
        lf.update({saisie: []})  # one result bucket per typed name
    #print(chemin)
    for saisie in c:
        for home2, dirs, files in os.walk(home, topdown=True): # os.walk yields (dirpath, dirnames, filenames)
            #files2 = files.copy()
            for fichier4 in files:
                presence2.append(fichier4)
            #print(files2)
            for fichier2 in files: # compare each file name with the typed name
                if saisie.lower().strip() not in fichier2.lower().strip(): # case-insensitive substring test
                    presence2.remove(fichier2) # drop it if it does not match
                    #print(fichier2)
                else:
                    src = os.path.join(home2, fichier2) # build the file's full path
                    source[fichier2] = src # remember it for the deletion step
                    lf[saisie].append(fichier2) # record the match under its input name
                    #lf2[f'{test22}'].append(fichier2)
                    #test22 += 1
    #print(lf)
    #print(premier)
    '''for elemlf in lf:
        print(elemlf)'''
    for key, value in lf.items():
        if len(value) > 1: # several files match this input
            print(f'il existe plusieurs fichiers correspondant à {key} : {value}, {len(value)}')
        elif len(value) == 1:
            print(f'il existe un fichier correspondant à {key} : {value}, {len(value)}')
        elif len(value) == 0:
            print(f'il nexiste pas de fichier correspondant à {key}, {len(value)}')
        else:
            print(f'problème') # NOTE(review): unreachable, len() is never negative
    actuelle = input("Ecrire le(s) fichier(s) concerné(s) :").split(",")
    lf34 = {}
    presence34 = []
    for saisie in actuelle:
        lf34.update({saisie: []})
    for saisie in actuelle: # second walk: verify the confirmation input really matches files
        for home2, dirs, files in os.walk(home, topdown=True):
            #files2 = files.copy()
            for fichier4 in files:
                presence34.append(fichier4)
            #print(files2)
            for fichier2 in files:
                if saisie.lower().strip() not in fichier2.lower().strip():
                    presence34.remove(fichier2)
                    #print(fichier2)
                else:
                    #test = os.path.join(home2, fichier2)
                    #source[fichier2] = src
                    lf34[saisie].append(fichier2) # record that the confirmed file exists
                    #lf2[f'{test22}'].append(fichier2)
                    #test22 += 1
    #print(lf34)
    # NOTE(review): section flagged "à revoir" (to rework) in the original
    for key, value in lf34.items():
        if len(lf34[key]) > 0: # at least one match: the confirmation input is valid
            print(f'fichier existe bien pour saisie {key} : {value}')
        else:
            print(f'soucis avec {key}')
    question = input(f'Êtes-vous sûr de vouloir supprimer le(s) fichier(s) suivant(s) : {lf34.values()} (oui ou non)')
    if question == 'oui':
        for saisie in actuelle:
            for key, value in source.items():
                if saisie.lower().strip() in key.lower().strip():
                    os.remove(value)
                    print(f'fichier {key} de {value} à été supprimé')
        print(f'Le(s) fichier(s) en question ont été supprimé(s).')
    else:
        print("Annulation de la demande de suppréssion du/des fichier(s).")
    menu_fin()
def scp():
    """Search files under $HOME by name and copy them to a remote host via scp.

    Asks for file names, walks the home directory for matches, then asks for
    the remote user/ip/path and copies each selected file. Returns through
    menu_fin().
    """
    import subprocess  # local import so the (unseen) module header stays untouched
    print("bonjour")
    home = os.environ['HOME']
    c = input('écrire les noms des fichiers (séparer par ", ") :').split(",")
    lf = []        # names of matching files
    files2 = []    # scratch list of every file seen (kept from the original)
    source = {}    # matching file name -> full path
    for elem in c:
        for home2, dirs, files in os.walk(home, topdown=True): # (dirpath, dirnames, filenames)
            for fichier4 in files:
                files2.append(fichier4)
            for fichier2 in files: # case-insensitive substring match of the input
                if elem.lower().strip() not in fichier2.lower().strip():
                    files2.remove(fichier2)
                else:
                    src = os.path.join(home2, fichier2)
                    source[fichier2] = src
                    lf.append(fichier2)
    for elemlf in lf:
        print(elemlf)
    if len(lf) > 1:
        print("il existe plusieurs fichiers par rapport à votre demande")
        print(lf)
        actuelle = input("Ecrire le(s) fichier(s) concerné(s) à déplacer/copier vers l'autre poste utilisateur :").split(",")
        user = input("nom de l'user :")
        ip = input("ip : ")
        chemin = input("chemin :")
        for saisie in actuelle:
            for key,value in source.items():
                if saisie.lower().strip() in key.lower().strip():
                    # -p preserves modification/access times and modes.
                    # SECURITY FIX: argument list instead of a shell-interpolated
                    # os.system() string, so spaces/metacharacters in the local
                    # path cannot break out of the command.
                    subprocess.run(["scp", "-p", value, f"{user}@{ip}:{chemin}"])
    elif len(lf) == 1:
        print(f'Voici le fichier qui existe : {lf}')
        user = input("nom de l'user :")
        ip = input("ip : ")
        chemin = input("chemin du dossier de destination:")
        for saisie in lf:
            for key,value in source.items():
                if saisie.lower().strip() in key.lower().strip():
                    subprocess.run(["scp", "-p", value, f"{user}@{ip}:{chemin}"])
    else:
        print(f'Il nya pas de fichier à ce nom là')
    menu_fin()
def synchro():
    """Mirror a local directory to a remote host with rsync over SSH, then return to the menu."""
    import subprocess  # local import so the (unseen) module header stays untouched
    print("Ecrire les informations nécessaires pour la synchronisation de ce que vous souhaitez")
    source = input("dossier source que l'on souhaite synchro : ")
    dest = input("Dossier destination pour stocker la sauvegarde/synchro : ")
    user = input("User : ")
    ip = input("ip : ")
    # -a archive mode: recursive copy, keeps permissions, does not follow symlinks
    # -v verbose, -z compress data before transfer
    # SECURITY FIX: argument list instead of a shell-interpolated os.system()
    # string, so the typed paths cannot inject shell commands.
    subprocess.run(["sudo", "rsync", "-avz", f"{source}/", f"{user}@{ip}:{dest}/"])
    print(f'Syncro du dossier {source} et de son contenu au poste distant {ip} terminé.\n')
    # rsync notes kept from the original:
    # --exclude-from=FILE reads exclusion patterns from a file
    # --delete-after: after the transfer, delete remote files absent from the source
    # -e ssh: use SSH as the transport
    # rsync source/ destination/ copies the *contents* of source into destination
    # excluding a directory also excludes every file inside it; includes must precede excludes
    # --progress shows detailed progress information
    menu()
def ssh_choix():
    """Sub-menu: copy (scp) or move (ssh_moove) files to a remote machine.

    BUG FIXES vs the original: non-numeric input no longer crashes with
    ValueError, and an invalid choice re-prompts instead of silently returning.
    """
    while True:
        print("1 - Copier vers le pc distant.")
        print("2 -Déplacer des fichiers vers le pc distant")
        try:
            repchoix = int(input("\n Que souhaitez-vous faire sur le poste distant ?"))
        except ValueError:
            print("mauvais choix")
            continue
        if repchoix == 1:
            scp()
            return
        elif repchoix == 2:
            ssh_moove()
            return
        else:
            print("mauvais choix")
def ssh_moove():
    """Search files under $HOME, copy them to a remote host via scp, then
    delete the local copies (i.e. a remote "move").
    """
    import subprocess  # local import so the (unseen) module header stays untouched
    print("bonjour")
    home = os.environ['HOME']
    c = input('écrire les noms des fichiers :').split(",")
    lf = []        # names of matching files
    files2 = []    # scratch list of every file seen (kept from the original)
    source = {}    # matching file name -> full path
    for elem in c:
        for home2, dirs, files in os.walk(home, topdown=True): # (dirpath, dirnames, filenames)
            for fichier4 in files:
                files2.append(fichier4)
            for fichier2 in files: # case-insensitive substring match of the input
                if elem.lower().strip() not in fichier2.lower().strip():
                    files2.remove(fichier2)
                else:
                    src = os.path.join(home2, fichier2)
                    source[fichier2] = src
                    lf.append(fichier2)
    for elemlf in lf:
        print(elemlf)
    # BUG FIX: `actuelle` was only bound in the len(lf) > 1 branch, so the
    # deletion loop below crashed with NameError whenever 0 or 1 file matched.
    actuelle = []
    if len(lf) > 1:
        print("il existe plusieurs fichiers par rapport à votre demande")
        print(lf)
        actuelle = input("Ecrire le(s) fichier(s) concerné(s) à déplacer/copier vers l'autre poste utilisateur :").split(",")
        user = input("nom de l'user :")
        ip = input("ip : ")
        chemin = input("chemin :")
        for saisie in actuelle:
            for key,value in source.items():
                if saisie.lower().strip() in key.lower().strip():
                    # -p preserves times and modes; SECURITY FIX: argument list
                    # instead of a shell-interpolated os.system() string.
                    subprocess.run(["scp", "-p", value, f"{user}@{ip}:{chemin}"])
    elif len(lf) == 1:
        print(f'Voici le fichier qui existe : {lf}')
        user = input("nom de l'user :")
        ip = input("ip : ")
        chemin = input("chemin :")
        actuelle = lf # the single match is what gets moved (and deleted below)
        for saisie in lf:
            for key,value in source.items():
                if saisie.lower().strip() in key.lower().strip():
                    subprocess.run(["scp", "-p", value, f"{user}@{ip}:{chemin}"])
    else:
        print(f'Il nya pas de fichier à ce nom là')
    # remove the local copy of everything that was transferred
    for saisie in actuelle:
        for key,value in source.items():
            if saisie.lower().strip() in key.lower().strip():
                os.remove(value) # was os.system(f'rm {value} '): injection-prone
                print(f' {value} supprimé du pc local ')
def menu():
    """Main menu: dispatch to the move/remove/remote-copy/sync features."""
    print('####MENU######\n')
    print('Que souhaitez-vous faire ?.\n')
    print('1 - Déplacer des fichier ou dossiers.')
    print('2 - Supprimer des fichiers.')
    print('3 - Copier/Déplacer/ vers un autre poste.')
    print('4 - Synchroniser votre répertoire/dossier vers un autre poste/server.')
    print('5 - Quitter le programme.')
    try:
        choix = int(input('\n choix :'))
        if choix == 1:
            moove_question()
        elif choix == 2:
            remove()
        elif choix == 3:
            ssh_choix()
        elif choix == 4:
            synchro()
        elif choix == 5:
            # BUG FIX: the bare name `exit` was a no-op expression; actually
            # quit (SystemExit is not caught by the ValueError handler below).
            raise SystemExit(0)
        else:
            menu() # out-of-range number: show the menu again
    except ValueError: # non-numeric input
        print("refaire")
        menu()
'''choix = int(input('\n choix :'))
if choix == 1:
moove_question()
elif choix == 2:
remove()
elif choix == 3:
ssh_choix()
elif choix == 4:
synchro()
elif choix == 5:
exit
else:
menu()'''
if __name__ == "__main__":
    menu()  # entry point: show the main menu when run as a script
'''
B = "/home/porteur/Bureau/B/"
A = "/home/porteur/Bureau/A/"
#dir = glob.glob(path)
listel = list()
listel = [] #création d'une liste pour stocker les entrées
print( listel )
size = list()
size = []
for element in listel:
fichier2 = os.path.join(A, element)
info = os.stat(fichier2)
size.append( info.st_size )
print( size )
def get_files_by_date(A):
files = [(os.stat(f)[ST_CTIME], f) for f in os.listdir(A) if os.path.isfile(f)]
files.sort()
return [f for s,f in files]
#os.rename('/home/porteur/Bureau/Test/jean.txt', '/home/porteur/Bureau/test2/jean.txt')
#shutil.move('/home/porteur/Bureau/Test/jean.txt', '/home/porteur/Bureau/Test2/jean.txt ')
#strftime( %Y-%m-%d %H:%M:%S, datetime.now() )
f= open("/home/porteur/Bureau/historique.txt", "a")
#ft = f.read()
d= datetime.datetime.today()
f.write("La date est " )
f.write(str(d) )
f.write("Liste des fichiers déplacer")
f.close()
'''
| Porteur98/Script-python-pour-d-placement-de-fichier-et-dossier | script_version_1.0-alpha.py | script_version_1.0-alpha.py | py | 45,405 | python | fr | code | 0 | github-code | 13 |
16707436806 | """
Written by Lorenzo Vainigli
This program provides a correct solution for the following problem:
https://www.facebook.com/codingcompetitions/hacker-cup/2019/qualification-round/problems/C
"""
import re
filename = "mr_x"  # base name of the problem's data files
DEBUG = 0  # 1 -> use the small example files; 0 -> the full contest input
if DEBUG:
    input_filename = filename + "_example_input.txt"
    output_filename = filename + "_example_output.txt"
else:
    input_filename = filename + "_input.txt"
    output_filename = filename + "_output.txt"
class Node:
    """Simple binary-tree node holding a value and two child links."""

    def __init__(self, val, left, right):
        self.val, self.left, self.right = val, left, right
# from https://stackoverflow.com/questions/522372/finding-first-and-last-index-of-some-value-in-a-list-in-python
def find_closepar(expr):
    """Return (open, close): the index of the first '(' in `expr` and the
    index of its matching ')'.

    close is -1 when the parenthesis is never closed. Raises ValueError if
    `expr` contains no '(' at all (str.index behaviour, kept from the
    original).
    """
    open_par = expr.index("(")
    skips = 0  # depth of nested parentheses seen after open_par
    for i in range(open_par + 1, len(expr)):
        if expr[i] == "(":
            skips = skips + 1
        elif expr[i] == ")":
            if skips == 0:
                # BUG FIX: return as soon as the matching ')' is found; the
                # original kept scanning and could overwrite close_par on
                # unbalanced input.
                return open_par, i
            skips = skips - 1
    return open_par, -1
def eval_core(expr, is_x_true):
    """Recursively evaluate a boolean expression over {0, 1, x, X, &, |, ^, ()}.

    `x` evaluates to is_x_true and `X` to its negation; returns 0 or 1.
    Parenthesised sub-expressions are reduced innermost-first via
    find_closepar().
    """
    if expr == "x":
        return int(is_x_true)
    if expr == "X":
        return int(not is_x_true)
    if expr in ("0", "1"):
        return int(expr)
    # single-char operand, operator, single-char operand
    # ('|' and '^' are literal inside a character class)
    m = re.search("^(.)([&|^])(.)$", expr)  # FIX: match once instead of twice
    if m:
        lhs = eval_core(m.group(1), is_x_true)
        rhs = eval_core(m.group(3), is_x_true)
        op = m.group(2)
        if op == "&":
            return lhs and rhs
        if op == "|":
            return lhs or rhs
        return lhs ^ rhs
    # otherwise reduce the first parenthesised sub-expression and recurse
    open_par, close_par = find_closepar(expr)
    return eval_core(expr[:open_par]
                     + str(eval_core(expr[open_par+1:close_par], is_x_true))
                     + expr[close_par+1:], is_x_true)
def eval(expr):
    # Evaluate `expr` with x = True.
    # NOTE(review): shadows the builtin eval(); kept as-is because solve()
    # calls it by this name.
    return eval_core(expr, is_x_true=True)
def eval_neg(expr):
    # Evaluate `expr` with x = False (the negated assignment).
    return eval_core(expr, is_x_true=False)
def solve(expr):
    """Return the minimum number of single-character substitutions needed so
    that `expr` evaluates to the same value regardless of x (0 if it already
    does, otherwise always 1 -- see the explanation below)."""
    if eval(expr) == eval_neg(expr):
        # The value of x doesn't affect the result of the expression
        return 0
    else:
        """
        We need to compute the minimum amount of substitution to make the result of the expression independent
        from the value of x. To do that, we start from the top level expression (i.e. the root).
        If this is a single term expression, we can simply change the x to 1 or 0.
        Otherwise, if is a binary operator we have to go on recursion to find the top-level operator and modify
        it to make the other ineffective:
        0 with &: 0 and x is always false;
        1 with |: 1 or x is always true.
        The ^ is a little bit complicated: if one of the operands is x, the other must be x or X, so we can have
        x^x = False or X^x = True regardless of the value of x.
        In all cases discussed above, 1 substitution is enough.
        """
        return 1
# Driver: read the test cases, solve each one, write the answers.
in_data = []
out_data = []
with open(input_filename, 'r') as fileinput:
    for line in fileinput:
        in_data.append(line.rstrip().split(" "))
    fileinput.close()  # redundant: the with-block already closes the file
del in_data[0]  # the first line holds the number of cases; the rest are cases
for i in range(len(in_data)):
    out_data.append("Case #" + str(i + 1) + ": " + str(solve(in_data[i][0])))
with open(output_filename, 'w') as fileoutput:
    for line in out_data:
        fileoutput.write(line + "\n")
    fileoutput.close()  # redundant: see above
| lorenzovngl/meta-hacker-cup | 2019/qualification_round/mr_x/mr_x.py | mr_x.py | py | 3,471 | python | en | code | 1 | github-code | 13 |
21524253114 | import cherrypy
from Networking.statuscodes import StatusCodes
from Networking.network import Network
class Networking(Network):
    """Tophat networking layer: registers the HTTP status codes used by the
    protocol, stores the configuration, and boots the Tophat application."""

    # Loaded configuration object; assigned in __init__.
    _config = None

    def __init__(self, config):
        self._registerStatusCodes()
        self._config = config
        # Imported here rather than at module level -- presumably to avoid a
        # circular import with the `tophat` module; TODO confirm.
        from tophat import TophatMain
        TophatMain(self._config)

    def _registerStatusCodes(self):
        # Map the shared StatusCodes enumeration to standard HTTP codes.
        StatusCodes.NONE = 0
        StatusCodes.OK = 200
        StatusCodes.CREATED = 201
        StatusCodes.BAD_REQUEST = 400
        StatusCodes.UNAUTHORISED = 401
        StatusCodes.NOT_FOUND = 404
        StatusCodes.METHOD_NOT_ALLOWED = 405
        StatusCodes.SERVER_ERROR = 500
        StatusCodes.UNIMPLEMENTED = 501
13416275499 | import face_recognition as face
import numpy as np
import cv2
# Recognize a known face ("PONGKUL") in a video file, drawing a labelled box
# with a match percentage. Every second frame is processed, on a half-size
# copy, for speed.
video_capture = cv2.VideoCapture("sample.mp4")
pop_image = face.load_image_file("pop.jpg")
pop_face_encoding = face.face_encodings(pop_image)[0]
face_location = []
face_encodings = []
face_names = []
face_percent = []  # FIX: was misspelled `face_percenrt` (and never used)
process_this_frame = True
known_face_encodings = [pop_face_encoding]
known_face_names = ["PONGKUL"]
while True:
    ret, frame = video_capture.read()  # ret is False once the video ends
    if not ret:
        break
    # Half-size copy for speed; BGR -> RGB channel order for face_recognition.
    small_frame = cv2.resize(frame, (0, 0), fx=0.5, fy=0.5)
    rgb_small_frame = small_frame[:, :, ::-1]
    if process_this_frame:
        face_names = []
        face_percent = []
        face_location = face.face_locations(rgb_small_frame, model="cnn")
        # BUG FIX: the result was assigned to a misspelled name
        # (`face_endcodings`), so the loop below always iterated the stale
        # empty list and nothing was ever recognized.
        face_encodings = face.face_encodings(rgb_small_frame, face_location)
        for face_encoding in face_encodings:
            face_distance = face.face_distance(known_face_encodings, face_encoding)
            best = np.argmin(face_distance)
            face_percent_value = 1 - face_distance[best]
            if face_percent_value >= 0.5:
                name = known_face_names[best]
                # BUG FIX: `percent` was read before assignment (NameError);
                # compute it from the match score.
                percent = round(face_percent_value * 100, 2)
                face_percent.append(percent)
            else:
                name = "UNKNOWN"
                face_percent.append(0)
            # BUG FIX: names were appended to `face_name` while the drawing
            # loop reads `face_names`, so no box was ever drawn.
            face_names.append(name)
        for (top, right, bottom, left), name, percent in zip(face_location, face_names, face_percent):
            # BUG FIX: coordinates come from the half-size frame; scale them
            # back up (the original overwrote them with the constant 2).
            top *= 2
            right *= 2
            bottom *= 2
            left *= 2
            if name == "UNKNOWN":
                color = [45, 2, 209]
            else:
                color = [255, 102, 51]
            cv2.rectangle(frame, (left, top), (right, bottom), color, 3)
            cv2.rectangle(frame, (left-1, top-30), (right-1, top), color, cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left-6, top-6), font, 0.6, (255, 255, 255), 1)
            cv2.putText(frame, "MATCH: " + str(percent) + "%", (left-6, bottom+23), font, 0.6, (255, 255, 255), 1)
    process_this_frame = not process_this_frame
    cv2.imshow("Video", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
1280636508 | from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
class PostProcess:
"""
This class performs post-processing on features to cluster them based on the specified method.
It supports scaling the features using different scaling methods before clustering.
Parameters:
- number_of_people (int): The desired number of clusters or people.
- cluster_method (str): The clustering method to use. Currently, only 'kmeans' is supported.
- scale_method (str): The scaling method to use. Available options are 'standard', 'minmax', and 'robust'.
Methods:
- run(features): Performs clustering on the given features and returns the cluster labels.
"""
def __init__(self, number_of_people, cluster_method, scale_method):
self.n = number_of_people
if cluster_method == 'kmeans':
self.cluster_method = KMeans(n_clusters=self.n, random_state=0)
else:
raise NotImplementedError("Unsupported clustering method. Only 'kmeans' is currently supported.")
if scale_method == 'standard':
self.scaler = StandardScaler()
elif scale_method == 'minmax':
self.scaler = MinMaxScaler(feature_range=(0, 1))
elif scale_method == 'robust':
self.scaler = RobustScaler()
else:
raise NotImplementedError("Unsupported scaling method. Available options are 'standard', 'minmax', and 'robust'.")
def run(self, features):
"""
Performs clustering on the given features.
Parameters:
- features (array-like): The input features to be clustered.
Returns:
- cluster_labels (array): The cluster labels assigned to each feature.
"""
print('Start Clustering')
# Scale or normalize the features
scaled_features = self.scaler.fit_transform(features)
# Fit the clustering algorithm
self.cluster_method.fit(scaled_features)
print('Finish Clustering')
return self.cluster_method.labels_
| JordanWhite34/Multi-Object-Tracking | baseline/Processing.py | Processing.py | py | 2,094 | python | en | code | 0 | github-code | 13 |
class AFN:
    """Nondeterministic finite automaton (AFN) loaded from / saved to a text file.

    Attributes:
        inicial: initial state.
        final: final (accepting) state.
        estadosOrigen / estadoDestino / transicion: parallel lists; entry i is
            the transition estadosOrigen[i] --transicion[i]--> estadoDestino[i].
        estadosPosibles: list of known states.

    Fixes over the original: every instance method was missing ``self``,
    ``esAFN``/``esAFD`` used the undefined name ``E`` and lowercase
    ``true``/``false``, ``agregar_transicion`` appended to misspelled names,
    ``guardar_en`` iterated nonexistent attributes, and ``eliminar_transicion``
    called ``list.index`` with three arguments.
    """

    def __init__(self, inicial, final, estadosOrigen, estadoDestino, transicion, estadosPosibles):
        self.inicial = inicial
        self.final = final
        self.estadosOrigen = estadosOrigen
        self.estadoDestino = estadoDestino
        self.transicion = transicion
        self.estadosPosibles = estadosPosibles

    def establecer_inicial(self, nuevoInicial, Estados):
        """Set the initial state, but only if it is a known state."""
        if nuevoInicial in Estados:
            self.inicial = nuevoInicial

    def establecer_finales(self, nuevoFinal, Estados):
        """Set the final state, but only if it is a known state."""
        if nuevoFinal in Estados:
            self.final = nuevoFinal

    def agregar_transicion(self, Origen, Destino, Simbolo):
        """Append the transition Origen --Simbolo--> Destino (Origen must be known)."""
        if Origen in self.estadosPosibles:
            self.estadosOrigen.append(Origen)
            self.estadoDestino.append(Destino)
            self.transicion.append(Simbolo)

    def guardar_en(self, nombre):
        """Write the automaton to *nombre* in the same text format cargar_desde reads."""
        with open(nombre, "w") as archivo:
            archivo.write("inicio:%s\nfinal:%s\n" % (self.inicial, self.final))
            for origen, destino, simbolo in zip(self.estadosOrigen, self.estadoDestino, self.transicion):
                archivo.write("%s->%s,%s\n" % (origen, destino, simbolo))

    def esAFN(self):
        """True if any transition uses the epsilon symbol 'E' (nondeterministic)."""
        return 'E' in self.transicion

    def esAFD(self):
        """True if no transition uses the epsilon symbol 'E' (deterministic)."""
        return 'E' not in self.transicion

    def eliminar_transicion(self, Origen, Destino, Simbolo):
        """Remove the first transition matching (Origen, Destino, Simbolo), if any."""
        for i in range(len(self.estadosOrigen)):
            if (self.estadosOrigen[i] == Origen
                    and self.estadoDestino[i] == Destino
                    and self.transicion[i] == Simbolo):
                del self.estadosOrigen[i]
                del self.estadoDestino[i]
                del self.transicion[i]
                break

    @staticmethod
    def obtener_inicial(Lista):
        """Parse the initial state from the first line of the file.

        NOTE(review): slices at column 8 while guardar_en writes the 7-char
        prefix "inicio:" — offsets kept from the original; confirm file format.
        """
        Inicio = Lista[0]
        return int(Inicio[8:])

    @staticmethod
    def obtener_final(Lista):
        """Parse the final state from the second line (see offset note above)."""
        Finales = Lista[1]
        return int(Finales[8:])

    @staticmethod
    def cargar_desde(Documento):
        """Build an AFN from a text file: header lines, then 'origen->destino,simbolo'."""
        listaLimpia = []
        with open(Documento) as automata:
            for linea in automata:
                listaLimpia.append(linea.rstrip())
        inicial = AFN.obtener_inicial(listaLimpia)
        final = AFN.obtener_final(listaLimpia)
        estadosOrigen, estadoDestino, transicion = [], [], []
        for linea in listaLimpia[2:]:
            campos = linea.replace("->", ",").split(",")
            estadosOrigen.append(int(campos[0]))
            estadoDestino.append(int(campos[1]))
            transicion.append(campos[2])
        estadosPosibles = list(set(estadosOrigen))
        return AFN(inicial, final, estadosOrigen, estadoDestino, transicion, estadosPosibles)


if __name__ == "__main__":
    # Previously an unguarded bare call to the (undefined) name cargar_desde
    # ran at import time and raised NameError.
    AFN.cargar_desde("Automata.af")
906387567 | import re
import string
import sys
class VerifierReader(object):
    """Strict cursor-based reader used to validate competitive-programming input.

    Keeps a position into an in-memory string; every Read* method consumes
    exactly the tokens it matches and raises AssertionError on any deviation
    (wrong separator, out-of-range value, unexpected EOF).
    """
    def __init__(self, text):
        self.text = text
        self.position = 0
    def HasNext(self):
        """Return True while unread characters remain."""
        return self.position < len(self.text)
    def Read(self, target):
        """Consume the exact string *target* or fail."""
        actual = self.text[self.position : self.position + len(target)]
        assert actual == target, "Could not read '{0}'".format(target)
        self.position += len(target)
    def ReadSpace(self):
        """Consume a single space character."""
        self.Read(' ')
    def ReadEndl(self):
        """Consume a single newline character."""
        self.Read('\n')
    def ReadEof(self):
        """Assert the whole input has been consumed."""
        assert self.position == len(self.text), 'Found junk at the end of the file'
    def ReadChar(self):
        """Consume and return one arbitrary character."""
        assert self.position < len(self.text), 'Unexpected EOF'
        char = self.text[self.position]
        self.position += 1
        return char
    def ReadLine(self, valid_characters):
        """Consume a non-empty line of *valid_characters*; return it without the newline."""
        old_position = self.position
        while True:
            assert self.position < len(self.text), 'Unexpected EOF'
            c = self.text[self.position]
            self.position += 1
            if c == '\n':
                break
            assert c in valid_characters
        assert self.position - old_position > 1, 'Nothing before next newline'
        return self.text[old_position:self.position - 1]
    def ReadRegex(self, target_regex):
        """Consume text matching *target_regex* at the cursor; return the match."""
        match = re.compile(target_regex).match(self.text, self.position)
        assert match is not None, 'Could not match /%s/' % target_regex
        self.position = match.end()
        return match.group(0)
    def ReadList(self, n, read_fn, *args):
        """Read *n* space-separated items via read_fn, then the trailing newline."""
        ls = []
        for i in range(n):
            if i > 0:
                self.Read(' ')
            ls.append(read_fn())
        self.ReadEndl()
        return ls
    def ReadInt(self, inclusive_min, inclusive_max):
        """Read an integer with no leading zeros and check its range."""
        value = int(self.ReadRegex(r'0|(-?[1-9][0-9]*)'))
        assert inclusive_min <= value <= inclusive_max, (
            'Failed on %d <= %d <= %d' % (inclusive_min, value, inclusive_max))
        return value
    def ReadIntList(self, n, inclusive_min, inclusive_max):
        """Read a newline-terminated list of *n* range-checked integers."""
        return self.ReadList(n, lambda: self.ReadInt(inclusive_min, inclusive_max))
    def ReadString(self, valid_characters=string.ascii_letters + string.digits,
                   inc_min_len=1, inc_max_len=10**6):
        """Read a maximal run of *valid_characters* and check its length bounds."""
        old_position = self.position
        while True:
            assert self.position < len(self.text), 'Unexpected EOF'
            c = self.text[self.position]
            if c not in valid_characters:
                break
            self.position += 1
        st = self.text[old_position:self.position]
        assert inc_min_len <= len(st) <= inc_max_len, (
            'Invalid string length: %d' % len(st))
        return st
    def ReadStringList(self, n,
                       valid_characters=string.ascii_letters + string.digits,
                       inc_min_len=1, inc_max_len=10**6):
        """Read a newline-terminated list of *n* length-checked strings."""
        return self.ReadList(
            n,
            lambda: self.ReadString(valid_characters, inc_min_len, inc_max_len))
def IsClockwise(a, b, c):
    """Return True when the turn a -> b -> c is clockwise or collinear.

    Uses the sign of the 2-D cross product of (b - a) and (c - b).
    """
    cross = (c[0] - b[0]) * (b[1] - a[1]) - (c[1] - b[1]) * (b[0] - a[0])
    return cross >= 0
def IsDegenerate(polygon):
    """Return True if the closed polygon encloses zero signed area.

    *polygon* is expected as a closed point list (last point == first);
    the shoelace sum is taken over consecutive vertex pairs.
    """
    shoelace = 0
    for p, q in zip(polygon, polygon[1:]):
        shoelace += p[0] * q[1] - p[1] * q[0]
    return shoelace == 0
def main(argv):
    """Validate a test-case file read from stdin.

    Format: "N M S" header, then N military tower coordinates and
    M communication tower coordinates. Checks ranges, uniqueness, and that
    every military tower lies inside the convex hull of the communication
    towers (bounding-box check when the hull is degenerate).
    """
    vr = VerifierReader(sys.stdin.read())
    N = vr.ReadInt(1, 500)
    assert vr.ReadChar() == ' ', 'Input is not space separated'
    M = vr.ReadInt(2, 500)
    assert vr.ReadChar() == ' ', 'Input is not space separated'
    S = vr.ReadInt(1, 99)
    vr.ReadEndl()
    military = [tuple(vr.ReadIntList(2, -10**9, 10**9)) for _ in range(N)]
    communication = [tuple(vr.ReadIntList(2, -10**9, 10**9)) for _ in range(M)]
    combined = military + communication
    assert len(combined) == len(set(combined)), 'Found duplicate tower position'
    # Convex hull of the communication towers (monotone-chain style: one pass
    # over the sorted points, then the reverse pass for the other chain).
    communication = sorted(communication)
    chull = [communication[0]]
    for i in range(1, len(communication)):
        while len(chull) > 1 and not IsClockwise(chull[-2], chull[-1],
                                                communication[i]):
            chull.pop()
        chull.append(communication[i])
    limit = len(chull)
    for i in range(len(communication) - 2, -1, -1):
        while len(chull) > limit and not IsClockwise(chull[-2], chull[-1],
                                                     communication[i]):
            chull.pop()
        chull.append(communication[i])
    # A zero-area hull (all points collinear) needs the bounding-box fallback.
    is_degenerate = IsDegenerate(chull)
    min_x_chull = min([point[0] for point in chull])
    max_x_chull = max([point[0] for point in chull])
    min_y_chull = min([point[1] for point in chull])
    max_y_chull = max([point[1] for point in chull])
    for i in range(len(military)):
        for j in range(len(chull) - 1):
            assert IsClockwise(chull[j], chull[j + 1], military[i]), (
                'Military base {} is initially not online'.format(i))
        if is_degenerate:
            assert (
                min_x_chull <= military[i][0] <= max_x_chull and
                min_y_chull <= military[i][1] <= max_y_chull), (
                'Military base {} is initially not online'.format(i))
    vr.ReadEof()
# Script entry point: exit with main()'s return code.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| jonathanirvings/icpc-jakarta-2020 | robust/verifier.py | verifier.py | py | 4,866 | python | en | code | 11 | github-code | 13 |
5979348171 | # importing json and urllib library
import json
from urllib.request import urlopen
def main():
    """Fetch the Wikipedia movie dataset and print 2018 genre counts, descending.

    Downloads the JSON list of movies, tallies how many 2018 films carry each
    genre, and prints "genre count" lines sorted by count (largest first).
    """
    url = "https://raw.githubusercontent.com/prust/wikipedia-movie-data/master/movies.json"
    try:
        # storing JSON response from url in data
        response = urlopen(url)
        data = json.loads(response.read())
    except Exception:
        # bug fix: the original bare `except` printed the message and then fell
        # through, crashing with NameError because `data` was never bound.
        print("Invalid url")
        return
    genre_counts = {}
    # counting the films in each genre in year 2018
    for movie in data:
        if movie["year"] == 2018:
            for genre in movie["genres"]:
                genre_counts[genre] = genre_counts.get(genre, 0) + 1
    # sort by count, descending; sorted() is stable like the old O(n^2)
    # bubble sort it replaces, so ties keep their insertion order
    for genre, count in sorted(genre_counts.items(), key=lambda kv: kv[1], reverse=True):
        print(genre, count)
# Run only when executed as a script.
if __name__ == "__main__":
    main()
# Variables
# Python is dynamically typed: no type declaration is written, the type is inferred from the value
a=10
b=10.0
c="10"
d='10'
e='''10'''
print(a,b,c,d,e)
# use type() to inspect a value's runtime type
print(type(a),type(b),type(c),type(d),type(e))
# multiple assignment: bind several names in one statement
a,b,c=10,20,30
# tuple unpacking swaps two values without a temporary variable
a,b=b,a
print(a,b)
#NOTE-: everything in python is an Object and belongs to a class{ more on this later }
#--------------------------------
# variables have 2 scopes: global and local
# a global variable can be used throughout the module{this python file}
# a local variable can be used only within its enclosing function
x=10
def val(x):# the parameter x shadows the global; it is local here
    print(x)
def val2():# `global x` makes this function read/write the module-level x
    global x
    print(x)
def main():
    x=2
    val(x)   # prints the local value 2
    val2()   # prints the global value 10
if __name__=="__main__":
    main()
#---------------------------------
# python has many built-in collection types
# 1.Tuples
# 2.List
# 3.Dictionary
# 4.Set
#---------------------------------
20346821283 | from .porttypebase import DPWSPortTypeBase, WSDLMessageDescription, WSDLOperationBinding, mk_wsdl_two_way_operation
from .porttypebase import msg_prefix
from sdc11073.dispatch import DispatchKey
from sdc11073.namespaces import PrefixesEnum
class GetService(DPWSPortTypeBase):
    """BICEPS Get service: serves GetMdState / GetMdib / GetMdDescription requests
    for the device's MDIB over the DPWS hosting service."""
    port_type_name = PrefixesEnum.SDC.tag('GetService')
    # WSDL metadata: one request/response message pair per supported operation.
    WSDLMessageDescriptions = (WSDLMessageDescription('GetMdState',
                                                      (f'{msg_prefix}:GetMdState',)),
                               WSDLMessageDescription('GetMdStateResponse',
                                                      (f'{msg_prefix}:GetMdStateResponse',)),
                               WSDLMessageDescription('GetMdib',
                                                      (f'{msg_prefix}:GetMdib',)),
                               WSDLMessageDescription('GetMdibResponse',
                                                      (f'{msg_prefix}:GetMdibResponse',)),
                               WSDLMessageDescription('GetMdDescription',
                                                      (f'{msg_prefix}:GetMdDescription',)),
                               WSDLMessageDescription('GetMdDescriptionResponse',
                                                      (f'{msg_prefix}:GetMdDescriptionResponse',)),
                               )
    WSDLOperationBindings = (WSDLOperationBinding('GetMdState', 'literal', 'literal'),
                             WSDLOperationBinding('GetMdib', 'literal', 'literal'),
                             WSDLOperationBinding('GetMdDescription', 'literal', 'literal'),)

    def register_hosting_service(self, hosting_service):
        """Register the three POST handlers (by action + message name) on the hosting service."""
        super().register_hosting_service(hosting_service)
        actions = self._sdc_device.mdib.sdc_definitions.Actions
        msg_names = self._sdc_device.mdib.sdc_definitions.data_model.msg_names
        hosting_service.register_post_handler(DispatchKey(actions.GetMdState, msg_names.GetMdState),
                                              self._on_get_md_state)
        hosting_service.register_post_handler(DispatchKey(actions.GetMdib, msg_names.GetMdib),
                                              self._on_get_mdib)
        hosting_service.register_post_handler(DispatchKey(actions.GetMdDescription, msg_names.GetMdDescription),
                                              self._on_get_md_description)

    def _on_get_md_state(self, request_data):
        """Handle GetMdState: return the states for the requested handles (all states if none given)."""
        data_model = self._sdc_definitions.data_model
        msg_node = request_data.message_data.p_msg.msg_node
        get_md_state = data_model.msg_types.GetMdState.from_node(msg_node)
        requested_handles = get_md_state.HandleRef
        if len(requested_handles) > 0:
            self._logger.debug('_on_get_md_state from {} req. handles:{}', request_data.peer_name, requested_handles)
        else:
            self._logger.debug('_on_get_md_state from {}', request_data.peer_name)
        # get the requested state containers from mdib
        state_containers = []
        with self._mdib.mdib_lock:
            if len(requested_handles) == 0:
                # MessageModel: If the HANDLE reference list is empty, all states in the MDIB SHALL be included in the result list.
                state_containers.extend(self._mdib.states.objects)
                if self._sdc_device.contextstates_in_getmdib:
                    state_containers.extend(self._mdib.context_states.objects)
            else:
                if self._sdc_device.contextstates_in_getmdib:
                    for handle in requested_handles:
                        try:
                            # If a HANDLE reference does match a multi state HANDLE, the corresponding multi state SHALL be included in the result list
                            state_containers.append(self._mdib.context_states.handle.get_one(handle))
                        except (KeyError, ValueError):
                            # If a HANDLE reference does match a descriptor HANDLE, all states that belong to the corresponding descriptor SHALL be included in the result list
                            state_containers.extend(self._mdib.states.descriptor_handle.get(handle, []))
                            state_containers.extend(self._mdib.context_states.descriptor_handle.get(handle, []))
                else:
                    for handle in requested_handles:
                        state_containers.extend(self._mdib.states.descriptor_handle.get(handle, []))
            self._logger.debug('_on_get_md_state requested Handles:{} found {} states', requested_handles,
                               len(state_containers))
        factory = self._sdc_device.msg_factory
        response = data_model.msg_types.GetMdStateResponse()
        response.MdState.State.extend(state_containers)
        response.set_mdib_version_group(self._mdib.mdib_version_group)
        created_message = factory.mk_reply_soap_message(request_data, response)
        self._logger.debug('_on_get_md_state returns {}',
                           lambda: created_message.serialize())
        return created_message

    def _on_get_mdib(self, request_data):
        """Handle GetMdib: return the whole MDIB (with or without context states per device config)."""
        self._logger.debug('_on_get_mdib')
        if self._sdc_device.contextstates_in_getmdib:
            mdib_node, mdib_version_group = self._mdib.reconstruct_mdib_with_context_states()
        else:
            mdib_node, mdib_version_group = self._mdib.reconstruct_mdib()
        response = self._data_model.msg_types.GetMdibResponse()
        response.set_mdib_version_group(mdib_version_group)
        response.Mdib = mdib_node
        response = self._sdc_device.msg_factory.mk_reply_soap_message(request_data, response)
        return response

    def _on_get_md_description(self, request_data):
        """
        MdDescription comprises the requested set of MDS descriptors. Which MDS descriptors are included depends on the msg:GetMdDescription/msg:HandleRef list:
        - If the HANDLE reference list is empty, all MDS descriptors SHALL be included in the result list.
        - If a HANDLE reference does match an MDS descriptor, it SHALL be included in the result list.
        - If a HANDLE reference does not match an MDS descriptor (any other descriptor), the MDS descriptor that is in the parent tree of the HANDLE reference SHOULD be included in the result list.
        """
        # currently this implementation only supports a single mds.
        # => if at least one handle matches any descriptor, the one mds is returned, otherwise empty payload
        data_model = self._sdc_definitions.data_model
        self._logger.debug('_on_get_md_description')
        msg_node = request_data.message_data.p_msg.msg_node
        get_md_state = data_model.msg_types.GetMdDescription.from_node(msg_node)
        requested_handles = get_md_state.HandleRef
        if len(requested_handles) > 0:
            # NOTE(review): logged at info level, unlike the debug level used elsewhere — confirm intent
            self._logger.info('_on_get_md_description requested Handles:{}', requested_handles)
        response = self.mk_get_mddescription_response_message(
            request_data, self._sdc_device.mdib, requested_handles)
        self._logger.debug('_on_get_md_description returns {}',
                           lambda: response.serialize())
        return response

    def mk_get_mddescription_response_message(self, request_data, mdib, requested_handles):
        """For simplification reason this implementation returns either all descriptors or none."""
        return_all = len(requested_handles) == 0  # if we have handles, we need to check them
        dummy_response = self._sdc_definitions.data_model.msg_types.GetMdDescriptionResponse()
        dummy_response.set_mdib_version_group(mdib.mdib_version_group)
        response = self._sdc_device.msg_factory.mk_reply_soap_message(request_data, dummy_response)
        # now add to payload_element
        response_node = response.p_msg.payload_element
        for handle in requested_handles:
            # if at least one requested handle is valid, return all.
            if mdib.descriptions.handle.get_one(handle, allow_none=True) is not None:
                return_all = True
                break
        if return_all:
            md_description_node, mdib_version_group = mdib.reconstruct_md_description()
            # append all children of md_description_node to msg_names.MdDescription node in response
            response_node[0].extend(md_description_node[:])
        return response

    def add_wsdl_port_type(self, parent_node):
        """
        add wsdl:portType node to parent_node.
        xml looks like this:
        <wsdl:portType name="GetService" dpws:DiscoveryType="dt:ServiceProvider">
          <wsp:Policy>
            <dpws:Profile wsp:Optional="true"/>
          </wsp:Policy>
          <wsdl:operation name="GetMdState">
            <wsdl:input message="msg:GetMdState"/>
            <wsdl:output message="msg:GetMdStateResponse"/>
          </wsdl:operation>
          ...
        </wsdl:portType>
        :param parent_node:
        :return:
        """
        port_type = self._mk_port_type_node(parent_node)
        mk_wsdl_two_way_operation(port_type, operation_name='GetMdState')
        mk_wsdl_two_way_operation(port_type, operation_name='GetMdib')
        mk_wsdl_two_way_operation(port_type, operation_name='GetMdDescription')
| Draegerwerk/sdc11073 | src/sdc11073/provider/porttypes/getserviceimpl.py | getserviceimpl.py | py | 9,319 | python | en | code | 27 | github-code | 13 |
39812352529 | import itertools
import pandas as pd
import numpy as np
import pathlib
import sqlalchemy
import sys
def calculate_parameters(data):
    """Add the derived QC columns to *data* and return the frame.

    rrt (relative retention time) = RT / is_RT; a value of exactly 0 is
    treated as missing and replaced with NaN.
    ion_ratio = peak_area / confirming_ion_area.
    """
    data['rrt'] = data['RT'].div(data['is_RT'])
    data['ion_ratio'] = data['peak_area'].div(data['confirming_ion_area'])
    # force both derived columns to numeric dtype
    for derived in ('rrt', 'ion_ratio'):
        data[derived] = pd.to_numeric(data[derived])
    return data.replace({'rrt': 0}, np.nan)
def read_sql_results(db):
    """Load the results table from the configured database.

    Args:
        db: 'sqlite' or 'postgresql'; any other value exits the program.

    Returns:
        (df, intstd_dict): the results DataFrame with derived QC columns, and a
        {compound: internal standard} mapping taken from the first row per compound_id.
    """
    if db == 'sqlite':
        engine = sqlalchemy.create_engine('sqlite:////Path/to/file/opiates.db')
    elif db == 'postgresql':
        # NOTE(review): credentials are hardcoded in the connection string —
        # move them to configuration / environment variables.
        engine = sqlalchemy.create_engine('postgresql://username:password@localhost/opiates')
    else:
        print('No appropriate db selected. Program will exit.')
        sys.exit()
    conn = engine.connect()
    df = pd.read_sql_query("""
                            SELECT *
                            FROM "results"
                            """, conn)
    df['Date'] = pd.to_datetime(df['Date'], format='%Y-%m-%d')
    # drop rows without a sample type before deriving QC parameters
    df = df[df['sample_type'].notna()]
    df = calculate_parameters(df)
    # build IS-Compound dict
    sub_df = df.drop_duplicates(subset=['compound_id'], keep='first')
    intstd_dict = dict(zip(sub_df['compound'], sub_df.int_std))
    return df, intstd_dict
def read_data_csv(file):
    """Load sample results from a CSV file.

    Returns (frame, intstd_dict): the DataFrame with derived QC columns added,
    and a {compound: internal standard} mapping taken from the first row seen
    for each compound_id.
    """
    frame = pd.read_csv(file)
    frame['Date'] = pd.to_datetime(frame['Date'], format='%Y-%m-%d')
    frame = frame[frame['sample_type'].notna()]
    frame = calculate_parameters(frame)
    # the first occurrence of each compound_id defines the compound -> IS pairing
    firsts = frame.drop_duplicates(subset=['compound_id'], keep='first')
    intstd_dict = dict(zip(firsts['compound'], firsts.int_std))
    return frame, intstd_dict
def create_qa_dict(frame):
    """Convert the QA frame into {qa_compound: {column: value}}.

    Drops bookkeeping columns first (errors='ignore' tolerates absent ones),
    then groups by qa_compound and flattens the per-group dicts into one map.
    """
    columns = ['compound_code', 'qa_id', 'amr_low', 'amr_high', 'signoise', 'int_std_peak_area',
               'spike_low', 'spike_high', 'straight_metab_check', 'is_spike_recovery', 'delta']
    frame = frame.drop(columns, axis=1, errors='ignore')
    # each group becomes a one-entry {compound: row-dict}; merge them below
    test = frame.groupby('qa_compound').apply(lambda x: x.set_index('qa_compound').to_dict(orient='index'))
    qadict = dict()
    for i in test:
        for k, v in i.items():
            qadict[k] = v
    return qadict
def read_qa(qa_file, diction):
    """Load QA acceptance criteria from CSV.

    Args:
        qa_file: path to the QA criteria CSV.
        diction: {compound: internal standard} mapping used to key the
            internal-standard peak-area limits.

    Returns:
        (qa_dict, qa_intstd_dict): per-compound criteria dict and a
        {internal standard: expected peak area} dict.
    """
    qa_df = pd.read_csv(qa_file)
    cols = ['ion_ratio_avg', 'ion_ratio_cv', 'rel_reten_low', 'rel_reten_high', 'rel_reten',
            'amr_low', 'amr_high', 'signoise_stda']
    # midpoint of the allowed relative-retention window
    qa_df['rel_reten'] = (qa_df['rel_reten_high'] + qa_df['rel_reten_low']) / 2
    qa_df[cols] = qa_df[cols].apply(pd.to_numeric)
    # allowed ion-ratio deviation = mean * CV
    qa_df ['delta'] = qa_df['ion_ratio_avg'] * qa_df['ion_ratio_cv']
    qa_df['ion_ratio_low'] = qa_df['ion_ratio_avg'] - qa_df['delta']
    qa_df['ion_ratio_high'] = qa_df['ion_ratio_avg'] + qa_df['delta']
    qa_dict = create_qa_dict(qa_df)
    qa_cop = qa_df.copy()
    qa_cop['int_std'] = qa_cop['qa_compound'].map(diction)
    qa_intstd_dict = dict(zip(qa_cop.int_std, qa_cop.int_std_peak_area))
    del qa_cop
    return qa_dict, qa_intstd_dict
def create_sample_type_list(sample_type_list):
    """Extend *sample_type_list* in place with 'a|b' pair filters and 'All'.

    For every unordered pair of sample types an "a|b" entry is appended, then
    the catch-all 'All' entry. The (mutated) list is also returned.

    Bug fix: the original built combinations of every length from 1 to
    len-1 but only ever read the first two elements of each, so with four
    or more sample types the same "a|b" string was appended multiple times,
    and with exactly two types no pair was generated at all.
    """
    for first, second in itertools.combinations(list(sample_type_list), 2):
        sample_type_list.append(f"{first}|{second}")
    sample_type_list.append('All')
    return sample_type_list
# Module-level initialization: load the sample data and QA criteria relative to
# the working directory so the dashboard has them available at import time.
actual_dir = pathlib.Path().absolute()
df, compound_dict = read_data_csv(f'{actual_dir}/dashboard_files/sample_data.csv')
qa_compound_dict, int_std_dict = read_qa(f'{actual_dir}/dashboard_files/qa.csv',
                                         compound_dict)
available_samples = create_sample_type_list(df['sample_type'].unique().tolist())
| pablouw/opiateDashboard | process_data.py | process_data.py | py | 3,715 | python | en | code | 0 | github-code | 13 |
42482317404 | #
# Bentobox
# SDK - Simulation
# Simulation
#
from typing import Iterable, List, Optional, Set
from bento.client import Client
from bento.ecs.grpc import Component, Entity
from bento.graph.compile import ConvertFn, compile_graph
from bento.protos import sim_pb2
from bento.spec.ecs import ComponentDef, EntityDef, SystemDef
from bento.spec.graph import Graph
from bento.spec.sim import SimulationDef
class Simulation:
    # TODO(mrzzy): Add a more complete usage example into docs.
    """Represents a `Simulation` in running in the Bentobox Engine.

    Example:
        Building and running a simulation::

            # either: define simulation with entities and components
            sim = Simulation(name="sim", entities=[ ... ], components=[ ... ], client=client)
            # or: load/hydrate a predefined simulation from a SimulationDef
            sim = Simulation.from_def(sim_def)

            # use an init graph to initialize attribute values
            @sim.init
            def init_fn():
                # initialize values with: entity[Component].attribute = value

            # implement systems running in the simulation
            @sim.system
            def system_fn():
                # ...

            # start-end the simulation using with block
            with sim:
                # run the simulation for one step
                sim.step()
                # ...
    """

    def __init__(
        self,
        name: str,
        components: Iterable[ComponentDef],
        entities: Iterable[EntityDef],
        client: Client,
        system_fns: Iterable[ConvertFn] = [],
        init_fn: Optional[ConvertFn] = None,
    ):
        """Create a new simulation with the given entities and component

        Args:
            name: Name of the the Simulation. The name should be unique among
                registered simulation in the Engine.
            entities: List of entities to use in the simulation.
            components: List of component types in use in the simulation.
            client: Client to use to communicate with the Engine when registering
                and interacting with the simulation.
            system_fns: List of `bento.graph.compile.compile_graph()` compilable
                function implementing the systems to run in the simulation.
            init_fn: The `bento.graph.compile.compile_graph()` compilable
                function containing the init code for the simulation
                that runs when the specified Simulation is registered/applied.
        """
        self.name = name
        self.client = client
        self.started = False
        self.component_defs = list(components)
        self.entity_defs = list(entities)
        # (system_fn, system id). 0 to signify unset system id.
        self.system_fns = [(fn, 0) for fn in system_fns]
        self.init_fn = init_fn
        # register sim on engine
        # obtain autogen ids for entities and the engine by recreating from applied proto
        applied_proto = self.client.apply_sim(self.build(include_graphs=False))
        self.entity_defs = [EntityDef.from_proto(e) for e in applied_proto.entities]
        # unpack entity and components from proto
        # unpack Entity protos into grpc backed entities (set(components) -> grpc entity)
        self.entity_map = {
            frozenset(e.components): Entity(
                sim_name=self.name,
                entity_id=e.id,
                components=e.components,
                client=self.client,
            )
            for e in self.entity_defs
        }

    @classmethod
    def from_def(cls, sim_def: SimulationDef, client: Client):
        """
        Create/Hydrate a Simulation from a `bento.spec.SimulationDef`.

        Args:
            sim_def: SimulationDef specification to load the Simulation from.
            client: Client to use to communicate with the Engine.
        """
        return cls(
            name=sim_def.name,
            components=sim_def.component_defs,
            entities=sim_def.entity_defs,
            system_fns=sim_def.system_fns,
            init_fn=sim_def.init_fn,
            client=client,
        )

    def build(self, include_graphs: bool = True) -> sim_pb2.SimulationDef:
        """
        Build a `bento.protos.sim_pb2.SimulationDef` Proto from this Simulation.

        Args:
            include_graphs: Whether to compile & include graphs in the returned Proto.
                This requires that id to be set for each entity as entity ids
                are required for graph compilation to work.
        Returns:
            The `bento.protos.sim_pb2.SimulationDef` Proto equivalent of this Simulation.
        """
        # compile graphs if requested to be included
        system_defs, init_graph = [], Graph()
        if include_graphs:
            compile_fn = lambda fn: compile_graph(
                fn, self.entity_defs, self.component_defs
            )
            # compile systems graphs
            system_defs = [
                SystemDef(
                    graph=compile_fn(fn),
                    system_id=system_id,
                )
                for fn, system_id in self.system_fns
            ]
            # compile init graph
            init_graph = (
                compile_fn(self.init_fn) if self.init_fn is not None else Graph()
            )
        return sim_pb2.SimulationDef(
            name=self.name,
            entities=[e.proto for e in self.entity_defs],
            components=[c.proto for c in self.component_defs],
            systems=[s.proto for s in system_defs],
            init_graph=init_graph.proto,
        )

    def start(self):
        """Starts this Simulation on the Engine.

        If already started, calling `start()` again does nothing.
        """
        # do nothing if already started
        if self.started:
            return
        # commit entire simulation to (ie including systems/init graph added) to engine
        applied_proto = self.client.apply_sim(self.build(include_graphs=True))
        # obtain autogen ids for systems from the engine by recreating the applied proto
        current_sys_fns = [system_fn for system_fn, _ in self.system_fns]
        self.system_fns = [
            # update system ids for systems by position
            (fn, system_def.id)
            for fn, system_def in zip(current_sys_fns, applied_proto.systems)
        ]
        self.started = True

    def stop(self):
        """Stops and removes this Simulation from the Engine.

        Raises:
            RuntimeError: If stop() is called on a simulation that has not started yet.
        """
        if not self.started:
            raise RuntimeError("Cannot stop a Simulation that has not started yet.")
        # cleanup by remove simulation from engine
        self.client.remove_sim(self.name)
        self.started = False

    def entity(self, components: Iterable[str]) -> Entity:
        """Look up the gRPC entity with the given components attached.

        Provides access to the ECS entity on the Bentobox Engine via gRPC.

        Args:
            components: Set of the names of the components that should be
                attached to the retrieved entity.
        Raises:
            ValueError: if component names given contains duplicates
            RuntimeError: If Simulation has not yet started.
        Returns:
            The gRPC entity with the given list of components attached.
        """
        if not self.started:
            raise RuntimeError(
                "Cannot obtain a gRPC Entity from a Simulation that has not started yet."
            )
        comp_set = frozenset([str(c) for c in components])
        # check for duplicates in given components
        if len(comp_set) != len(list(components)):
            raise ValueError("Given component names should not contain duplicates")
        return self.entity_map[comp_set]

    def init(self, init_fn: ConvertFn):
        """Register given init_fn as the init graph for this simulation.

        The init graph allows for the initialization of attribute's values,
        running on the simulation's first step() call, before any systems run.
        Compiles the `init_fn` into a computational graph and registers the
        result as the init graph for this Simulation.

        Example:
            @sim.init
            def init_fn():
                # ... initialization of attribute values ..

        Args:
            init_fn: Function that contains the init code for the simulation.
                Must be compilable by `compile_graph()`.
        """
        self.init_fn = init_fn

    def system(self, system_fn: ConvertFn):
        """Register a ECS system with the given system_fn on this Simulation.

        ECS Systems are run every step of simulation and encapsulate the logic of the simulation.
        Compiles the `system_fn` into a computational graph and registers the
        result as a ECS system to run on this Simulation.

        Example:
            @sim.system
            def system_fn(g):
                # ... implementation of the system ..

        Args:
            system_fn: Function that contains the implementation of the system.
                Must be compilable by `compile_graph()`.
        """
        # 0 to signify unset system id
        self.system_fns.append((system_fn, 0))

    def step(self):
        """Run this simulation for one step

        Runs this simulation's systems in the order they are registered.
        Blocks until all systems of that simulation have finished running.
        The Simulation must have already started before running the simulation with step()`

        Raises:
            RuntimeError: If step() is called on a simulation that has not started yet
                or has already been stopped.
        """
        if not self.started:
            raise RuntimeError(
                "Cannot step a simulation that has not started or already stopped."
            )
        self.client.step_sim(self.name)

    @property
    def entities(self) -> List[Entity]:
        """Get gRPC entities of this Simulation.

        Returns:
            List of entities belonging to this Simulation
        """
        return list(self.entity_map.values())

    def __enter__(self):
        self.start()

    def __exit__(self, exc_type, exc_value, traceback):
        self.stop()
        # never suppress exceptions inside with statement
        return False

    def __repr__(self):
        return f"{type(self).__name__}<{self.name}>"

    def __hash__(self):
        return hash(self.name)
| bentobox-dev/bento-box | sdk/bento/sim.py | sim.py | py | 10,643 | python | en | code | 0 | github-code | 13 |
13429854622 | from django.urls import path
from . import views
# Route table for the app. NOTE(review): several `name=` values contain spaces
# ('busqueda en inventario', ...); Django accepts this but the convention is
# underscores — renaming would break existing reverse()/{% url %} lookups.
urlpatterns = [
    path('', views.index, name='index'),
    path("busqueda_en_inventario/", views.busqueda_en_inventario, name='busqueda en inventario'),
    path("reporte_de_inventario/", views.reporte_de_inventario, name='reporte de inventario'),
    path("busqueda_de_clientes/", views.busqueda_de_clientes, name='busqueda de clientes'),
    path("reporte_de_clientes/", views.reporte_de_clientes, name='reporte de clientes'),
    path("orden_de_compra/", views.orden_de_compra, name='orden de compra'),
    path("reporte_ordenes_de_compra/", views.reporte_ordenes_de_compra, name='reporte ordenes de compra'),
    path("productos/", views.productos, name='productos'),
]
72855485457 | from __future__ import absolute_import
import os
import sys
from st2common import log as logging
from st2common.service_setup import setup as common_setup
from st2common.service_setup import teardown as common_teardown
from st2common.util.monkey_patch import monkey_patch
from st2actions.notifier import config
from st2actions.notifier import notifier
__all__ = [
'main'
]
monkey_patch()
LOG = logging.getLogger(__name__)
def _setup():
    """Initialize service plumbing: config, DB, MQ exchanges, and signal handlers."""
    common_setup(service='notifier', config=config, setup_db=True, register_mq_exchanges=True,
                 register_signal_handlers=True)
def _run_worker():
    """Run the actions notifier until interrupted; return the process exit code (0)."""
    LOG.info('(PID=%s) Actions notifier started.', os.getpid())
    actions_notifier = notifier.get_notifier()
    try:
        # blocks until the service is stopped
        actions_notifier.start(wait=True)
    except (KeyboardInterrupt, SystemExit):
        LOG.info('(PID=%s) Actions notifier stopped.', os.getpid())
        actions_notifier.shutdown()
    return 0
def _teardown():
    """Tear down everything `_setup` initialized."""
    common_teardown()
def main():
    """Service entry point: set up, run the notifier, and always tear down.

    Returns the worker's exit code, 1 on unexpected error; re-raises
    SystemExit as a process exit.
    """
    try:
        _setup()
        return _run_worker()
    except SystemExit as exit_code:
        sys.exit(exit_code)
    except Exception:
        # bug fix: was a bare `except:` (which also swallowed KeyboardInterrupt)
        # and logged a copy-pasted "Results tracker" message in the notifier service.
        LOG.exception('(PID=%s) Actions notifier quit due to exception.', os.getpid())
        return 1
    finally:
        _teardown()
| kkkanil/mySt2 | st2actions/st2actions/cmd/st2notifier.py | st2notifier.py | py | 1,255 | python | en | code | 0 | github-code | 13 |
41837438414 | import argparse
import re
from tsm.util import read_file_to_lines, write_lines_to_file
# CLI: syllable lexicon in, syllable->phone map, phone lexicon out.
parser = argparse.ArgumentParser()
parser.add_argument('input_file')
parser.add_argument('map_file')
parser.add_argument('output_file')
parser.add_argument('--col', type=int, help="starting from which column")
parser.add_argument('--delimiter', default="\s+")
args = parser.parse_args()
def line2word_syls(line):
    """Split a lexicon line into (word columns, non-empty syllable columns).

    Uses the module-level `args` for the delimiter regex and the column at
    which syllables start.
    """
    cols = re.split(args.delimiter, line)
    return cols[:args.col], list(filter(lambda col: col, cols[args.col:]))
# parse the syllable lexicon: one (word-cols, syllables) pair per line
syl_lines = read_file_to_lines(args.input_file)
syl_lexicon = list(map(line2word_syls, syl_lines))
def line2syl_phn(line):
    """Split a map line at its first space into (syllable, phone string)."""
    syllable, phones = line.split(" ", 1)
    return syllable, phones
# build the syllable -> phone-string mapping from the map file
map_lines = read_file_to_lines(args.map_file)
mapping = dict(map(line2syl_phn, map_lines))
def map_syltone(syl):
    """Map a toned syllable (e.g. 'ba1') to its phones with the tone appended.

    The last character is the tone digit; the rest is looked up in the
    module-level `mapping`. Raises KeyError on unknown syllables.
    """
    tone = int(syl[-1])
    phns = mapping[syl[:-1]]
    return f"{phns}{tone}"
# convert each entry to phones, silently dropping words with unmappable syllables
phn_lexicon = []
for before_syls, syls in syl_lexicon:
    try:
        tup = (" ".join(before_syls), " ".join([map_syltone(syl) for syl in syls]))
        phn_lexicon.append(tup)
    except KeyError:
        continue
phn_lines = [f"{word} {phns}" for word, phns in phn_lexicon]
write_lines_to_file(args.output_file, phn_lines)
| Chung-I/ChhoeTaigiDatabase | syl2phone.py | syl2phone.py | py | 1,232 | python | en | code | null | github-code | 13 |
17342179866 | #!/usr/bin/python3
import argparse
import binascii
from textwrap import wrap
from intelhex import IntelHex as IH
# CLI: takes a raw binary dump of the flash storage partition.
parser = argparse.ArgumentParser(description='Analyze Zephyr FCB storage and print contents.')
parser.add_argument('file', help='binary dump of the storage partition')
def fcb_crc8(data):
    """CRC-8/CCITT over *data* with init value 0xFF, as used by Zephyr's FCB.

    Processes one nibble at a time through a 16-entry lookup table.
    """
    nibble_table = bytes([0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15,
                          0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d])
    crc = 0xFF
    for byte in data:
        crc ^= byte
        for _ in range(2):  # high nibble, then low nibble
            crc = ((crc << 4) & 0xFF) ^ nibble_table[crc >> 4]
    return crc
def read_items(storage):
    """Parse raw FCB sector bytes and return the list of item payloads.

    Layout: 4-byte magic, 1-byte FCB version, one 0xFF padding byte,
    2-byte little-endian flash-descriptor id, then length-prefixed items
    each followed by a CRC-8 byte. Items whose CRC does not match are
    reported and skipped. Parsing stops at the first erased byte (0xFF).
    """
    items = []
    assert storage[:4] == b'\xee\xee\xff\xc0', 'no magic sequence detected!'
    print('FCB version: %u' % storage[4])
    assert storage[5] == 0xFF, 'padding not detected'
    # FIX: '+' binds tighter than '<<', so the original computed
    # (lo + hi) << 8. The id is little-endian: lo | (hi << 8).
    print('FD ID: %u' % (storage[6] | (storage[7] << 8)))
    off = 8
    while off < len(storage) and storage[off] != 0xFF:
        if storage[off] & 0x80:
            # two-byte varint length: low 7 bits + next byte << 7
            length = (storage[off] & 0x7f) | (storage[off + 1] << 7)
            data_start = off + 2
        else:
            length = storage[off]
            data_start = off + 1
        data_end = data_start + length
        # FIX: the payload starts after ALL length bytes; the original
        # sliced from off+1 in the two-byte case, leaking the second
        # length byte into the payload for items > 127 bytes.
        data = storage[data_start:data_end]
        crc = storage[data_end]
        # the stored CRC covers the length prefix plus the payload
        if crc == fcb_crc8(storage[off:data_end]):
            items.append(data)
        else:
            print('CRC check failed!')
        off = data_end + 1
    return items
def read_setting(item):
    """Decode one FCB item as a Zephyr settings entry and pretty-print it.

    Recognised entries are selected by total length plus key prefix:
    bt/id, bt/irk, bt/keys, space/key and space/...; anything else is
    printed raw. Sizes below are the fixed on-flash layouts.
    """
    if len(item) == 13 and item[:6] == b'bt/id=':
        print('bt/id:', end=' ')
        id_type = item[6]
        id = item[12:6:-1]  # addr is reversed
        print(':'.join(['%02X' % x for x in id]), 'type=' + str(id_type))
    elif len(item) == 23 and item[:7] == b'bt/irk=':
        print('bt/irk:', end=' ')
        periph_irk = item[7:23]
        print(binascii.hexlify(periph_irk).decode().upper())
    elif len(item) == 74 and item[:8] == b'bt/keys/':
        print('bt/keys:', end=' ')
        # 12 hex chars of the address, then the one-char address type
        print(':'.join(wrap(item[8:20].decode().upper(), 2)), 'type=' + bytes([item[20]]).decode(), end=' ')
        assert item[21] == b'='[0]
        print('enc_size=%u' % item[22], end=' ')
        print('flags=%s' % bin(item[23]), end=' ')
        print('keys=%s' % bin(item[24]), end=' ')
        assert item[25] == b'\x00'[0]  # padding[1]?
        # RAND/EDIV are all-zero for LE Secure Connections pairings
        rand = item[26:34]
        if rand != b'\x00' * 8:
            print('RAND=%s' % binascii.hexlify(rand).decode().upper(), end=' ')
        ediv = item[34:36]
        if ediv != b'\x00' * 2:
            print('EDIV=%s' % binascii.hexlify(ediv).decode().upper(), end=' ')
        ltk = item[36:52]
        print('LTK=%s' % binascii.hexlify(ltk).decode().upper(), end=' ')
        central_irk = item[52:68]
        print('IRK=%s' % binascii.hexlify(central_irk).decode().upper(), end=' ')
        rpa = item[73:67:-1]  # rpa[6], reversed address
        if rpa != b'\x00' * 6:
            print('RPA=', end='')
            print(':'.join(['%02X' % x for x in rpa]), end='')
        print('')
    elif len(item) == 42 and item[:10] == b'space/key=':
        print('space/key:', end=' ')
        spacekey = item[10:42]
        print(binascii.hexlify(spacekey).decode().upper())
    elif len(item) == 52 and item[:6] == b'space/':
        print('space:', end=' ')
        print(':'.join(wrap(item[6:18].decode().upper(), 2)), 'type=' + bytes([item[18]]).decode(), end=' ')
        assert item[19] == b'='[0]
        print('spacekey=%s' % binascii.hexlify(item[20:52]).decode().upper())
    else:
        # unknown entry -- dump the raw bytes
        print(item)
if __name__ == '__main__':
    # FIX: `sys` was used below (stderr/exit) but never imported,
    # so the unrecognized-extension path raised NameError.
    import sys

    args = parser.parse_args()
    if args.file.endswith('.bin'):
        # raw dump of the storage partition
        with open(args.file, "rb") as file:
            storage = file.read()
    elif args.file.endswith('.hex'):
        # Intel HEX image: extract the storage partition address range
        ih = IH(args.file)
        storage = ih[0x32000:0x38000].tobinstr()
    else:
        print("unrecognized file extension", file=sys.stderr)
        sys.exit(-1)
    items = read_items(storage)
    for i in items:
        read_setting(i)
| maz3max/ble-coin | prod/analyze_fcb.py | analyze_fcb.py | py | 4,038 | python | en | code | 7 | github-code | 13 |
7165630295 | import time
class HtmlExporter:
    """Class to export NETSCAPE-Bookmark-file-1 format HTML bookmarks file.

    NOTE: Data is immediately written through the output handle in order
    to avoid memory overflows trying to concatenate the text.
    """

    def export_html(self, bookmarks_bar, bookmarks_menu, other_bookmarks):
        """Export NETSCAPE-Bookmark-file-1 format HTML bookmarks file.

        :param bookmarks_bar: list of bookmark/folder dicts for the toolbar
        :param bookmarks_menu: list of bookmark/folder dicts for the menu
        :param other_bookmarks: dict with 'add_date' and 'children' keys
        """
        filepath = 'bookmarks_export.html'
        timestamp = str(int(time.time()))
        # FIX: use a context manager so the file is closed even when a
        # write raises (the original leaked the handle on exceptions).
        with open(filepath, mode='w', encoding='utf-8') as output_file:
            # Header. (FIX: the original passed .format() arguments to a
            # template containing no placeholders; they were ignored.)
            output_file.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks Menu</H1>
        <DL><p>""")
            self._create_html_node(output_file, bookmarks_menu, timestamp, 0)
            # NOTE(review): ADD_DATE gets the current timestamp while
            # LAST_MODIFIED gets the stored add_date -- looks swapped,
            # kept as-is to preserve output.
            output_file.write("""
        <DT><H3 ADD_DATE="{}" LAST_MODIFIED="{}" UNFILED_BOOKMARKS_FOLDER="true">Other Bookmarks</H3>
        """.format(timestamp, other_bookmarks['add_date']))
            self._create_html_node(output_file, other_bookmarks['children'], timestamp, 1)
            output_file.write("""
        <DT><H3 ADD_DATE="{}" LAST_MODIFIED="{}" PERSONAL_TOOLBAR_FOLDER="true">Bookmarks Toolbar</H3>
        <DD>Add bookmarks to this folder to see them displayed on the Bookmarks Toolbar
        """.format(timestamp, other_bookmarks['add_date']))
            self._create_html_node(output_file, bookmarks_bar, timestamp, 1)
            output_file.write("\n</DL>\n")

    def _create_html_node(self, output_file, bookmarks, timestamp, level):
        """Recursive method to generate HTML nodes for a given set of bookmarks."""
        base_space = ' ' * level
        if level > 0:
            output_file.write("\n{}<DL><p>".format(base_space))
        for item in bookmarks:
            if item['type'] == 'bookmark':
                output_file.write("""\n{}<DT><A HREF="{}" ADD_DATE="{}" LAST_MODIFIED="{}">{}</A>""".format(
                    base_space + (4 * ' '), item['url'], item['add_date'], timestamp, item['title']))
            else:
                # Folder entry: write the header, then recurse one level
                # deeper. (FIX: the original passed a 5th, unused
                # .format() argument here.)
                output_file.write("""\n{}<DT><H3 ADD_DATE="{}" LAST_MODIFIED="{}">{}</H3>""".format(
                    base_space + (4 * ' '), item['add_date'], timestamp, item['title']))
                self._create_html_node(output_file, item['children'], timestamp, level + 1)
        if level > 0:
            output_file.write("\n{}</DL><p>".format(base_space))
| digital-engineering/bookmarks-consolidator | bookmarks_consolidator/html_exporter.py | html_exporter.py | py | 2,565 | python | en | code | 7 | github-code | 13 |
15486488741 | # To manage HttpResponses:
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpRequest
from django.template import Context
# To manage templates:
from django.template.loader import get_template
# For security:
from django.views.decorators.csrf import csrf_exempt
from django.template.context_processors import csrf
# To authenticate users:
from django.contrib.auth import logout, authenticate, login
# Importamos los modelos:
from .models import *
# Importamos las características:
from django.conf import settings
# Importamos el archivo y función que reside en el mismo para cargar medidas:
from .genera_medidas import get_CF_info, get_medidas_true;
# Para trabajar con JSON:
import json
import os
# Para el mail:
from .test_mail import send_mail;
# Para generar alertas:
from .generate_alerts import check_medida,alert_type,check_values
# Create your views here.
# ------------------------------------------------------------------------------
# Introduzco en un diccionario la lista de sitios a los que tiene acceso el usuario
def users_pages(user):
    """Return the list of building pages *user* is allowed to access.

    FIX: unknown users now get an empty list -- the original left
    ``pages`` unbound for any user other than the three known ones and
    raised UnboundLocalError.
    """
    if user == 'urjc' or user == 'ciemat':
        pages = ['ED_70', 'Clinica_Fuenlabrada', 'Lece']
    elif user == 'com_mad':
        pages = ['Clinica_Fuenlabrada']
    else:
        pages = []
    return pages
# ------------------------------------------------------------------------------
# Hacemos el login mostrando la pagina principal si es GET y enviando informacion
# y redirigiendo si es un POST
@csrf_exempt
def user_login(request):
    """Show the login form (GET) or authenticate and redirect (POST).

    A valid user lands on /<username>/index; an invalid one on /register.
    """
    # send_mail()
    # Process the request
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is None:  # not a valid user
            return redirect('/register')
        else:  # valid user
            login(request,user)
            new_url = '/'+str(user)+'/index'
            return redirect(new_url)
    else:
        contexto = {'Login':"Hola"}
        return render(request,'login.html',contexto)
# ------------------------------------------------------------------------------
# Hacemos logout y se nos redirige a la pagina principal -> login
def user_logout(request,peticion):
    """Log the user out and send them back to the login page ('/')."""
    logout(request)
    return redirect('/')
# ------------------------------------------------------------------------------
def register(request):
    """Render the registration page."""
    return render(request, 'register.html', {'Registro': "Hola"})
# ------------------------------------------------------------------------------
@csrf_exempt
def index(request, peticion):
    """Render the landing page listing the buildings this user can see."""
    user = request.user
    # Buildings this account is allowed to browse.
    buildings = users_pages(str(user))
    # get_medidas_true()  # (disabled) would regenerate the measurement JSONs
    context = {'Builds': buildings, 'User': user}
    return render(request, 'index.html', context)
# ------------------------------------------------------------------------------
def insert_idMota(medidas_test):
    """Collect the distinct mote ids found in *medidas_test*.

    :param medidas_test: iterable of measurement dicts carrying an
        ``id_mota`` entry (assumed int-like -- TODO confirm with callers).
    :return: list of the ids as strings, sorted numerically, so the
        drop-down menu shows mota 1, 2, 10 in order.
    """
    # A set gives O(1) de-duplication instead of the original O(n)
    # membership scan per measurement.
    unique_ids = {str(medidas.get("id_mota")) for medidas in medidas_test}
    return sorted(unique_ids, key=int)
# ------------------------------------------------------------------------------
# Aquí cargo la información del JSON
def get_data(dir='/home/ppp23/Escritorio/TFG/data'):
    """Load the measurement list from ``medidas.json``.

    :param dir: directory containing the JSON file. Defaults to the
        original hard-coded path so existing callers are unaffected;
        parameterizing it makes the loader testable and reusable.
    :return: whatever the JSON holds (a list of measurement dicts).
    """
    # `with` guarantees the handle is closed even if parsing fails.
    with open(os.path.join(dir, 'medidas.json')) as file:
        data = json.load(file)
    return data
# ------------------------------------------------------------------------------
# Proceso solicitud del POST recibido de slices pidiendo mota y medidas concretas
def procesolicitud(mota, medida, medidas_test):
    """Filter measurements down to one mote and flag the requested metric.

    :param mota: requested mote id (compared as strings, so int or str ok)
    :param medida: 'Temperatura', 'Humedad' or 'CO2'
    :param medidas_test: list of measurement dicts
    :return: (measurements of that mote, esTMP, esHUM, esCO) -- the three
        booleans tell the template's JavaScript which metric to request.
    """
    # Direct comparisons replace the original if/elif flag assignments.
    esTMP = medida == 'Temperatura'
    esHUM = medida == 'Humedad'
    esCO = medida == 'CO2'
    # Compare ids as strings: the POST value arrives as str while the
    # JSON may hold ints.
    info_mota = [m for m in medidas_test if str(m.get("id_mota")) == str(mota)]
    return info_mota, esTMP, esHUM, esCO
# ------------------------------------------------------------------------------
@csrf_exempt
def slices(request, peticion):
    """Render the charts page for one building; POST filters by mote/metric."""
    # User currently logged into the portal
    user = request.user
    # Work out which slices page is being requested
    url = request.path
    url_slices = url.split('/')[2]  # slices_ED_70, slices_Clinica_Fuenlabrada or slices_Lece
    # Initialised up-front so the GET path has values for the context
    esTMP = False
    esHUM = False
    esCO = False
    # Human-readable building name for the template
    if (url_slices == "slices_ED_70"):
        nombre_ed = "Edificio 70, Ciemat"
    elif (url_slices == "slices_Clinica_Fuenlabrada"):
        nombre_ed = "Clínica de Fuenlabrada"
    elif (url_slices == "slices_Lece"):
        nombre_ed = "Plataforma Solar de Andalucía, Almería"
    # Load the measurements
    medidas_test = get_data()
    # Mote ids for the drop-down menu
    cant_motas = insert_idMota(medidas_test)
    print("Número de motas es: "+str(cant_motas))
    alerta = False
    if request.method == 'POST':
        mota_concreta = request.POST['mota']  # which mote was requested
        print("Solicitan mota: "+str(mota_concreta))
        medida_concreta = request.POST['medidatipo']  # which metric was requested
        print("Solicitan medida: "+str(medida_concreta))
        medidas_send, esTMP, esHUM, esCO = procesolicitud(mota_concreta,medida_concreta,medidas_test)
    else:
        medidas_send = medidas_test  # on GET send every measurement unfiltered
    # Check for anomalous values and build the alert mail body
    alerta,motas_alerta,cuerpo_mail = check_values(medidas_test)
    admin_mail = cuerpo_mail+"Por favor, como administrador del sistema, revise estos valores."
    # send_mail(admin_mail)
    print(admin_mail)
    # Buildings this user may access
    pages_info_user = users_pages(str(user))
    # Metric types offered in the UI
    meds = ['Temperatura','Humedad','CO2']
    # Context handed to the template
    contexto = {'Builds':pages_info_user, 'User': user,'Medidas': medidas_send, 'alerta':alerta, 'Edificio': nombre_ed, 'Id_motas': cant_motas, 'Tipos_med': meds, 'esTMP': esTMP, 'esHUM': esHUM, 'esCO': esCO}
    return render(request,'slices.html',contexto)
# ------------------------------------------------------------------------------
@csrf_exempt
def tables(request, peticion):
    """Render the measurements table for one building; POST filters by mote."""
    print(request)
    # User currently logged into the portal
    user = request.user
    # usuario = str(user)
    # print("User is: "+usuario)
    # -------- Work out which table page is being requested ----
    url = request.path
    url_tables = url.split('/')[2]  # tables_ED_70, tables_Clinica_Fuenlabrada or tables_Lece
    print("Solicitan la tabla: "+url_tables)
    # medidas_test = get_medidas(url_tables) # (old loader) list of dicts
    # Load the measurements
    medidas_test = get_data()
    # Mote ids for the drop-down menu (extracted into insert_idMota)
    cant_motas = insert_idMota(medidas_test)
    if request.method == 'POST':
        print(request.POST['mota'])
        mota_concreta = request.POST['mota']  # which mote was requested
        # medidas_test = get_medidas(url_tables) # (old loader) list of dicts
        info_mota = []  # will hold only the rows whose mote id matches
        for medidas in medidas_test:  # each `medidas` is one dict
            print("La verdadera: "+str(medidas.get("id_mota")))
            # compare as str: POST sends str while the JSON may hold ints
            if str(medidas.get("id_mota")) == str(mota_concreta):
                info_mota.append(medidas)
        medidas_send = info_mota  # on POST send one mote's measurements
    else:
        medidas_send = medidas_test  # on GET send everything unfiltered
    # Buildings this user may access
    pages_info_user = users_pages(str(user))
    # Context handed to the template
    contexto = {'Builds':pages_info_user, 'User': user,'Medidas': medidas_send, 'Id_motas': cant_motas}
    return render(request,'tables.html',contexto)
# ------------------------------------------------------------------------------
def leaflet(request, peticion):
    """Render the interactive map (leaflet) page for one building.

    The template name is derived from the URL, e.g.
    /urjc/leaflet_ED70 renders leaflet_ED70.html.
    """
    # Work out which leaflet page is being requested
    url = request.path
    url_leaflet = url.split('/')[2]  # leaflet_ED70, leaflet_CF or leaflet_Alm
    print(url_leaflet)
    leaf_plantilla = url_leaflet+'.html'
    # Load the measurements
    medidas_test = get_data()
    # User currently logged into the portal
    user = request.user
    print("User is: "+str(user))
    # Buildings this user may access
    pages_info_user = users_pages(str(user))
    # FIX: removed a dead `contexto = {'URL': url_leaflet}` assignment
    # that was unconditionally overwritten here.
    contexto = {'Builds':pages_info_user,'Medidas':medidas_test}
    return render(request,leaf_plantilla,contexto)
# ------------------------------------------------------------------------------
def maps(request, peticion):
    """Render the animated floor-plan page."""
    user = request.user
    print("User is: "+str(user))
    # Buildings this account is allowed to browse.
    buildings = users_pages(str(user))
    return render(request, 'moving_planos.html', {'Builds': buildings})
# ------------------------------------------------------------------------------
def administration(request, peticion):
    """Send the admin account to /admin; everyone else back to their index."""
    # First path segment identifies the acting user (urjc, ciemat, com_mad).
    url_user = request.path.split('/')[1]
    print("El usuario usurpador es: "+url_user)
    if url_user == 'urjc':
        target = '/admin'
    else:
        target = '/' + url_user + '/index'
    return redirect(target)
# ------------------------------------------------------------------------------
def relations(request, peticion):
    """Render the relations page backed by the Clinica Fuenlabrada SQL data."""
    user = request.user
    print("User is: "+str(user))
    # SQL-backed info for the template (renamed from `dict`, which
    # shadowed the builtin).
    cf_info = get_CF_info()
    buildings = users_pages(str(user))
    contexto = {'Builds': buildings, 'User': user, 'Dict': cf_info}
    return render(request, 'relations.html', contexto)
| pablopavon23/TFG | motas/views.py | views.py | py | 12,192 | python | es | code | 0 | github-code | 13 |
37022962406 | from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.utils import np_utils
# Hyper-parameters (legacy Keras 1.x naming: nb_* = "number of ...").
batch_size = 128
nb_epoch = 10
nb_filters = 32
nb_pool = 4
nb_conv = 3
# Load MNIST and add a channel axis: (N, 1, 28, 28), Theano dim ordering.
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
# Scale pixel values to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# One-hot encode the 10 digit classes.
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# Model: two 3x3 conv layers -> 4x4 max-pool -> dropout -> dense softmax head.
model = Sequential()
model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid',
                        input_shape=(1, 28, 28)))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adadelta',
              metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', score[1])
| vks4git/Machine-learning | task11/main_t11.py | main_t11.py | py | 1,516 | python | en | code | 0 | github-code | 13 |
38399548474 | import numpy as np
import tensorflow as tf
def SPP_layer(input, levels=3, name='SPP_layer', pool_type='max'):
    """Spatial Pyramid Pooling over a feature map (TF1 graph mode).

    For each pyramid level l in {1, 2, 4, ..., 2**(levels-1)} the map is
    pooled into roughly l x l bins, flattened per sample, and all levels
    are concatenated along axis 1.
    NOTE(review): assumes `input` is NHWC with a statically known batch
    dimension (shape[0] is used in the reshapes) -- confirm with callers.
    """
    shape = input.shape
    with tf.variable_scope(name):
        for l in range(levels):
            l = 2 ** l  # bins per side at this level (reuses the loop variable)
            ksize = [1, np.ceil(shape[1] / l + 1).astype(np.int32), np.ceil(shape[2] / l + 1).astype(np.int32), 1]
            strides = [1, np.floor(shape[1] / l + 1).astype(np.int32), np.floor(shape[2] / l + 1).astype(np.int32), 1]
            if pool_type == 'max':
                pool = tf.compat.v1.nn.max_pool(input, ksize=ksize, strides=strides, padding='SAME')
                pool = tf.reshape(pool, (shape[0], -1))
            else:
                # any value other than 'max' falls back to average pooling
                pool = tf.compat.v1.nn.avg_pool(input, ksize=ksize, strides=strides, padding='SAME')
                pool = tf.reshape(pool, (shape[0], -1))
            if l == 1:
                x_flatten = tf.reshape(pool, (shape[0], -1))
            else:
                x_flatten = tf.concat((x_flatten, pool), axis=1)
        print("SPP layer shape:\t", x_flatten.get_shape().as_list())
    return x_flatten
# x = tf.ones((4, 128, 128, 3))
# x_sppl = SPP_layer(x, 3)
| saberholo/Anime_cnn | SPP.py | SPP.py | py | 1,153 | python | en | code | 0 | github-code | 13 |
3334881906 | import numpy as np
import os
import random
import datetime
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
from tensorboardX import SummaryWriter
from config import args
from coinrun import setup_utils, make
from autoencoders.autoencoder import AutoEncoder
# Pick GPU when available; model and batches are moved to `device` below.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Replay-buffer record dtype: one 3x64x64 float64 frame per entry.
transition = np.dtype([('s', np.float64, (3, 64, 64))])
if args.tensorboard:
    print('Init tensorboard')
    writer = SummaryWriter(log_dir='runs/{}'.format(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")))
class Model():
    """Couples an AutoEncoder, a frame ring buffer and a CoinRun env."""
    def __init__(self):
        self.AE = AutoEncoder(args,latent_dim=args.latent_dim).double().to(device)
        self.AE.train()
        self.counter = 0  # next write index into the ring buffer
        self.buffer = np.empty(args.buffer_capacity, dtype=transition)
        setup_utils.setup_and_load(use_cmd_line_args=False)
        self.env = make('standard',num_envs=args.num_envs)
        self.optimizer = optim.Adam(self.AE.parameters(),lr = args.lr)
        self.criterion = nn.MSELoss()
        self.step=0  # global optimisation step counter (for tensorboard)
    def store(self,x):
        """Write one frame into the ring buffer; True when it just filled."""
        self.buffer['s'][self.counter] = x
        self.counter += 1
        if self.counter == args.buffer_capacity:
            self.counter = 0
            return True
        else:
            return False
    def save_param(self):
        """Persist autoencoder weights under ./Weights/."""
        if not os.path.exists('./Weights'):
            os.makedirs('./Weights')
        torch.save(self.AE.state_dict(),'./Weights/' + args.weight_path)
    def load_param(self):
        """Load autoencoder weights from ./Weights/."""
        self.AE.load_state_dict(torch.load('./Weights/'+args.weight_path))
    def make_img(self,img):
        """CHW tensor -> HWC numpy image suitable for matplotlib."""
        img = img.numpy()
        img = np.transpose(img,(1,2,0))
        return img
    def update(self):
        """Run args.train_epochs of minibatch MSE training over the buffer."""
        s = torch.tensor(self.buffer['s'],dtype=torch.double).to(device)
        for _ in range(args.train_epochs):
            print('New EPoch \n')
            for index in BatchSampler(SubsetRandomSampler(range(args.buffer_capacity)),args.batch_size, False):
                s_in = s[index]
                z,s_hat = self.AE(s_in)
                loss = self.criterion(s_hat,s_in)
                print("Loss:\t",loss.item())
                if args.tensorboard:
                    writer.add_scalar('Loss',loss.item(),self.step)
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
                self.step+=1
    def run(self):
        """Collect frames with random actions; train whenever the buffer fills."""
        for step in range(args.max_steps):
            act = np.array([self.env.action_space.sample() for _ in range(args.num_envs)])
            # act = self.env.action_space.sample()
            # print(np.int32(act),type(np.int32(act)))
            obs,_,done,_ = self.env.step(act)
            # HWC uint8 frame -> CHW, scaled to [0, 1]
            obs = np.transpose(np.squeeze(obs),(2,0,1))
            if self.store((obs/255)):
                print('Updating')
                self.update()
            if step % 1000 == 0 :
                print('Saving Model')
                self.save_param()
        self.env.close()
def trainAE():
    """Train a single autoencoder on random-policy CoinRun frames."""
    Model().run()
def test():
    """Visual sanity check: show env frames next to their reconstructions."""
    model = Model()
    model.load_param()
    # model.AE.test()
    for step in range(5):
        # random action per env, then grab the resulting observation
        act = np.array([model.env.action_space.sample() for _ in range(args.num_envs)])
        obs,_,done,_ = model.env.step(act)
        obs = np.squeeze(obs)
        # HWC uint8 -> CHW double in [0, 1], with a batch axis for the AE
        obs = np.transpose(obs,(2,0,1))
        obs = torch.DoubleTensor(obs/255)
        z,obs_hat = model.AE(torch.unsqueeze(obs,0))
        obs = model.make_img(torch.squeeze(obs).detach())
        obs_hat = model.make_img(torch.squeeze(obs_hat).detach())
        print(np.max(obs),np.max(obs_hat))
        # original on top, reconstruction below
        fig = plt.figure()
        ax1 = fig.add_subplot(2,1,1)
        ax1.imshow(obs)
        ax2 = fig.add_subplot(2,1,2)
        ax2.imshow(obs_hat)
        plt.show()
    model.env.close()
if __name__ == '__main__':
# trainAE()
test() | nsidn98/Transfer-Learning-for-RL | src/coinrun/main.py | main.py | py | 4,176 | python | en | code | 1 | github-code | 13 |
19734014881 | from flask import Flask
import connexion
from swagger_server import encoder
from flask_cors import CORS
# print a nice greeting.
def say_hello(username="World"):
    """Return a small HTML greeting paragraph for *username*."""
    return f'<p>Hello {username}!</p>\n'
# some bits of text for the page.
header_text = '''
<html>\n<head> <title>EB Flask Test</title> </head>\n<body>'''
instructions = '''
<p><em>Hint</em>: This is a RESTful web service! Append a username
to the URL (for example: <code>/Thelonious</code>) to say hello to
someone specific.</p>\n'''
home_link = '<p><a href="/">Back</a></p>\n'
footer_text = '</body>\n</html>'
# Module-level WSGI application. NOTE(review): it is re-bound to a
# connexion app inside the __main__ guard below -- confirm which one the
# deployment target actually imports.
application = Flask(__name__)
if __name__ == '__main__':
    # EB looks for an 'application' callable by default.
    # Replace the plain Flask app with a connexion (Swagger-aware) app.
    application = connexion.FlaskApp(__name__, specification_dir='./swagger_server/swagger/', swagger_ui=True)
    application.debug = False
    # add a rule for the index page.
    application.add_url_rule('/', 'index', (lambda: header_text +
        say_hello() + instructions + footer_text))
    # add a rule when the page is accessed with a name appended to the site
    # URL.
    application.add_url_rule('/<username>', 'hello', (lambda username: header_text +
        say_hello(username) + home_link + footer_text))
    application.app.json_encoder = encoder.JSONEncoder
    application.add_api('swagger.yaml', arguments={'title': 'FundsCorner Customer Acquisition System API'})
    CORS(application.app)
    # NOTE(review): this assert is always true (everything is an object);
    # presumably a leftover debugging check.
    assert isinstance(application.run, object)
    application.run(host='0.0.0.0')
| HebbaleLabs/Python-Assessment-Template | application.py | application.py | py | 1,666 | python | en | code | 0 | github-code | 13 |
33018633786 | import os
import sol4
import time
def main():
    """Build a panoramic video per input clip: ffmpeg frame dump + sol4."""
    experiments = ['living_room.mp4']
    for experiment in experiments:
        trans = True  # restrict the alignment to translations only
        exp_no_ext = experiment.split('.')[0]
        os.system('mkdir dump')
        path = 'dump/' + exp_no_ext
        os.mkdir(path)
        # Explode the video into numbered JPEG frames for alignment.
        os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))
        s = time.time()
        panorama_generator = sol4.PanoramicVideoGenerator('dump/%s/' % exp_no_ext, exp_no_ext, 2100)
        panorama_generator.align_images(translation_only = trans)
        panorama_generator.generate_panoramic_images(15)
        print(' time for %s: %.1f' % (exp_no_ext, time.time() - s))
        panorama_generator.save_panoramas_to_video()
if __name__ == '__main__':
    # Script entry point: run the full panorama pipeline.
    main()
| damebrown/IMPR_ex4 | my_panorama.py | my_panorama.py | py | 818 | python | en | code | 0 | github-code | 13 |
7455792329 | import os
import subprocess
import numpy as np
import itertools
import time
import pickle
def make_idun_train_ann_test_job(
        dim,
        index,
        I,
        d,
        K,
        h,
        tau,
        it_max,
        tol,
        name):
    """Write a SLURM batch script for one ANN training run under ``name``/.

    The generated run.sh loads Python 3.8 on the cluster and invokes
    anntrainingtest.py with the given hyper-parameters in argv order.
    """
    filetext = f"""#!/bin/sh
#SBATCH --partition=CPUQ
#SBATCH --time=00:30:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=1000MB
#SBATCH --job-name="{name}_Nummat_Neural_training"
#SBATCH --output=out.txt
module purge
module load GCCcore/.9.3.0
module load Python/3.8.2
python3 ../../../anntrainingtest.py {dim} {index} {I} {d} {K} {h} {tau} {it_max} {tol} {name}
"""
    # Create (or empty) the job directory. NOTE(review): shell=True with
    # an interpolated `name` is fine for trusted local use but would be
    # shell-injectable otherwise.
    if not os.path.exists(f"{name}/"):
        subprocess.call(f"mkdir {name}", shell=True)
    else:
        subprocess.call(f"rm {name}/*", shell=True)
    with open(f"{name}/run.sh", "w") as f:
        f.write(filetext)
def do_tests(dim, i, I, d, K, h, tau, it_max, tol, folder):
    """Create and submit one SLURM job per hyper-parameter combination.

    All parameters except ``dim`` and ``folder`` are iterables whose
    Cartesian product defines the job grid; a pickled lookup table maps
    each combination to its 1-based job index.
    """
    os.chdir("results")
    if os.path.exists(folder):
        subprocess.call(f"rm {folder}/* -r", shell=True)
    else:
        subprocess.call(f"mkdir {folder}", shell=True)
    os.chdir(folder)
    # Kept only for the job-count print below (consumed by len(list(...))).
    iterator = itertools.product(i, I, d, K, h, tau, it_max, tol)
    lookup_table = {}
    it_ind = 0
    for x in itertools.product(i, I, d, K, h, tau, it_max, tol):
        it_ind += 1
        lookup_table[x] = it_ind
    with open("table.pickle", "wb") as pickle_file:
        pickle.dump(lookup_table, pickle_file)
    print(f"Preping for {len(list(iterator))} amount of jobs")
    time.sleep(1)
    it_ind = 0
    # NOTE(review): the loop targets rebind the parameters i, I, d, ...
    # on each iteration; itertools.product has already captured the
    # original iterables, so this works but is fragile.
    for (
            i,
            I,
            d,
            K,
            h,
            tau,
            it_max,
            tol) in itertools.product(
            i,
            I,
            d,
            K,
            h,
            tau,
            it_max,
            tol):
        it_ind += 1
        name = f"Job_{it_ind}"
        make_idun_train_ann_test_job(
            dim, i, I, d, K, h, tau, it_max, tol, name)
        os.chdir(name)
        subprocess.call("sbatch run.sh", shell=True)
        os.chdir("..")
| TheBjorn98/nummat_p2 | make_idun_files.py | make_idun_files.py | py | 2,119 | python | en | code | 0 | github-code | 13 |
37719747621 | import numpy as np
import pandas as pd
import os
import random
import multiprocessing as mp
# Windows paths for the raw trace files and the generated CSV outputs.
TRAIN_DATA = r'E:\NearXu\train_data\train_'
AUTOENCODER_TRAIN_PATH_CSV = r'E:\NearXu\autoencoder2\train_'
AUTOENCODER_TRAIN_CSV = r'E:\NearXu\autoencoder2\train.csv'
AUTOENCODER_TEST_CSV = r'E:\NearXu\autoencoder2\test.csv'
# NOTE(review): process_random() below writes through these module-level
# handles; they stay commented out, so re-enable them before using it.
# global train_csv_file
# train_csv_file = open(AUTOENCODER_TRAIN_CSV, 'a+', encoding='utf-8')
# global test_csv_file
# test_csv_file = open(AUTOENCODER_TEST_CSV, 'a+', encoding='utf-8')
def merge_file():
    """Concatenate every CSV in the train folder into one all.csv file."""
    Folder_Path = r'E:\NearXu\autoencoder\train_'  # folder to merge (full path; avoid non-ASCII)
    SaveFile_Path = r'E:\NearXu\autoencoder\train_all'  # where the merged file is written
    SaveFile_Name = r'all.csv'  # name of the merged file
    # Switch the working directory to the source folder
    os.chdir(Folder_Path)
    # Collect every file name in that folder
    file_list = os.listdir()
    # Read the first CSV including its header (default UTF-8; adjust if garbled)
    df = pd.read_csv(Folder_Path + '\\' + file_list[0])
    # Write it out to start the merged file
    df.to_csv(SaveFile_Path + '\\' + SaveFile_Name, encoding="utf_8_sig", index=False)
    # Append the remaining CSVs (headers suppressed) to the merged file
    for i in range(1, len(file_list)):
        df = pd.read_csv(Folder_Path + '\\' + file_list[i])
        df.to_csv(SaveFile_Path + '\\' + SaveFile_Name, encoding="utf_8_sig", index=False, header=False, mode='a+')
def process(trace_id):
    """Convert trace file train_<id>.txt into train_<id>.csv.

    Each input line holds space-separated "x,y" points; every pair whose
    two halves parse as floats is appended as one CSV row.
    """
    #print(trace_id)
    with open(AUTOENCODER_TRAIN_PATH_CSV + str(int(trace_id)) + '.csv', 'a+', encoding='utf-8') as csv_file:
        with open(TRAIN_DATA + str(int(trace_id)) + '.txt', 'r+', encoding='utf-8') as file:
            for line in file:
                points = line.split(' ')
                # print(len(points))
                # random_number = np.random.randint(len(points))
                # print(random_number)
                # print(points[random_number])
                for point in points:
                    xy = point.split(',')
                    # print(xy)
                    # print(len(xy))
                    if (len(xy) == 2):
                        x = xy[0]
                        y = xy[1]
                        try:
                            # float() validates both halves before writing
                            float(x)
                            float(y)
                            csv_str = str(x) + ',' + str(y) + '\n'
                            if csv_str.count(',') > 1:
                                print('count > 1')
                            else:
                                csv_file.writelines(csv_str)
                        except ValueError:
                            print("ValueError")
                # (disabled) single-random-point sampling variant:
                # xy = points[random_number].split(',')
                # # print(xy)
                # # print(len(xy))
                # if (len(xy) == 2):
                #     x = xy[0]
                #     y = xy[1]
                #     try:
                #         float(x)
                #         float(y)
                #         csv_str = str(x) + ',' + str(y) + '\n'
                #         if csv_str.count(',') > 1:
                #             print('count > 1')
                #         else:
                #             csv_file.writelines(csv_str)
                #     except ValueError:
                #         print("ValueError")
def process_random(trace_id):
    """Sample 50% of a trace's CSV rows into train and 10% into test.

    NOTE(review): relies on module-level `train_csv_file`/`test_csv_file`
    handles whose creation is commented out at the top of this file --
    calling this as-is raises NameError; re-enable them first.
    """
    with open(AUTOENCODER_TRAIN_PATH_CSV + str(int(trace_id)) + '.csv', 'r+', encoding='utf-8') as csv_file:
        print(trace_id)
        lines = csv_file.readlines()
        lines_number = len(lines)
        print(lines_number)
        # sample row indices without replacement (the two sets may overlap)
        random_list = range(0, lines_number)
        random_train_id = random.sample(random_list, int(0.5 * lines_number))
        random_test_id = random.sample(random_list, int(0.1 * lines_number))
        for id in random_train_id:
            train_csv_file.writelines(lines[id])
        for id in random_test_id:
            test_csv_file.writelines(lines[id])
def main():
    """Entry point: currently only merges the per-trace CSVs.

    The multiprocessing fan-out over traces is kept below, disabled.
    """
    # pool = mp.Pool(processes=1)
    # jobs = []
    # for i in range(29):
    #     trace_id = []
    #     trace_id.append(i)
    #     jobs.append(pool.apply_async(process_random, trace_id))
    # for job in jobs:
    #     job.get()
    # pool.close()
    merge_file()
if __name__ == '__main__':
    # Script entry point.
    main()
| neardws/fog-computing-based-collision-warning-system | train_autoencoder/get_autoencoder_train_data.py | get_autoencoder_train_data.py | py | 4,510 | python | en | code | 10 | github-code | 13 |
9844722345 | # -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
import logging
import random
import time
import gevent
from gevent.event import AsyncResult
from gevent.queue import Empty, Queue
from gevent.timeout import Timeout
from random import randint
from ethereum import slogging
from ethereum.utils import sha3
from raiden.messages import (
MediatedTransfer,
RefundTransfer,
RevealSecret,
Secret,
SecretRequest,
TransferTimeout,
)
from raiden.utils import lpex, pex
__all__ = (
    'LogListenerTask',
    'StartMediatedTransferTask',
    'MediateTransferTask',
    'EndMediatedTransferTask',
)
log = slogging.get_logger(__name__)  # pylint: disable=invalid-name
# Identity-compared sentinel; presumably returned by callbacks that want
# to be deregistered -- TODO confirm against the callback dispatch code.
REMOVE_CALLBACK = object()
# Default sleep intervals, in seconds, for the polling tasks below.
DEFAULT_EVENTS_POLL_TIMEOUT = 0.5
DEFAULT_HEALTHCHECK_POLL_TIMEOUT = 1
# Rough block-time estimate -- presumably seconds per block; confirm.
ESTIMATED_BLOCK_TIME = 7
# Identity-compared sentinel signalling a timed-out wait.
TIMEOUT = object()
class Task(gevent.Greenlet):
    """ Base class used to create tasks.
    Note:
        Always call super().__init__().
    """
    def __init__(self):
        super(Task, self).__init__()
        self.response_queue = Queue()
    def on_completion(self, success):
        # NOTE(review): `self.transfermanager` is not assigned in this
        # class -- subclasses are expected to set it; confirm before use.
        self.transfermanager.on_task_completed(self, success)
        return success
    def on_response(self, response):
        """ Add a new response message to the task queue. """
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'RESPONSE MESSAGE RECEIVED %s %s',
                repr(self),
                response,
            )
        self.response_queue.put(response)
class LogListenerTask(Task):
    """ Task for polling for filter changes. """
    def __init__(self, listener_name, filter_, callback, contract_translator,
                 events_poll_timeout=DEFAULT_EVENTS_POLL_TIMEOUT):
        """
        Args:
            listener_name (str): A name to distinguish listener tasks.
            filter_ (raiden.network.rpc.client.Filter): A proxy for calling the
                blockchain's filter api.
            callback (function): A function to be called once an event happens.
            contract_translator (ethereum.abi.ContractTranslator): A contract
                translator to decode the event data.
            events_poll_timeout (float): How long the tasks should sleep before
                polling again.
        """
        # pylint: disable=too-many-arguments
        super(LogListenerTask, self).__init__()
        self.listener_name = listener_name
        self.filter_ = filter_
        self.callback = callback
        self.contract_translator = contract_translator
        self.stop_event = AsyncResult()
        self.sleep_time = events_poll_timeout
        # exposes the AsyncResult timer, this allows us to raise the timeout
        # inside this Task to force an update:
        #
        #   task.kill(task.timeout)
        #
        self.timeout = None
    def __repr__(self):
        return '<LogListenerTask {}>'.format(self.listener_name)
    def _run(self):  # pylint: disable=method-hidden
        """Poll the filter until stop_event is set; decode and dispatch events."""
        stop = None
        while stop is None:
            filter_changes = self.filter_.changes()
            for log_event in filter_changes:
                log.debug('New Events', task=self.listener_name)
                event = self.contract_translator.decode_event(
                    log_event['topics'],
                    log_event['data'],
                )
                if event is not None:
                    originating_contract = log_event['address']
                    try:
                        self.callback(originating_contract, event)
                    except:  # pylint: disable=bare-except
                        # a broken callback must not kill the listener loop
                        log.exception('unexpected exception on log listener')
            self.timeout = Timeout(self.sleep_time)  # wait() will call cancel()
            stop = self.stop_event.wait(self.timeout)
    def stop_and_wait(self):
        """Signal the loop to stop and block until the greenlet finishes."""
        self.stop_event.set(True)
        gevent.wait(self)
    def stop_async(self):
        """Signal the loop to stop without waiting for it to finish."""
        self.stop_event.set(True)
class HealthcheckTask(Task):
    """ Task for checking if all of our open channels are healthy """
    def __init__(
            self,
            raiden,
            send_ping_time,
            max_unresponsive_time,
            sleep_time=DEFAULT_HEALTHCHECK_POLL_TIMEOUT):
        """ Initialize a HealthcheckTask that will monitor open channels for
        responsiveness.
        :param raiden RaidenService: The Raiden service which will give us
                                     access to the protocol object and to
                                     the asset manager
        :param int sleep_time: Time in seconds between each healthcheck task
        :param int send_ping_time: Time in seconds after not having received
                                   a message from an address at which to send
                                   a Ping.
        :param int max_unresponsive_time: Time in seconds after not having received
                                          a message from an address at which it
                                          should be deleted.
        """
        super(HealthcheckTask, self).__init__()
        self.protocol = raiden.protocol
        self.raiden = raiden
        self.stop_event = AsyncResult()
        self.sleep_time = sleep_time
        self.send_ping_time = send_ping_time
        self.max_unresponsive_time = max_unresponsive_time
    def _run(self):  # pylint: disable=method-hidden
        """ Periodically walk the protocol's per-(address, asset) queues;
        ping partners that have been quiet for `send_ping_time` seconds and
        drop (queue and channelgraph path) partners quiet for longer than
        `max_unresponsive_time`.
        """
        stop = None
        while stop is None:
            keys_to_remove = []
            # NOTE: iteritems() is Python 2 only, consistent with the rest
            # of this module.
            for key, queue in self.protocol.address_queue.iteritems():
                receiver_address = key[0]
                asset_address = key[1]
                # only consider a partner idle when nothing is pending for it
                if queue.empty():
                    elapsed_time = (
                        time.time() - self.protocol.last_received_time[receiver_address]
                    )
                    # Add a randomized delay in the loop to not clog the network
                    gevent.sleep(randint(0, int(0.2 * self.send_ping_time)))
                    if elapsed_time > self.max_unresponsive_time:
                        # remove the node from the graph
                        asset_manager = self.raiden.get_manager_by_asset_address(
                            asset_address
                        )
                        asset_manager.channelgraph.remove_path(
                            self.protocol.raiden.address,
                            receiver_address
                        )
                        # remove the node from the queue
                        keys_to_remove.append(key)
                    elif elapsed_time > self.send_ping_time:
                        self.protocol.send_ping(receiver_address)
            # mutate the dict only after iteration has finished
            for key in keys_to_remove:
                self.protocol.address_queue.pop(key)
            self.timeout = Timeout(self.sleep_time)  # wait() will call cancel()
            stop = self.stop_event.wait(self.timeout)
    def stop_and_wait(self):
        """ Request the task to stop and block until it has exited. """
        self.stop_event.set(True)
        gevent.wait(self)
    def stop_async(self):
        """ Request the task to stop without blocking. """
        self.stop_event.set(True)
class AlarmTask(Task):
    """ Task to notify when a block is mined. """
    def __init__(self, chain):
        super(AlarmTask, self).__init__()
        self.callbacks = list()
        self.stop_event = AsyncResult()
        # target tick length of the polling loop, in seconds
        self.wait_time = 0.5
        self.chain = chain
        self.last_block_number = self.chain.block_number()
    def register_callback(self, callback):
        """ Register a new callback.
        Note:
            This callback will be executed in the AlarmTask context and for
            this reason it should not block, otherwise we can miss block
            changes.
        """
        if not callable(callback):
            raise ValueError('callback is not a callable')
        self.callbacks.append(callback)
    def _run(self):  # pylint: disable=method-hidden
        """ Poll the chain for a new block number and call every registered
        callback with it; a callback returning REMOVE_CALLBACK unregisters
        itself.
        """
        stop = None
        result = None
        last_loop = time.time()
        log.debug('starting block number', block_number=self.last_block_number)
        while stop is None:
            current_block = self.chain.block_number()
            # more than one block advanced between polls
            if current_block > self.last_block_number + 1:
                difference = current_block - self.last_block_number - 1
                log.error(
                    'alarm missed %s blocks',
                    difference,
                )
            if current_block != self.last_block_number:
                self.last_block_number = current_block
                log.debug('new block', number=current_block, timestamp=last_loop)
                remove = list()
                for callback in self.callbacks:
                    try:
                        result = callback(current_block)
                    except:  # pylint: disable=bare-except
                        # one broken callback must not stop block tracking
                        log.exception('unexpected exception on alarm')
                    else:
                        if result is REMOVE_CALLBACK:
                            remove.append(callback)
                # unregister outside the iteration over self.callbacks
                for callback in remove:
                    self.callbacks.remove(callback)
            # we want this task to iterate in the tick of `wait_time`, so take
            # into account how long we spent executing one tick.
            work_time = time.time() - last_loop
            if work_time > self.wait_time:
                log.warning(
                    'alarm loop is taking longer than the wait time',
                    work_time=work_time,
                    wait_time=self.wait_time,
                )
                sleep_time = 0.001
            else:
                sleep_time = self.wait_time - work_time
            stop = self.stop_event.wait(sleep_time)
            last_loop = time.time()
    def stop_and_wait(self):
        """ Request the task to stop and block until it has exited. """
        self.stop_event.set(True)
        gevent.wait(self)
    def stop_async(self):
        """ Request the task to stop without blocking. """
        self.stop_event.set(True)
class BaseMediatedTransferTask(Task):
    """ Shared helpers for the mediated-transfer tasks: send a message and
    iterate over the responses placed in `response_queue`, with either a
    wall-clock or a block-number deadline, yielding the TIMEOUT sentinel
    when the deadline passes.
    """
    def _send_and_wait_time(self, raiden, recipient, transfer, timeout):
        """ Utility to handle multiple messages for the same hashlock while
        properly handling expiration timeouts.

        Generator: yields queued responses until `timeout` seconds have
        elapsed; yields TIMEOUT (and stops) if the queue stays empty for the
        remaining time.
        """
        current_time = time.time()
        limit_time = current_time + timeout
        raiden.send_async(recipient, transfer)
        while current_time <= limit_time:
            # wait for a response message (not the Ack for the transfer)
            try:
                response = self.response_queue.get(
                    timeout=limit_time - current_time,
                )
            except Empty:
                yield TIMEOUT
                return
            yield response
            current_time = time.time()
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'TIMED OUT %s %s',
                self.__class__,
                pex(transfer),
            )
    def _send_and_wait_block(self, raiden, recipient, transfer, expiration_block):
        """ Utility to handle multiple messages and timeout on a blocknumber.

        Generator: yields queued responses while the chain has not reached
        `expiration_block`; yields TIMEOUT once it has.
        """
        raiden.send_async(recipient, transfer)
        current_block = raiden.chain.block_number()
        while current_block < expiration_block:
            try:
                response = self.response_queue.get(
                    timeout=DEFAULT_EVENTS_POLL_TIMEOUT
                )
            except Empty:
                # keep polling the block number until expiration
                pass
            else:
                if response:
                    yield response
            current_block = raiden.chain.block_number()
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'TIMED OUT ON BLOCK %s %s %s',
                current_block,
                self.__class__,
                pex(transfer),
            )
        yield TIMEOUT
    def _wait_for_unlock_or_close(self, raiden, assetmanager, channel, mediated_transfer):  # noqa
        """ Wait for a Secret message from our partner to update the local
        state, if the Secret message is not sent within time the channel will
        be closed.
        Note:
            Must be called only once the secret is known.
            Must call `on_hashlock_result` after this function returns.
        """
        if not isinstance(mediated_transfer, MediatedTransfer):
            raise ValueError('MediatedTransfer expected.')
        # close early enough that the lock can still be claimed on-chain
        block_to_close = mediated_transfer.lock.expiration - raiden.config['reveal_timeout']
        hashlock = mediated_transfer.lock.hashlock
        identifier = mediated_transfer.identifier
        asset = mediated_transfer.asset
        while channel.our_state.balance_proof.is_unclaimed(hashlock):
            current_block = raiden.chain.block_number()
            if current_block > block_to_close:
                if log.isEnabledFor(logging.WARN):
                    log.warn(
                        'Closing channel (%s, %s) to prevent expiration of lock %s %s',
                        pex(channel.our_state.address),
                        pex(channel.partner_state.address),
                        pex(hashlock),
                        repr(self),
                    )
                channel.netting_channel.close(
                    channel.our_state.address,
                    channel.our_state.balance_proof.transfer,
                    channel.partner_state.balance_proof.transfer,
                )
                return
            try:
                response = self.response_queue.get(
                    timeout=DEFAULT_EVENTS_POLL_TIMEOUT
                )
            except Empty:
                pass
            else:
                if isinstance(response, Secret):
                    if response.identifier == identifier and response.asset == asset:
                        assetmanager.handle_secretmessage(response)
                    else:
                        # NOTE(review): the Secret does not match this
                        # transfer, but the secret itself is still applied
                        # before logging the mismatch.
                        assetmanager.handle_secret(identifier, response.secret)
                        if log.isEnabledFor(logging.ERROR):
                            log.error(
                                'Invalid Secret message received, expected message'
                                ' for asset=%s identifier=%s received=%s',
                                asset,
                                identifier,
                                response,
                            )
                elif isinstance(response, RevealSecret):
                    assetmanager.handle_secret(identifier, response.secret)
                elif log.isEnabledFor(logging.ERROR):
                    log.error(
                        'Invalid message ignoring. %s %s',
                        repr(response),
                        repr(self),
                    )
    def _wait_expiration(self, raiden, transfer, sleep=DEFAULT_EVENTS_POLL_TIMEOUT):
        """ Utility to wait until the expiration block.
        For a chain A-B-C, if an attacker controls A and C a mediated transfer
        can be done through B and C will wait for/send a timeout, for that
        reason B must not unregister the hashlock from the transfermanager
        until the lock has expired, otherwise the revealed secret wouldnt be
        caught.
        """
        # pylint: disable=no-self-use
        expiration = transfer.lock.expiration + 1
        while True:
            current_block = raiden.chain.block_number()
            if current_block > expiration:
                return
            gevent.sleep(sleep)
# Note: the send_and_wait_valid methods are used to check the message type and
# sender only; this could be improved by using an encrypted connection between
# the nodes, making the signature validation unnecessary.
class StartMediatedTransferTask(BaseMediatedTransferTask):
    """ Initiator task: picks a fresh secret per route, sends the
    MediatedTransfer and reveals the secret once the target sends a valid
    SecretRequest; `done_result` is set to the overall outcome.
    """
    def __init__(self, raiden, asset_address, amount, identifier, target, done_result):
        # pylint: disable=too-many-arguments
        super(StartMediatedTransferTask, self).__init__()
        self.raiden = raiden
        self.asset_address = asset_address
        self.amount = amount
        self.identifier = identifier
        self.target = target
        self.done_result = done_result
    def __repr__(self):
        return '<{} {} asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.asset_address),
        )
    def _run(self):  # noqa pylint: disable=method-hidden,too-many-locals
        raiden = self.raiden
        amount = self.amount
        identifier = self.identifier
        target = self.target
        node_address = raiden.address
        assetmanager = raiden.get_manager_by_asset_address(self.asset_address)
        transfermanager = assetmanager.transfermanager
        fee = 0
        # there are no guarantees that the next_hop will follow the same route
        routes = assetmanager.get_best_routes(
            amount,
            target,
            lock_timeout=None,
        )
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'START MEDIATED TRANSFER initiator:%s target:%s',
                pex(node_address),
                pex(target),
            )
        for path, forward_channel in routes:
            # never reuse the last secret, discard it to avoid losing asset
            secret = sha3(hex(random.getrandbits(256)))
            hashlock = sha3(secret)
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'START MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(hashlock),
                )
            transfermanager.register_task_for_hashlock(self, hashlock)
            assetmanager.register_channel_for_hashlock(forward_channel, hashlock)
            # leave reveal_timeout blocks of margin before settlement
            lock_timeout = forward_channel.settle_timeout - forward_channel.reveal_timeout
            lock_expiration = raiden.chain.block_number() + lock_timeout
            mediated_transfer = forward_channel.create_mediatedtransfer(
                node_address,
                target,
                fee,
                amount,
                identifier,
                lock_expiration,
                hashlock,
            )
            raiden.sign(mediated_transfer)
            forward_channel.register_transfer(mediated_transfer)
            for response in self.send_and_iter_valid(raiden, path, mediated_transfer):
                valid_secretrequest = (
                    isinstance(response, SecretRequest) and
                    response.amount == amount and
                    response.hashlock == hashlock and
                    response.identifier == identifier
                )
                if valid_secretrequest:
                    # This node must reveal the Secret starting with the
                    # end-of-chain, the `next_hop` can not be trusted to reveal the
                    # secret to the other nodes.
                    revealsecret_message = RevealSecret(secret)
                    raiden.sign(revealsecret_message)
                    # we cannot wait for ever since the `target` might
                    # intentionally _not_ send the Ack, blocking us from
                    # unlocking the asset.
                    wait = (
                        ESTIMATED_BLOCK_TIME * lock_timeout / .6
                    )
                    raiden.send_and_wait(target, revealsecret_message, wait)
                    # target has acknowledged the RevealSecret, we can update
                    # the chain in the forward direction
                    assetmanager.handle_secret(
                        identifier,
                        secret,
                    )
                    # call the callbacks and unregister the task
                    transfermanager.on_hashlock_result(hashlock, True)
                    # the transfer is done when the lock is unlocked and the Secret
                    # message is sent (doesn't imply the other nodes in the chain
                    # have unlocked/withdrawn)
                    self.done_result.set(True)
                    return
                # someone down the line timed out / couldn't proceed, try next
                # path, stop listening for messages for the current hashlock
                else:
                    # the initiator can unregister right away because it knows
                    # no one else can reveal the secret
                    transfermanager.on_hashlock_result(hashlock, False)
                    del assetmanager.hashlock_channel[hashlock]
                    break
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'START MEDIATED TRANSFER FAILED initiator:%s target:%s',
                pex(node_address),
                pex(self.target),
            )
        # all routes failed, consider:
        # - if the target is a good node to have a channel:
        #   - deposit/reopen/open a channel with target
        # - if the target has a direct channel with good nodes and there is
        #   sufficient funds to complete the transfer
        #   - open the required channels with these nodes
        self.done_result.set(False)
    def send_and_iter_valid(self, raiden, path, mediated_transfer):  # noqa pylint: disable=no-self-use
        """ Send the `mediated_transfer` and wait for either a message from
        `target` or the `next_hop`.

        Generator: yields only refund/timeout messages from `next_hop`,
        SecretRequests from `target`, or the TIMEOUT sentinel; everything
        else is logged and dropped.
        """
        next_hop = path[1]
        target = path[-1]
        response_iterator = self._send_and_wait_time(
            raiden,
            mediated_transfer.recipient,
            mediated_transfer,
            raiden.config['msg_timeout'],
        )
        for response in response_iterator:
            refund_or_timeout = (
                isinstance(response, (RefundTransfer, TransferTimeout)) and
                response.sender == next_hop
            )
            secret_request = (
                isinstance(response, SecretRequest) and
                response.sender == target
            )
            timeout = response is TIMEOUT
            if refund_or_timeout or secret_request or timeout:
                yield response
            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'Invalid message ignoring. %s',
                    repr(response),
                )
        return
class MediateTransferTask(BaseMediatedTransferTask):
    """ Intermediate-node task: forwards a received MediatedTransfer along
    the best available route (with a safely reduced lock expiration) and
    falls back to a RefundTransfer to the previous node when no route can
    complete the transfer.
    """
    def __init__(self, raiden, asset_address, originating_transfer, fee):
        super(MediateTransferTask, self).__init__()
        self.raiden = raiden
        self.asset_address = asset_address
        self.originating_transfer = originating_transfer
        self.fee = fee
    def __repr__(self):
        return '<{} {} asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.asset_address),
        )
    def _run(self):  # noqa
        # pylint: disable=method-hidden,too-many-locals,too-many-branches,too-many-statements
        raiden = self.raiden
        fee = self.fee
        originating_transfer = self.originating_transfer
        assetmanager = raiden.get_manager_by_asset_address(self.asset_address)
        transfermanager = assetmanager.transfermanager
        from_address = originating_transfer.sender
        originating_channel = assetmanager.partneraddress_channel[from_address]
        hashlock = originating_transfer.lock.hashlock
        transfermanager.register_task_for_hashlock(self, hashlock)
        assetmanager.register_channel_for_hashlock(originating_channel, hashlock)
        # there are no guarantees that the next_hop will follow the same route
        routes = assetmanager.get_best_routes(
            originating_transfer.lock.amount,
            originating_transfer.target,
        )
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'MEDIATED TRANSFER initiator:%s node:%s target:%s',
                pex(originating_transfer.initiator),
                pex(raiden.address),
                pex(originating_transfer.target),
            )
        maximum_expiration = (
            originating_channel.settle_timeout +
            raiden.chain.block_number() -
            2  # decrement as a safety measure to avoid limit errors
        )
        # Ignore locks that expire after settle_timeout
        if originating_transfer.lock.expiration > maximum_expiration:
            if log.isEnabledFor(logging.ERROR):
                # fixed: log at the level that was checked (was log.debug)
                # and use raiden.address (this task has no `self.address`)
                log.error(
                    'lock_expiration is too large, ignore the mediated transfer',
                    initiator=pex(originating_transfer.initiator),
                    node=pex(raiden.address),
                    target=pex(originating_transfer.target),
                )
            # Notes:
            # - The node didn't send a transfer forward, so it can not lose
            #   asset.
            # - It's quiting early, so it wont claim the lock if the secret is
            #   revealed.
            # - The previous_node knows the settle_timeout because this value
            #   is in the smart contract.
            # - It's not sending a RefundTransfer to the previous_node, so it
            #   will force a retry with a new path/different hashlock, this
            #   could make the bad behaving node lose it's fees but it will
            #   also increase latency.
            return
        for path, forward_channel in routes:
            current_block_number = raiden.chain.block_number()
            # Dont forward the mediated transfer to the next_hop if we cannot
            # decrease the expiration by `reveal_timeout`, this is time
            # required to learn the secret through the blockchain that needs to
            # consider DoS attacks.
            lock_timeout = originating_transfer.lock.expiration - current_block_number
            if lock_timeout < forward_channel.reveal_timeout:
                if log.isEnabledFor(logging.INFO):
                    log.info(
                        'transfer.lock_expiration is smaller than'
                        ' reveal_timeout, channel/path cannot be used',
                        lock_timeout=originating_transfer.lock.expiration,
                        reveal_timeout=forward_channel.reveal_timeout,
                        settle_timeout=forward_channel.settle_timeout,
                        nodeid=pex(path[0]),
                        partner=pex(path[1]),
                    )
                continue
            new_lock_timeout = lock_timeout - forward_channel.reveal_timeout
            # Our partner won't accept a locked transfer that can expire after
            # the settlement period, otherwise the secret could be revealed
            # after channel is settled and asset would be lost, in that case
            # decrease the expiration by an amount larger than reveal_timeout.
            if new_lock_timeout > forward_channel.settle_timeout:
                new_lock_timeout = forward_channel.settle_timeout - 2  # arbitrary decrement
                if log.isEnabledFor(logging.DEBUG):
                    log.debug(
                        'lock_expiration would be too large, decrement more so'
                        ' that the channel/path can be used',
                        lock_timeout=lock_timeout,
                        new_lock_timeout=new_lock_timeout,
                        nodeid=pex(path[0]),
                        partner=pex(path[1]),
                    )
            new_lock_expiration = current_block_number + new_lock_timeout
            mediated_transfer = forward_channel.create_mediatedtransfer(
                originating_transfer.initiator,
                originating_transfer.target,
                fee,
                originating_transfer.lock.amount,
                originating_transfer.identifier,
                new_lock_expiration,
                hashlock,
            )
            raiden.sign(mediated_transfer)
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(hashlock),
                )
            assetmanager.register_channel_for_hashlock(
                forward_channel,
                hashlock,
            )
            forward_channel.register_transfer(mediated_transfer)
            for response in self.send_and_iter_valid(raiden, path, mediated_transfer):
                valid_refund = (
                    isinstance(response, RefundTransfer) and
                    response.lock.amount == originating_transfer.lock.amount
                )
                if isinstance(response, RevealSecret):
                    assetmanager.handle_secret(
                        originating_transfer.identifier,
                        response.secret,
                    )
                    self._wait_for_unlock_or_close(
                        raiden,
                        assetmanager,
                        originating_channel,
                        originating_transfer,
                    )
                elif isinstance(response, Secret):
                    assetmanager.handle_secretmessage(response)
                    # Secret might be from a different node, wait for the
                    # update from `from_address`
                    self._wait_for_unlock_or_close(
                        raiden,
                        assetmanager,
                        originating_channel,
                        originating_transfer,
                    )
                    transfermanager.on_hashlock_result(hashlock, True)
                    return
                elif valid_refund:
                    # backtrack: register the refund and try the next route
                    forward_channel.register_transfer(response)
                    break
                else:
                    # TIMEOUT (or an otherwise unusable response): inform the
                    # previous node and keep the hashlock registered until the
                    # lock expires, then give up.
                    timeout_message = originating_channel.create_timeouttransfer_for(
                        originating_transfer,
                    )
                    raiden.send_async(
                        originating_transfer.sender,
                        timeout_message,
                    )
                    self._wait_expiration(
                        raiden,
                        originating_transfer,
                    )
                    transfermanager.on_hashlock_result(hashlock, False)
                    return
        # No suitable path avaiable (e.g. insufficient distributable, no active node)
        # Send RefundTransfer to the originating node, this has the effect of
        # backtracking in the graph search of the raiden network.
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'REFUND MEDIATED TRANSFER from=%s node:%s hashlock:%s',
                pex(from_address),
                pex(raiden.address),
                pex(hashlock),
            )
        refund_transfer = originating_channel.create_refundtransfer_for(
            originating_transfer,
        )
        raiden.sign(refund_transfer)
        originating_channel.register_transfer(refund_transfer)
        raiden.send_async(from_address, refund_transfer)
        self._wait_expiration(
            raiden,
            originating_transfer,
        )
        transfermanager.on_hashlock_result(hashlock, False)
    def send_and_iter_valid(self, raiden, path, mediated_transfer):
        """ Generator: send `mediated_transfer` and yield Secret/RevealSecret
        messages, refund/timeout messages from the next hop, or the TIMEOUT
        sentinel; anything else is logged and dropped.
        """
        response_iterator = self._send_and_wait_time(
            raiden,
            mediated_transfer.recipient,
            mediated_transfer,
            raiden.config['msg_timeout'],
        )
        for response in response_iterator:
            timeout = response is TIMEOUT
            secret = isinstance(response, (Secret, RevealSecret))
            refund_or_timeout = (
                isinstance(response, (RefundTransfer, TransferTimeout)) and
                response.sender == path[0]
            )
            if timeout or secret or refund_or_timeout:
                yield response
            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'Partner sent an invalid message. %s',
                    repr(response),
                )
class EndMediatedTransferTask(BaseMediatedTransferTask):
    """ Task that requests a secret for a registered transfer. """
    def __init__(self, raiden, asset_address, originating_transfer):
        super(EndMediatedTransferTask, self).__init__()
        self.raiden = raiden
        self.asset_address = asset_address
        self.originating_transfer = originating_transfer
    def __repr__(self):
        return '<{} {} asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.asset_address),
        )
    def _run(self):  # pylint: disable=method-hidden
        """ Send a SecretRequest to the initiator and, once the secret is
        revealed, unlock the incoming transfer (or close the channel if the
        partner never sends the Secret in time).
        """
        raiden = self.raiden
        originating_transfer = self.originating_transfer
        hashlock = originating_transfer.lock.hashlock
        assetmanager = raiden.get_manager_by_asset_address(self.asset_address)
        transfermanager = assetmanager.transfermanager
        originating_channel = assetmanager.get_channel_by_partner_address(
            originating_transfer.sender,
        )
        transfermanager.register_task_for_hashlock(self, hashlock)
        assetmanager.register_channel_for_hashlock(originating_channel, hashlock)
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'END MEDIATED TRANSFER %s -> %s msghash:%s hashlock:%s',
                pex(originating_transfer.target),
                pex(originating_transfer.initiator),
                pex(originating_transfer.hash),
                pex(originating_transfer.lock.hashlock),
            )
        secret_request = SecretRequest(
            originating_transfer.identifier,
            originating_transfer.lock.hashlock,
            originating_transfer.lock.amount,
        )
        raiden.sign(secret_request)
        # If the transfer timed out in the initiator a new hashlock will be
        # created and this task will not receive a secret, this is fine because
        # the task will eventually exit once a blocktimeout happens and a new
        # task will be created for the new hashlock
        valid_messages_iterator = self.send_secretrequest_and_iter_valid(
            raiden,
            originating_transfer,
            secret_request,
        )
        for response in valid_messages_iterator:
            # at this point a Secret message is not valid
            if isinstance(response, RevealSecret):
                assetmanager.handle_secret(
                    originating_transfer.identifier,
                    response.secret,
                )
                self._wait_for_unlock_or_close(
                    raiden,
                    assetmanager,
                    originating_channel,
                    originating_transfer,
                )
                transfermanager.on_hashlock_result(hashlock, True)
                return
            elif response is TIMEOUT:
                # this task timeouts on a blocknumber, at this point all the other
                # nodes have timedout
                transfermanager.on_hashlock_result(originating_transfer.lock.hashlock, False)
                break
    def send_secretrequest_and_iter_valid(self, raiden, originating_transfer, secret_request):
        """ Generator: send the SecretRequest to the initiator and yield only
        RevealSecret messages or the TIMEOUT sentinel; other messages are
        logged and dropped.
        """
        # pylint: disable=invalid-name
        # keep this task alive up to the expiration block
        response_iterator = self._send_and_wait_block(
            raiden,
            originating_transfer.initiator,
            secret_request,
            originating_transfer.lock.expiration,
        )
        # a Secret message is not valid here since the secret needs to first be
        # revealed to the target
        for response in response_iterator:
            if isinstance(response, RevealSecret):
                yield response
                break
            elif response is TIMEOUT:
                if log.isEnabledFor(logging.ERROR):
                    log.error(
                        'SECRETREQUEST TIMED OUT node:%s msghash:%s hashlock:%s',
                        pex(raiden.address),
                        pex(secret_request.hash),
                        pex(originating_transfer.lock.hashlock),
                    )
                yield response
            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'INVALID MESSAGE RECEIVED %s',
                    repr(response),
                )
class StartExchangeTask(BaseMediatedTransferTask):
    """ Initiator task, responsible to choose a random secret, initiate the
    asset exchange by sending a mediated transfer to the counterparty and
    revealing the secret once the exchange can be complete.
    """
    def __init__(self, identifier, raiden, from_asset, from_amount, to_asset,
                 to_amount, target, done_result=None):
        """
        Args:
            done_result (gevent.event.AsyncResult): Optional result set to
                True when the exchange completes. (Fix: the previous code
                used `self.done_result` in `_run` without ever assigning it,
                raising AttributeError on success; the new parameter defaults
                to None so existing callers are unaffected.)
        """
        # pylint: disable=too-many-arguments
        super(StartExchangeTask, self).__init__()
        self.identifier = identifier
        self.raiden = raiden
        self.from_asset = from_asset
        self.from_amount = from_amount
        self.to_asset = to_asset
        self.to_amount = to_amount
        self.target = target
        self.done_result = done_result
    def __repr__(self):
        return '<{} {} from_asset:{} to_asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.from_asset),
            pex(self.to_asset),
        )
    def _run(self):  # pylint: disable=method-hidden,too-many-locals
        identifier = self.identifier
        raiden = self.raiden
        from_asset = self.from_asset
        from_amount = self.from_amount
        to_asset = self.to_asset
        to_amount = self.to_amount
        target = self.target
        from_assetmanager = raiden.get_manager_by_asset_address(from_asset)
        to_assetmanager = raiden.get_manager_by_asset_address(to_asset)
        from_transfermanager = from_assetmanager.transfermanager
        from_routes = from_assetmanager.get_best_routes(
            from_amount,
            target,
            lock_timeout=None,
        )
        fee = 0
        for path, from_channel in from_routes:
            # for each new path a new secret must be used
            secret = sha3(hex(random.getrandbits(256)))
            hashlock = sha3(secret)
            from_transfermanager.register_task_for_hashlock(self, hashlock)
            from_assetmanager.register_channel_for_hashlock(from_channel, hashlock)
            lock_expiration = (
                raiden.chain.block_number() +
                from_channel.settle_timeout -
                raiden.config['reveal_timeout']
            )
            from_mediated_transfer = from_channel.create_mediatedtransfer(
                raiden.address,
                target,
                fee,
                from_amount,
                identifier,
                lock_expiration,
                hashlock,
            )
            raiden.sign(from_mediated_transfer)
            from_channel.register_transfer(from_mediated_transfer)
            # wait for the SecretRequest and MediatedTransfer
            to_mediated_transfer = self.send_and_wait_valid_state(
                raiden,
                path,
                from_mediated_transfer,
                to_asset,
                to_amount,
            )
            if to_mediated_transfer is None:
                # the initiator can unregister right away since it knows the
                # secret wont be revealed
                from_transfermanager.on_hashlock_result(hashlock, False)
                del from_assetmanager.hashlock_channel[hashlock]
            elif isinstance(to_mediated_transfer, MediatedTransfer):
                to_hop = to_mediated_transfer.sender
                # reveal the secret to the `to_hop` and `target`
                self.reveal_secret(
                    self.raiden,
                    secret,
                    last_node=to_hop,
                    exchange_node=target,
                )
                to_channel = to_assetmanager.get_channel_by_partner_address(
                    to_mediated_transfer.sender
                )
                # now the secret can be revealed forward (`from_hop`)
                from_assetmanager.handle_secret(identifier, secret)
                to_assetmanager.handle_secret(identifier, secret)
                self._wait_for_unlock_or_close(
                    raiden,
                    to_assetmanager,
                    to_channel,
                    to_mediated_transfer,
                )
                from_transfermanager.on_hashlock_result(hashlock, True)
                if self.done_result is not None:
                    self.done_result.set(True)
                # fix: the exchange succeeded, do not start another transfer
                # on the remaining routes
                return
    def send_and_wait_valid_state(  # noqa
            self,
            raiden,
            path,
            from_asset_transfer,
            to_asset,
            to_amount):
        """ Start the exchange by sending the first mediated transfer to the
        taker and wait for mediated transfer for the exchanged asset.
        This method will validate the messages received, discard the invalid
        ones, and wait until a valid state is reached. The valid state is
        reached when a mediated transfer for `to_asset` with `to_amount` tokens
        and a SecretRequest from the taker are received.
        Returns:
            None: when the timeout was reached.
            MediatedTransfer: when a valid state is reached.
            RefundTransfer/TransferTimeout: when an invalid state is reached by
                our partner.
        """
        # pylint: disable=too-many-arguments
        next_hop = path[1]
        taker_address = path[-1]  # taker_address and next_hop might be equal
        # a valid state must have a secret request from the maker and a valid
        # mediated transfer for the new asset
        received_secretrequest = False
        mediated_transfer = None
        response_iterator = self._send_and_wait_time(
            raiden,
            from_asset_transfer.recipient,
            from_asset_transfer,
            raiden.config['msg_timeout'],
        )
        for response in response_iterator:
            # fix: _send_and_wait_time yields the TIMEOUT sentinel, never
            # None; the old `response is None` check could never match and a
            # timeout would fall through to `response.sender` and raise
            # AttributeError on the sentinel object.
            if response is TIMEOUT:
                if log.isEnabledFor(logging.DEBUG):
                    log.debug(
                        'EXCHANGE TRANSFER TIMED OUT hashlock:%s',
                        pex(from_asset_transfer.lock.hashlock),
                    )
                return None
            # The MediatedTransfer might be from `next_hop` or most likely from
            # a different node.
            #
            # The other participant must not use a direct transfer to finish
            # the asset exchange, ignore it
            if isinstance(response, MediatedTransfer) and response.asset == to_asset:
                # XXX: allow multiple transfers to add up to the correct amount
                if response.lock.amount == to_amount:
                    mediated_transfer = response
            elif isinstance(response, SecretRequest) and response.sender == taker_address:
                received_secretrequest = True
            # next_hop could send the MediatedTransfer, this is handled in a
            # previous if
            elif response.sender == next_hop:
                if isinstance(response, (RefundTransfer, TransferTimeout)):
                    return response
                else:
                    if log.isEnabledFor(logging.INFO):
                        log.info(
                            'Partner %s sent an invalid message %s',
                            pex(next_hop),
                            repr(response),
                        )
                    return None
            elif log.isEnabledFor(logging.ERROR):
                log.error(
                    'Invalid message ignoring. %s',
                    repr(response),
                )
            if mediated_transfer and received_secretrequest:
                return mediated_transfer
        return None
    def reveal_secret(self, raiden, secret, last_node, exchange_node):
        """ Reveal the `secret` to both participants.
        The secret must be revealed backwards to get the incentives right
        (first mediator would not forward the secret and get the transfer to
        itself).
        With exchanges there is an additional failure point, if a node is
        mediating both asset transfers it can intercept the transfer (as in not
        revealing the secret to others), for this reason it is not sufficient
        to just send the Secret backwards, the Secret must also be sent to the
        exchange_node.
        """
        # pylint: disable=no-self-use
        reveal_secret = RevealSecret(secret)
        raiden.sign(reveal_secret)
        # first reveal the secret to the last_node in the chain, proceed after
        # ack
        raiden.send_and_wait(last_node, reveal_secret, timeout=None)  # XXX: wait for expiration
        # the last_node has acknowledged the Secret, so we know the exchange
        # has kicked-off, reveal the secret to the exchange_node to
        # avoid interceptions but dont wait
        raiden.send_async(exchange_node, reveal_secret)
class ExchangeTask(BaseMediatedTransferTask):
    """ Counterparty task, responsible to receive a MediatedTransfer for the
    from_transfer and forward a to_transfer with the same hashlock.
    """
    def __init__(self, raiden, from_mediated_transfer, to_asset, to_amount, target):
        # pylint: disable=too-many-arguments
        super(ExchangeTask, self).__init__()
        self.raiden = raiden
        self.from_mediated_transfer = from_mediated_transfer
        self.target = target
        self.to_amount = to_amount
        self.to_asset = to_asset
    def __repr__(self):
        return '<{} {} from_asset:{} to_asset:{}>'.format(
            self.__class__.__name__,
            pex(self.raiden.address),
            pex(self.from_mediated_transfer.asset),
            pex(self.to_asset),
        )
    def _run(self):  # pylint: disable=method-hidden,too-many-locals
        """Accept the incoming from_asset transfer, request its secret, and
        forward a matching to_asset transfer (same hashlock) back toward the
        initiator, retrying over the available routes.
        """
        fee = 0
        raiden = self.raiden
        from_mediated_transfer = self.from_mediated_transfer
        hashlock = from_mediated_transfer.lock.hashlock
        from_asset = from_mediated_transfer.asset
        to_asset = self.to_asset
        to_amount = self.to_amount
        to_assetmanager = raiden.get_manager_by_asset_address(to_asset)
        from_assetmanager = raiden.get_manager_by_asset_address(from_asset)
        from_transfermanager = from_assetmanager.transfermanager
        from_channel = from_assetmanager.get_channel_by_partner_address(
            from_mediated_transfer.sender,
        )
        from_transfermanager.register_task_for_hashlock(self, hashlock)
        from_assetmanager.register_channel_for_hashlock(from_channel, hashlock)
        # Shrink the expiration by reveal_timeout so this node keeps enough
        # time to learn the secret and unlock on-chain if needed.
        lock_expiration = from_mediated_transfer.lock.expiration - raiden.config['reveal_timeout']
        lock_timeout = lock_expiration - raiden.chain.block_number()
        to_routes = to_assetmanager.get_best_routes(
            from_mediated_transfer.lock.amount,
            from_mediated_transfer.initiator,  # route back to the initiator
            lock_timeout,
        )
        if log.isEnabledFor(logging.DEBUG):
            log.debug(
                'EXCHANGE TRANSFER %s -> %s msghash:%s hashlock:%s',
                pex(from_mediated_transfer.target),
                pex(from_mediated_transfer.initiator),
                pex(from_mediated_transfer.hash),
                pex(hashlock),
            )
        # Ask the initiator for the secret so the incoming lock can be claimed.
        secret_request = SecretRequest(
            from_mediated_transfer.identifier,
            from_mediated_transfer.lock.hashlock,
            from_mediated_transfer.lock.amount,
        )
        raiden.sign(secret_request)
        raiden.send_async(from_mediated_transfer.initiator, secret_request)
        for path, to_channel in to_routes:
            to_next_hop = path[1]
            to_mediated_transfer = to_channel.create_mediatedtransfer(
                raiden.address,  # this node is the new initiator
                from_mediated_transfer.initiator,  # the initiator is the target for the to_asset
                fee,
                to_amount,
                lock_expiration,
                hashlock,  # use the original hashlock
            )
            raiden.sign(to_mediated_transfer)
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'MEDIATED TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(from_mediated_transfer.lock.hashlock),
                )
            # Using assetmanager to register the interest because it outlives
            # this task, the secret handling will happen only _once_
            to_assetmanager.register_channel_for_hashlock(
                to_channel,
                hashlock,
            )
            to_channel.register_transfer(to_mediated_transfer)
            response = self.send_and_wait_valid(raiden, to_mediated_transfer)
            if log.isEnabledFor(logging.DEBUG):
                log.debug(
                    'EXCHANGE TRANSFER NEW PATH path:%s hashlock:%s',
                    lpex(path),
                    pex(hashlock),
                )
            # only refunds for `from_asset` must be considered (check send_and_wait_valid)
            if isinstance(response, RefundTransfer):
                if response.lock.amount != to_mediated_transfer.amount:
                    log.info(
                        'Partner %s sent an invalid refund message with an invalid amount',
                        pex(to_next_hop),
                    )
                    timeout_message = from_channel.create_timeouttransfer_for(
                        from_mediated_transfer
                    )
                    raiden.send_async(from_mediated_transfer.sender, timeout_message)
                    # NOTE(review): `self.transfermanager` is never assigned in
                    # __init__ — this likely should be `from_transfermanager`;
                    # confirm before relying on this error path.
                    self.transfermanager.on_hashlock_result(hashlock, False)
                    return
                else:
                    # Valid refund: record it and try the next route.
                    to_channel.register_transfer(response)
            elif isinstance(response, Secret):
                # this node is receiving the from_asset and sending the
                # to_asset, meaning that it can claim the to_asset but it needs
                # a Secret message to claim the from_asset
                to_assetmanager.handle_secretmessage(response)
                from_assetmanager.handle_secretmessage(response)
                self._wait_for_unlock_or_close(
                    raiden,
                    from_assetmanager,
                    from_channel,
                    from_mediated_transfer,
                )
    def send_and_wait_valid(self, raiden, mediated_transfer):
        """Send `mediated_transfer` and wait (up to msg_timeout) for a Secret
        with a matching hashlock or a valid RefundTransfer; return None on
        timeout or if only invalid messages arrive.
        """
        response_iterator = self._send_and_wait_time(
            raiden,
            mediated_transfer.recipient,
            mediated_transfer,
            raiden.config['msg_timeout'],
        )
        for response in response_iterator:
            if response is None:
                log.error(
                    'EXCHANGE TIMED OUT node:%s hashlock:%s',
                    pex(raiden.address),
                    pex(mediated_transfer.lock.hashlock),
                )
                return None
            if isinstance(response, Secret):
                if sha3(response.secret) != mediated_transfer.lock.hashlock:
                    log.error('Secret doesnt match the hashlock, ignoring.')
                    continue
                return response
            # first check that the message is from a known/valid sender/asset
            valid_target = response.target == raiden.address
            valid_sender = response.sender == mediated_transfer.recipient
            valid_asset = response.asset == mediated_transfer.asset
            if not valid_target or not valid_sender or not valid_asset:
                log.error(
                    'Invalid message [%s] supplied to the task, ignoring.',
                    repr(response),
                )
                continue
            if isinstance(response, RefundTransfer):
                return response
        return None
| utzig/raiden | raiden/tasks.py | tasks.py | py | 52,323 | python | en | code | null | github-code | 13 |
31126659944 | from django.db import models
from pods.models import User
# Data representing the underlying assets that a user wants to insure.
# Different bits of information are needed for each type of item, so each
# item type gets its own model below. They are subsequently organized into a
# single Risk model that is attached to the user's policy via a content type;
# that Risk model then looks at the underlying item and computes the risk
# score and premium for that user.
# Allowed physical-condition grades for an insured item (db value, label).
CONDITION_CHOICES = [
    ("new", "Brand New"),
    ("near_perfect", "Near Perfect"),
    ("great", "Great"),
    ("good", "Good"),
    ("ok", "ok"),
]
# Grouping a risk's photos into one album avoids adding a set of near-identical image columns to every risk model.
class ImageAlbum(models.Model):
    """A collection of photos documenting a single insured item (risk)."""
    def default(self):
        """Return the album's default image, or None if none is flagged."""
        return self.images.filter(default=True).first()
    def thumbnails(self):
        """Return thumbnail-sized images (both dimensions under 100px)."""
        # Fix: 'length_lt' was missing the double underscore, so Django would
        # have treated it as an exact-match lookup on a field named
        # 'length_lt'. NOTE(review): PropertyImage currently declares no
        # width/length fields — confirm the intended lookup fields.
        return self.images.filter(width__lt=100, length__lt=100)
    def __str__(self) -> str:
        risk_id = None
        risk_name = None
        # Reverse one-to-one accessors raise when no related risk exists;
        # catch Exception (not a bare except) so KeyboardInterrupt/SystemExit
        # are not swallowed.
        try:
            if self.audio_equipment:
                risk_id = self.audio_equipment.id
                risk_name = "Audio Equipment"
        except Exception:
            pass
        try:
            if self.cell_phone:
                risk_id = self.cell_phone.id
                risk_name = "Cell phone"
        except Exception:
            pass
        return f"{self.images.count()} photos - Risk photo album for risk #{risk_id} {risk_name}"
class PropertyImage(models.Model):
    """One photo inside an :class:`ImageAlbum`."""
    # Uploaded file; max_length covers long generated filenames.
    image = models.ImageField(upload_to="property_images", max_length=264)
    # Marks the album's representative image (see ImageAlbum.default()).
    default = models.BooleanField(default=False)
    album = models.ForeignKey(
        ImageAlbum,
        related_name="images",
        on_delete=models.CASCADE,
    )
    # The user who uploaded the photo; deleting the user removes their photos.
    owner = models.ForeignKey(
        User,
        related_name="risk_photos",
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class GenericProperty(models.Model):
    """Abstract base with the descriptive fields shared by all insured items."""
    make = models.CharField(max_length=128, null=True, blank=True)
    model = models.CharField(max_length=128, null=True, blank=True)
    condition = models.CharField(
        max_length=64, choices=CONDITION_CHOICES, null=True, blank=True
    )
    # Stored in cents to avoid floating-point money issues.
    market_value = models.IntegerField(help_text="in cents", null=True, blank=True)
    # override if a model has more text fields
    def is_filled_out(self):
        """Truthy when every descriptive field is populated."""
        return self.make and self.model and self.condition and self.market_value
    def __str__(self) -> str:
        return f"{self.make} {self.model} {self.condition} condition"
    class Meta:
        app_label = "policies"
        abstract = True
class PhoneRisk(GenericProperty):
    """A cell phone to be insured; adds protection-accessory flags."""
    album = models.OneToOneField(
        ImageAlbum,
        blank=True,
        null=True,
        related_name="cell_phone",
        on_delete=models.SET_NULL,
    )
    # Accessories that presumably lower the risk score — confirm in scoring.
    has_screen_protector = models.BooleanField(default=False)
    has_case = models.BooleanField(default=False)
    def __str__(self) -> str:
        return super().__str__() + " Phone Risk"
class AudioEquipmentRisk(GenericProperty):
    """A piece of audio equipment to be insured."""
    album = models.OneToOneField(
        ImageAlbum,
        blank=True,
        null=True,
        related_name="audio_equipment",
        on_delete=models.SET_NULL,
    )
    def __str__(self) -> str:
        return f"Audio Equipment Risk #{self.id}"
def get_model_for_risk_type(risk_type):
    """Map a risk-type slug to its model class.
    Args:
        risk_type: one of "cell_phone" or "audio_equipment".
    Returns:
        The corresponding risk model class.
    Raises:
        ValueError: for an unknown slug. (ValueError subclasses Exception,
        so any existing broad handlers keep working.)
    """
    if risk_type == "cell_phone":
        return PhoneRisk
    elif risk_type == "audio_equipment":
        return AudioEquipmentRisk
    else:
        raise ValueError(f"Risk type not found: {risk_type!r}")
| daxaxelrod/open_insure | policies/risk/models.py | models.py | py | 3,560 | python | en | code | 33 | github-code | 13 |
1391849265 | import copy
from math import log
import os.path
class AmplifierConfig:
    """Reads, rewrites and derives calibration values for an amplifier
    unit's configuration file (identified by modification and serial number).
    Attributes hold factory-default electrical parameters used by the
    calculator methods (dacsp_calc, eol_calc, nf_calc).
    """
    def __init__(self, modification, sn):
        # Common prefix of every config file; sliced to '__Config_Write_'
        # and suffixed with the serial number when building paths.
        self.__file_name = '__Config_Write_XXX_'
        self.mod = modification
        self.sn = sn
        # Default driver voltage, operating/end-of-life currents and noise
        # figure — NOTE(review): units presumed (V / mA / dB), confirm.
        self.u_drv = 30
        self.i_op = 650
        self.i_eol = 661
        self.nf = 4.35
        self.u_drv_corr = 0
    def found_error(self):
        """Return True if a per-unit config file exists for this serial number."""
        is_exist = os.path.exists(
            fr"C:\Users\danil\OneDrive\Документы\ZOC7 Files\ComSet_for_UI\{self.__file_name[0:15]}{self.sn}.txt")
        return is_exist
    def delete_file(self):
        """Delete this unit's config file if it is present."""
        # Fix: found_error must be *called*. The bare method reference was
        # always truthy, so os.remove ran unconditionally and raised
        # FileNotFoundError when the file was missing.
        if self.found_error():
            os.remove(
                fr"C:\Users\danil\OneDrive\Документы\ZOC7 Files\ComSet_for_UI\{self.__file_name[0:15]}{self.sn}.txt")
            print('Файл успешно удален')
    @staticmethod
    def open_for_read(mod):
        """Read the template config file for modification *mod* into a dict.
        Each line is 'KEY VALUE'; integer values are parsed, anything else is
        kept as a stripped string.
        """
        config_dict1 = {}
        with open(
                fr"C:\Users\danil\OneDrive\Документы\ZOC7 Files\ComSet_for_UI\__Config_Write_XXX_{mod}.txt",
                encoding='UTF-8') as config_file:
            for row in config_file:
                (key, val) = row.split(' ')
                try:
                    config_dict1[key] = int(val)
                except ValueError:
                    config_dict1[key] = val.strip()
        return config_dict1
    def read_config_file(self):
        """Load this unit's config as a dict: the per-serial file when it
        exists, otherwise the modification template (mod 15 or 19)."""
        config_dict = {}
        if self.mod == 15 and self.found_error() is False:
            return self.open_for_read(self.mod)
        elif self.mod == 19 and self.found_error() is False:
            return self.open_for_read(self.mod)
        elif self.mod in (15, 19) and self.sn is not None and self.found_error() is True:
            with open(
                    fr'C:\Users\danil\OneDrive\Документы\ZOC7 Files\ComSet_for_UI\{self.__file_name[0:15]}{self.sn}.txt',
                    encoding='UTF-8') as config_file:
                for row in config_file:
                    (key, val) = row.split(' ')
                    try:
                        config_dict[key] = int(val)
                    except ValueError:
                        config_dict[key] = val.strip()
            return config_dict
        else:
            # Unsupported modification: report and implicitly return None.
            print('Некорректно указана модификация изделия.')
    def get_config_value(self, key):
        """Return the config value stored under *key*."""
        return self.read_config_file()[key]
    def new_config_dict(self, **kwargs):
        """Return a copy of the current config with the given keys replaced.
        Only keys already present in the file are updated."""
        old_config_dict = self.read_config_file()
        new_config_dict = copy.deepcopy(old_config_dict)
        for key, value in kwargs.items():
            for old_key in old_config_dict.keys():
                if key == old_key:
                    new_config_dict[key] = value
        return new_config_dict
    def write_config_file(self, new_dict):
        """Write *new_dict* to this unit's per-serial config file, creating or
        overwriting it. SN is zero-padded to 3 digits; an extra
        'AMPSETUP 3278' line follows the AMPSETUP entry and a trailing
        'AMPSETUP OFF' line closes the file."""
        with open(
                fr'C:\Users\danil\OneDrive\Документы\ZOC7 Files\ComSet_for_UI\{self.__file_name[0:15]}{self.sn}.txt',
                'w', encoding='UTF-8') as new_config_file:
            for key, value in new_dict.items():
                if key == 'SN':
                    new_config_file.write(f'{key} {f"{value:03d}"}\n')
                elif key == 'AMPSETUP':
                    new_config_file.write(f'{key} {value}\n')
                    new_config_file.write('AMPSETUP 3278\n')
                else:
                    new_config_file.write(f'{key} {value}\n')
            new_config_file.write('AMPSETUP OFF\n')
    def dacsp_calc(self):
        """Return the DAC set point derived from the driver voltage."""
        DACSP = int(2650 - self.u_drv / 0.8 + 0.5)
        return DACSP
    def dacsp_corr(self, u_drv_corr):
        """Apply a driver-voltage correction to the stored DACSP value when it
        exceeds the +/-0.4 dead band."""
        if u_drv_corr > 0.4:
            new_DACSP = self.get_config_value('DACSP') - int(u_drv_corr / 0.8 + 0.5)
            self.write_config_file(self.new_config_dict(DACSP=new_DACSP))
        elif u_drv_corr < -0.4:
            new_DACSP = self.get_config_value('DACSP') - int(u_drv_corr / 0.8 - 0.5)
            self.write_config_file(self.new_config_dict(DACSP=new_DACSP))
    def eol_calc(self):
        """Return the end-of-life current code, clamped to 4090 for
        i_eol >= 758."""
        if self.i_eol < 758:
            EOL = int(round(self.i_eol / 0.185033 - 6.396, 0))
            return EOL
        elif self.i_eol >= 758:
            EOL = 4090
            return EOL
    def nf_calc(self):
        """Return the noise-figure code (dB converted to a linear scale)."""
        NF = int(round(10 ** (self.nf / 10) * 10000, 0))
        return NF
class LineAmplifierConfig(AmplifierConfig):
    """Configuration for the line-amplifier variant (modification 19).
    The numeric literals below are per-variant calibration constants —
    NOTE(review): their provenance is not visible here; confirm against the
    calibration spec before changing any of them.
    """
    def __init__(self, sn):
        self.sn = sn
        AmplifierConfig.__init__(self, modification=19, sn=self.sn)
        # Tap insertion losses (dB) and photodiode reference powers (dBm) —
        # presumed units, confirm.
        self.tap_in_line = -6.27
        self.tap_out_line = 18.65
        self.pd_in_line = -35.10
        self.pd_out_line = -2.03
    def tap_in_line_calc(self):
        """Return the input-tap insertion-loss code."""
        TAP_IN_IL_line = int(
            round((10 ** ((10 * log((71285303 / 10 ** 6), 10) + (-6.1 - self.tap_in_line)) / 10)) * 10 ** 6, 0))
        return TAP_IN_IL_line
    def tap_out_line_calc(self):
        """Return the output-tap insertion-loss code."""
        TAP_OUT_IL_line = int(
            round((10 ** ((10 * log(121338885 / 10 ** 6, 10) - (18.97 - self.tap_out_line)) / 10)) * 10 ** 6, 0))
        return TAP_OUT_IL_line
    def pd_in_line_calc(self, dlevel):
        """Return the input photodiode calibration code for level *dlevel*."""
        B_PD_IN_line = int(round((10.729691 - (
                10 ** ((-35.1 - 10 * log(dlevel / 10 ** 6, 10)) / 10) - 10 ** (
                (self.pd_in_line - 10 * log(dlevel / 10 ** 6, 10)) / 10)) * 1000000) * 1000000, 0))
        return B_PD_IN_line
    def pd_out_line_calc(self, dlevel):
        """Return the output photodiode calibration code for level *dlevel*."""
        B_PD_OUT_line = int(
            round(-(-2624.13 - (10 ** ((-2.03 - 10 * log(dlevel / 10 ** 6, 10)) / 10) - 10 ** (
                    (self.pd_out_line - 10 * log(dlevel / 10 ** 6, 10)) / 10)) * 1000000) * 100, 0))
        return B_PD_OUT_line
class PreAmplifierConfig(AmplifierConfig):
    """Configuration for the pre-amplifier variant (modification 15).
    Mirrors LineAmplifierConfig with variant-specific calibration constants —
    NOTE(review): constants' provenance not visible here; confirm before edits.
    """
    def __init__(self, sn):
        self.sn = sn
        AmplifierConfig.__init__(self, modification=15, sn=self.sn)
        # Tap insertion losses (dB) and photodiode reference powers (dBm) —
        # presumed units, confirm.
        self.tap_in_pre = -10.27
        self.tap_out_pre = 14.00
        self.pd_in_pre = -35.10
        self.pd_out_pre = -2.03
    def tap_in_pre_calc(self):
        """Return the input-tap insertion-loss code."""
        TAP_IN_IL_pre = int(
            round((10 ** ((10 * log((35727283 / 10 ** 6), 10) + (-10.1 - self.tap_in_pre)) / 10)) * 10 ** 6, 0))
        return TAP_IN_IL_pre
    def tap_out_pre_calc(self):
        """Return the output-tap insertion-loss code."""
        TAP_OUT_IL_pre = int(
            round((10 ** ((10 * log(60813500 / 10 ** 6, 10) - (14.97 - self.tap_out_pre)) / 10)) * 10 ** 6, 0))
        return TAP_OUT_IL_pre
    def pd_in_pre_calc(self, dlevel):
        """Return the input photodiode calibration code for level *dlevel*."""
        B_PD_IN_pre = int(
            round((10.729691 - (10 ** ((-35.1 - 10 * log(dlevel / 10 ** 6, 10)) / 10) - 10 ** (
                    (self.pd_in_pre - 10 * log(dlevel / 10 ** 6, 10)) / 10)) * 1000000) * 1000000, 0))
        return B_PD_IN_pre
    def pd_out_pre_calc(self, dlevel):
        """Return the output photodiode calibration code for level *dlevel*."""
        B_PD_OUT_pre = int(
            round(-(-2624.13 - (10 ** ((-2.03 - 10 * log(dlevel / 10 ** 6, 10)) / 10) - 10 ** (
                    (self.pd_out_pre - 10 * log(dlevel / 10 ** 6, 10)) / 10)) * 1000000) * 100, 0))
        return B_PD_OUT_pre
| Alexander2327/GUI-for-job | AmplifierConfig.py | AmplifierConfig.py | py | 8,112 | python | en | code | 0 | github-code | 13 |
10789097760 | from .KontroleryModeli.KontrolerKontaPrywatnego import KontrolerKontaPrywatnego
from .KontroleryModeli.KontrolerKontaFirmowego import *
from .KontroleryModeli.KontrolerSal import *
from .KontroleryModeli.KontrolerTerminow import *
from .KontroleryModeli.KontrolerRezerwacji import *
from .KontroleryModeli.KontrolerModeluInterface import KontrolerModeluInterface, TypModelu
from .SystemMailingowy.Obserwator import Obserwator
class PosrednikBazyDanych:
    """Database mediator (Polish: 'PosrednikBazyDanych').
    Facade that picks the concrete model controller for a given ``TypModelu``
    and forwards CRUD operations to it. Requires the database session to be
    initialized (``BazaDanych.przygotujBazeDanych``) beforehand.
    """
    def __init__(self, typ: TypModelu):
        self.kontrolerModelu: KontrolerModeluInterface = None
        # Fail fast if the shared DB session was never initialized.
        if BazaDanych.db_session is None:
            raise Exception("Baza danych niezostała zainicjowana!")
        # Dispatch on the model type to the matching controller.
        if typ == TypModelu.KontoPrywatne:
            self.kontrolerModelu = KontrolerKontaPrywatnego()
        elif typ == TypModelu.KontoFirmowe:
            self.kontrolerModelu = KontrolerKontaFirmowego()
        elif typ == TypModelu.Rezerwacje:
            self.kontrolerModelu = KontrolerRezerwacji()
        elif typ == TypModelu.Sale:
            self.kontrolerModelu = KontrolerSal()
        elif typ == TypModelu.Terminy:
            self.kontrolerModelu = KontrolerTerminow()
        else:
            raise TypeError("Niepoprawny typ modelu")
    def dodajObiekt(self, obiekt: object) -> int:
        """Add *obiekt* and return its new id."""
        return self.kontrolerModelu.dodajObiekt(obiekt)
    def usunObiekt(self, id: int):
        """Delete the object with the given id."""
        self.kontrolerModelu.usunObiekt(id)
    def zaktualizujObiekt(self, stary_id: int, nowy: object):
        """Replace the object identified by *stary_id* with *nowy*."""
        self.kontrolerModelu.zaktualizujObiekt(stary_id, nowy)
    def pobierzObiekt(self, id: str):
        """Fetch a single object by id."""
        return self.kontrolerModelu.pobierzObiekt(id)
    def pobierzWszystkieObiekty(self):
        """Fetch all objects of this model type."""
        return self.kontrolerModelu.pobierzWszystkieObiekty()
    @staticmethod
    def przygotujBazeDanych(db_uri: str, check_same_thread: bool = False):
        """Initialize the underlying database connection/session."""
        BazaDanych.przygotujBazeDanych(db_uri, check_same_thread)
17078651514 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.VoucherTemplateBudgetDTO import VoucherTemplateBudgetDTO
class AlipayAssetVoucherTemplateInfoQuerybudgetResponse(AlipayResponse):
    """Response wrapper exposing a list of voucher-template budget DTOs."""

    def __init__(self):
        super(AlipayAssetVoucherTemplateInfoQuerybudgetResponse, self).__init__()
        self._template_list = None

    @property
    def template_list(self):
        """The parsed list of VoucherTemplateBudgetDTO objects (or None)."""
        return self._template_list

    @template_list.setter
    def template_list(self, value):
        # Accept only lists; coerce plain dict entries into DTO instances,
        # passing through items that are already DTOs.
        if isinstance(value, list):
            self._template_list = [
                item if isinstance(item, VoucherTemplateBudgetDTO)
                else VoucherTemplateBudgetDTO.from_alipay_dict(item)
                for item in value
            ]

    def parse_response_content(self, response_content):
        """Parse the raw gateway payload and populate template_list."""
        payload = super(AlipayAssetVoucherTemplateInfoQuerybudgetResponse, self).parse_response_content(response_content)
        if 'template_list' in payload:
            self.template_list = payload['template_list']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayAssetVoucherTemplateInfoQuerybudgetResponse.py | AlipayAssetVoucherTemplateInfoQuerybudgetResponse.py | py | 1,177 | python | en | code | 241 | github-code | 13 |
32376721773 | from api.models.crud import check_id_exists_in_table, insert_tuple_on_open_stocks_table, retrieve_tuple_from_id, delete_tuple_from_table_by_id, update_tuples
from api.database.db_connection import OPEN_STOCKS_TABLE
from api.models.open_stocks.valid_ibovespa_symbols import check_if_symbol_is_valid
class OpenStock:
    """An open (not yet closed) stock position.
    Holds the row's fields, validates them eagerly, and exposes ``valid`` plus
    a human-readable ``message`` describing the last validation failure.
    """
    def __init__(self, id=None, stock=None, date=None, price=None, portfolio=None, user_id=None):
        self.id = id
        self.stock = stock
        self.date = date
        self.price = price
        self.portfolio = portfolio
        self.user_id = user_id
        self.valid = self._validation()
        self.message = None
    def __repr__(self):
        return f'{self.stock} {self.portfolio}'
    def _validate_id(self, table):
        """True if ``id`` is an int that exists in *table*; sets message otherwise."""
        if self.id and (type(self.id) == int):
            if check_id_exists_in_table(self.id, table):
                return True
            else:
                self._set_message('Id not found.')
        else:
            self._set_message('Invalid id.')
        return False
    def _validate_stock(self):
        """True if ``stock`` is a short string naming a valid Ibovespa symbol.
        Uppercases the symbol as a side effect."""
        if self.stock and (type(self.stock) == str) and (len(self.stock) <= 8):
            self.stock = self.stock.upper()
            if check_if_symbol_is_valid(self.stock):
                return True
        else:
            self._set_message('Invalid stock.')
        return False
    def _validate_date(self):
        """True if a date was supplied (no format check is performed here)."""
        if self.date:
            return True
        else:
            self._set_message('Invalid date.')
            return False
    def _validate_price(self):
        """True if ``price`` is a non-zero float.
        NOTE(review): int prices and 0.0 are rejected — confirm intended."""
        if self.price and (type(self.price) == float):
            return True
        else:
            self._set_message('Invalid price.')
            return False
    def _validate_portfolio(self):
        """True if ``portfolio`` is a non-empty string of at most 30 chars."""
        if self.portfolio and (type(self.portfolio) == str) and (len(self.portfolio) <= 30):
            return True
        else:
            self._set_message('Invalid portfolio.')
            return False
    def _validate_user_id(self):
        """True if ``user_id`` is a non-zero int."""
        if self.user_id and (type(self.user_id) == int):
            return True
        else:
            self._set_message('Invalid user id....')
            return False
    def _validation(self):
        """Run all field validators; short-circuits on the first failure."""
        if self._validate_id(OPEN_STOCKS_TABLE) and self._validate_stock() and self._validate_date() and self._validate_price() \
                and self._validate_portfolio() and self._validate_user_id():
            return True
        else:
            return False
    def _set_message(self, message):
        self.message = message
    def _set_id(self, id):
        self.id = id
    def json_data(self):
        """Return the row as a JSON-serializable dict."""
        return {'Id': self.id, 'Stock': self.stock, 'Date': str(self.date), 'Price': self.price,
                'Portfolio': self.portfolio, 'User id': self.user_id}
class NewOpenStock(OpenStock):
    """Create-and-insert variant: validates fields (no id yet) and inserts the
    row, recording success in ``crud`` and the new id on the instance."""
    def __init__(self, stock=None, date=None, price=None, portfolio=None, user_id=None):
        super().__init__()
        self.stock = stock
        self.date = date
        self.price = price
        self.portfolio = portfolio
        self.user_id = user_id
        self.valid = self._validation()
        self.crud = self._crud()
    def _validation(self):
        # Unlike the base class, a new row has no id to validate yet.
        if self._validate_stock() and self._validate_date() and self._validate_price() and self._validate_portfolio() \
                and self._validate_user_id():
            return True
        else:
            return False
    def _crud(self):
        """Insert the row; on success stores the generated id and returns True."""
        if self.valid:
            insert_success, id = insert_tuple_on_open_stocks_table(self)
            self._set_id(id)
            if insert_success:
                return True
            else:
                return False
        else:
            return False
class GetOpenStock(OpenStock):
    """Read variant: loads the row with the given id into this instance."""
    def __init__(self, id=None):
        super().__init__()
        self.id = id
        self.valid = self._validation()
        self.crud = self._crud()
    def _validation(self):
        # Only the id matters for a read.
        if self._validate_id(OPEN_STOCKS_TABLE):
            return True
        else:
            self._set_message('Invalid id.')
            return False
    def _crud(self):
        """Fetch the row and populate the instance fields from it."""
        if self.valid:
            tuple = retrieve_tuple_from_id(self.id, OPEN_STOCKS_TABLE)
            if tuple:
                # Column order: (id, stock, date, price, portfolio, user_id).
                self.stock = tuple[1]
                self.date = tuple[2]
                self.price = tuple[3]
                self.portfolio = tuple[4]
                self.user_id = tuple[5]
                self._set_message('Ok.')
                return True
            else:
                self._set_message('Error retrieving id.')
                return False
        else:
            return False
class DeleteOpenStock(OpenStock):
    """Delete variant: removes the row with the given id.
    ``valid`` reflects id validation, ``crud`` whether the delete succeeded.
    """
    def __init__(self, id=None):
        super().__init__()
        self.id = id
        self.valid = self._validation()
        self.crud = self._crud()
    def _validation(self):
        if self._validate_id(OPEN_STOCKS_TABLE):
            return True
        else:
            self._set_message('Invalid id.')
            # Fix: was an implicit None; return False explicitly, matching
            # the sibling classes (GetOpenStock/NewOpenStock).
            return False
    def _crud(self):
        """Delete the row; on success records a confirmation message."""
        if self.valid:
            if delete_tuple_from_table_by_id(self.id, OPEN_STOCKS_TABLE):
                self._set_message(f'Stock {self.id} deleted.')
                return True
            else:
                return False
        else:
            return False
class EditOpenStock(OpenStock):
    """Partial-update variant: patches any of stock/date/price/portfolio on
    the row identified by ``args.id``. ``crud`` is True only when every
    supplied-and-valid field was updated successfully.
    """
    def __init__(self, args):
        super().__init__()
        self.id = args.id
        self.stock = args.stock
        self.date = args.date
        self.price = args.price
        self.portfolio = args.portfolio
        self.user_id = args.user_id
        self.valid = self._validation()
        self.crud = self._crud()
    def _validation(self):
        # Only the id must be valid up front; the other fields are optional.
        if self._validate_id(OPEN_STOCKS_TABLE):
            return True
        else:
            self._set_message('Invalid id.')
            return False
    def _crud(self):
        """Apply each supplied, valid field as an UPDATE; aggregate results."""
        # Fix: the original tested `self._validate_id` (the bound method
        # object, always truthy) instead of calling it; use the already
        # computed `self.valid`, matching the sibling classes.
        if not self.valid:
            return False
        if not (self.stock or self.date or self.price or self.portfolio):
            self._set_message('No update parameters provided.')
            return False
        results_list = []
        if self.stock and self._validate_stock():
            results_list.append(bool(update_tuples(OPEN_STOCKS_TABLE, 'STOCK', self.id, self.stock)))
        if self.date and self._validate_date():
            results_list.append(bool(update_tuples(OPEN_STOCKS_TABLE, 'DATE', self.id, self.date)))
        if self.price and self._validate_price():
            results_list.append(bool(update_tuples(OPEN_STOCKS_TABLE, 'PRICE', self.id, self.price)))
        if self.portfolio and self._validate_portfolio():
            results_list.append(bool(update_tuples(OPEN_STOCKS_TABLE, 'PORTFOLIO', self.id, self.portfolio)))
        # NOTE: as in the original, an empty results_list (all supplied fields
        # failed validation) counts as success — confirm intended.
        if all(results_list):
            return True
        self._set_message('Fail')
        return False
| Gui-Luz/CarteiraAppApi | api/models/open_stocks/open_stocks.py | open_stocks.py | py | 7,577 | python | en | code | 0 | github-code | 13 |
17091595794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.WeatherInfo import WeatherInfo
class AnttechBlockchainDefinDataserviceWeatherinfosQueryResponse(AlipayResponse):
    """Response wrapper exposing a list of WeatherInfo DTOs."""
    def __init__(self):
        super(AnttechBlockchainDefinDataserviceWeatherinfosQueryResponse, self).__init__()
        self._weather_infos = None
    @property
    def weather_infos(self):
        # The parsed list of WeatherInfo objects (or None before parsing).
        return self._weather_infos
    @weather_infos.setter
    def weather_infos(self, value):
        # Accept only lists; coerce plain dicts into WeatherInfo instances.
        if isinstance(value, list):
            self._weather_infos = list()
            for i in value:
                if isinstance(i, WeatherInfo):
                    self._weather_infos.append(i)
                else:
                    self._weather_infos.append(WeatherInfo.from_alipay_dict(i))
    def parse_response_content(self, response_content):
        """Parse the raw gateway payload and populate weather_infos."""
        response = super(AnttechBlockchainDefinDataserviceWeatherinfosQueryResponse, self).parse_response_content(response_content)
        if 'weather_infos' in response:
            self.weather_infos = response['weather_infos']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AnttechBlockchainDefinDataserviceWeatherinfosQueryResponse.py | AnttechBlockchainDefinDataserviceWeatherinfosQueryResponse.py | py | 1,152 | python | en | code | 241 | github-code | 13 |
72425118418 | import os
import json
from flask import Flask, Response, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import sessionmaker
from flask_cors import CORS
from flask_socketio import SocketIO, send
from IAModel import IAModel
from PredictedClass import ClassList
from core.definitions import CHECKPOINT_NEW as modelPath, EMAIL_SENDER_CRON_ID, OBJECT_COLOR_DICT
from core.definitions import CLEAN, MASK, GLASSES, FACE_SHIELD, GLASSES_AND_MASK
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
from src.Video import Video
from src.EmailSender import EmailSender
from src.Cron import Cron
from src.DBHelper import *
from src.DailyReport import DailyReport
from datetime import datetime
from collections import OrderedDict
# --- Application wiring: Flask app, Socket.IO, config, DB, scheduler. ---
app = Flask(__name__)
socketIo = SocketIO(app, cors_allowed_origins='*')
app.config["socketIo"] = socketIo
app.config["clients"] = []
app.config['JSON_SORT_KEYS'] = False
# TODO: set cors properly
cors = CORS(app)
# NOTE(review): this module-level `clients` appears unused (handlers use
# app.config["clients"]) — confirm before removing.
clients = []
# Load JSON configuration relative to the current working directory.
configFile = os.path.abspath(os.getcwd()) + '/config/config.json'
with open(configFile) as file:
    config = json.load(file)
app.config.update(config)
# Detection model used by the /video_feed stream.
realTimeDetector = IAModel(modelPath)
dbUser = config['database']['username']
dbPassword = config['database']['password']
dbHost = config['database']['host']
dbPort = config['database']['port']
database = config['database']['dbName']
app.config['SQLALCHEMY_DATABASE_URI'] = f'mysql://{dbUser}:{dbPassword}@{dbHost}:{dbPort}/{database}'
db = SQLAlchemy(app)
# Plain SQLAlchemy session bound to the Flask-SQLAlchemy engine.
Session = sessionmaker()
Session.configure(bind=db.engine)
session = Session()
app.app_context().push()
# Background job: aggregate the daily report at midnight every day.
scheduler = BackgroundScheduler()
print('Starting daily report cron')
scheduler.add_job(DailyReport.runSync, 'cron', hour=00, args=[db, session])
scheduler.start()
print('Daily report cron successfully started running everyday at 00h')
@socketIo.on('connect')
def handle_connect():
    # Remember the client's session id so events can be emitted to it later.
    app.config["clients"].append(request.sid)
@socketIo.on('disconnect')
def handle_disconnect():
    # Forget the client's session id once it disconnects.
    app.config["clients"].remove(request.sid)
@app.route('/set_camera', methods=['POST'])
def setCamera():
    """Select the capture device used for the video feed.
    Expects JSON {"deviceId": <id>}; stores the id and re-opens the capture.
    """
    try:
        app.config["deviceId"] = request.json['deviceId']
        Video.setNewVideoCapture(app.config["deviceId"])
        # Fix: the OK payload was missing its closing brace ('{"status":"OK"').
        return jsonify('{"status":"OK"}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify('{"status":"Error", "message": "Could Not Set Camera"}')
@app.route('/video_feed')
def video():
    """Stream annotated camera frames as an MJPEG multipart response."""
    # Calls the getConfiguration view directly and decodes its JSON body to
    # obtain the current element-detection settings.
    elementsConfig = json.loads(getConfiguration().get_data().decode("utf-8"))
    return Response(Video.getFrame(model=realTimeDetector, elementsConfiguration=elementsConfig, app=app), mimetype = "multipart/x-mixed-replace; boundary=frame")
@app.route('/configuration', methods=['GET'])
def getConfiguration():
    """Return the object-detection toggles plus the sound-alarm flag.
    Mask/glasses are mutually exclusive with the face shield, so each side is
    marked disabled while the other is checked.
    """
    objectDetectionConfig = OrderedDict(app.config['objectDetection'])
    # Face shield cannot be combined with mask/glasses (and vice versa).
    shouldDisableFaceMask = objectDetectionConfig[MASK] or objectDetectionConfig[GLASSES]
    shouldDisableGlassesAndMask = objectDetectionConfig[FACE_SHIELD]
    elements = OrderedDict({key: {'elementName': key, 'isChecked': value} for key, value in objectDetectionConfig.items()})
    elements[MASK]['isDisabled'] = shouldDisableGlassesAndMask
    elements[GLASSES]['isDisabled'] = shouldDisableGlassesAndMask
    elements[FACE_SHIELD]['isDisabled'] = shouldDisableFaceMask
    elements['soundAlarm'] = app.config['soundAlarm']
    return jsonify(elements)
@app.route('/configuration/stats', methods=['GET'])
def getConfigurationStats():
    """Return the statistics-email settings (enabled flag and frequency)."""
    config = OrderedDict()
    config['sendEmails'] = app.config['sendEmails']
    config['frequency'] = app.config['frequency']
    return jsonify(config)
@app.route('/configuration/stats', methods=['POST'])
def setConfigurationStats():
    """Update the statistics-email settings from the JSON body."""
    try:
        requestData = request.json
        app.config['sendEmails'] = requestData['sendEmails']
        app.config['frequency'] = requestData['frequency']
        return jsonify('{"status":"ok", "message": "Stats Configuration Updated"}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify('{"status":"Error", "message": "Error updating Stats Configuration"}')
@app.route('/configuration', methods=['POST'])
def setConfiguration():
    """Update the detection toggles; the body also carries soundAlarm, which
    is stored separately and removed from the detection dict."""
    try:
        requestData = request.json
        app.config['soundAlarm'] = requestData['soundAlarm']
        del requestData['soundAlarm']
        app.config['objectDetection'] = requestData
        return jsonify('{"status":"ok", "message": "Configuration Changed"}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify('{"status":"Error", "message": "Error on Configuration Change"}')
@app.route('/loadCron', methods=['POST'])
def setCron():
    """Schedule (or reschedule) the statistics-email cron from the JSON body,
    persist the schedule, and enable email sending."""
    try:
        frequency = request.json
        print(frequency)
        # '*' means "every" for the fields the chosen frequency doesn't pin.
        selectedDayOfWeek = Cron.translateDayOfWeek(frequency['propiedadAdicional']) or '*'
        selectedDayOfMonth = Cron.calculateDayOfMonth(frequency['propiedadAdicional']) or '*'
        cron = Cron(date=datetime.today().strftime("%Y-%m-%d"), day_of_week=selectedDayOfWeek, day=selectedDayOfMonth, hour=frequency['hora'], isDeleted=False)
        # Replace any previously scheduled email job before adding the new one.
        if scheduler.get_job(EMAIL_SENDER_CRON_ID):
            scheduler.remove_job(EMAIL_SENDER_CRON_ID)
        scheduler.add_job(EmailSender.triggerEmailSender, 'cron', day=selectedDayOfMonth, day_of_week=selectedDayOfWeek, hour=frequency['hora'], args=[frequency, datetime.today(), db, app], id=EMAIL_SENDER_CRON_ID)
        save(session, cron)
        app.config["sendEmails"] = "true"
        for prop in frequency:
            app.config['frequency'][prop] = frequency[prop]
        return jsonify('{"status":"ok", "message": "Cron successfully triggered"}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify('{"status":"Error", "message": "Error on Cron trigger"}')
@app.route('/removeCron', methods=['GET'])
def removeCron():
    """Stop the scheduled statistics-email job and disable email sending."""
    try:
        scheduler.remove_job(EMAIL_SENDER_CRON_ID)
        app.config["sendEmails"] = "false"
        return jsonify('{"status":"ok", "message": "Cron successfully removed"}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify('{"status":"Error", "message": "Error on Cron remove"}')
@app.route('/statistic/<date>', methods=['GET'])
def getStatistic(date):
    """Return per-hour event counts for *date*, grouped by detected class and
    shaped for the front-end chart: [{x: hours, y: counts, name, color}, ...]."""
    try:
        statisticsData = getStatisticsByDate(db, date)
        jsonObject = {}
        for row in statisticsData:
            className = row['name']
            if className not in jsonObject:
                jsonObject[className] = {'x': [], 'y': [], 'name': className, 'color': OBJECT_COLOR_DICT[className]}
            jsonObject[className]['x'].append(row['hour'])
            jsonObject[className]['y'].append(row['events'])
        return jsonify(list(jsonObject.values()))
    except Exception:
        # Narrowed from a bare except; degrade to an empty dataset on failure.
        return jsonify(list())
@app.route('/emails', methods=['GET'])
def getEmails():
    """Return all active (non-deleted) recipient emails."""
    try:
        return jsonify(list(getAllEmailsAvailables(session, app)))
    except Exception:
        # Narrowed from a bare except; degrade to an empty list on failure.
        return jsonify(list())
@app.route('/emails', methods=['POST'])
def saveNewEmail():
    """Register a recipient email, restoring it if it was previously removed."""
    # Bind before the try so the except handler can reference it safely.
    email = None
    try:
        currentEmails = list(getAllEmails(session, app))
        email = request.json
        print(email)
        if any(e.email == email for e in currentEmails):
            # Known address: un-delete it instead of inserting a duplicate.
            restoreEmail(session, email)
            # Fix: the messages were plain strings, so "{email}" was never
            # interpolated; made them f-strings (and fixed "sucessfully").
            return jsonify(f'{{"status":"ok", "message": "{email} successfully restored"}}')
        else:
            emailObject = Email(email)
            save(session, emailObject)
            return jsonify(f'{{"status":"ok", "message": "{email} successfully saved"}}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify(f'{{"status":"Error", "message": "Error saving {email}"}}')
@app.route('/removeEmail', methods=['POST'])
def deleteEmails():
    """Soft-delete the recipient email given in the JSON body."""
    # Bind before the try so the except handler can reference it safely.
    email = None
    try:
        email = request.json
        print(email)
        deleteEmail(session, email)
        # Fix: the messages were plain strings, so "{email}" was never
        # interpolated; made them f-strings (and removed dead commented code).
        return jsonify(f'{{"status":"ok", "message": "{email} successfully deleted"}}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify(f'{{"status":"Error", "message": "Error removing {email}"}}')
@app.route('/sendStatsNow', methods=['POST'])
def sendStatsNow():
    """Aggregate today's report immediately and email it to all recipients."""
    try:
        DailyReport.runSync(db, session)
        EmailSender.sendEmailNow(datetime.today(), db, app)
        return jsonify('{"status":"ok", "message": "Email successfully sent"}')
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt escape.
        return jsonify('{"status":"Error", "message": "Error sending Email"}')
73534348178 | import logging
import json
from flask import request, make_response, jsonify
from flask.views import MethodView
from flask_login import current_user
from burgeon import db
from burgeon.models import Goal, Task
log = logging.getLogger('burgeon.api.task.delete_task_api')
class DeleteTaskAPI(MethodView):
    """
    Delete a Task by its primary key.
    (The original docstring said "Delete Goal"; this view deletes a Task.)
    """
    def delete(self, task_id):
        """Handle DELETE /<task_id>.
        Returns 200 on success, 404 when the task is unknown, and 401 on a
        database error. NOTE(review): 500 would be the conventional status
        for a server-side failure — confirm the API contract before changing.
        """
        # The body is only used for error logging below.
        post_data = request.get_json()
        task = Task.query.get(task_id)
        if task:
            try:
                db.session.delete(task)
                db.session.commit()
                responseObject = {
                    'status': 'success',
                    'message': 'Task successfully deleted.'
                }
                return make_response(jsonify(responseObject), 200)
            except Exception as e:
                log.error('Delete Task failed. Error: {}. Params: {}'.format(e, post_data))
                responseObject = {
                    'status': 'fail',
                    'message': 'Some error occurred. Please try again.'
                }
                return make_response(jsonify(responseObject), 401)
        else:
            responseObject = {
                'status': 'fail',
                'message': 'Task not found.',
            }
            return make_response(jsonify(responseObject), 404)
| danielvinson/Burgeon | burgeon-server/burgeon/api/tasks/delete_task_api.py | delete_task_api.py | py | 1,340 | python | en | code | 1 | github-code | 13 |
40332714193 | import os
import re
import csv
import sys
from docutils import nodes
from sphinx.builders import Builder
# Pre-bound finditer over markup fragments that suggest reST leaked into the
# rendered output (stray "::", roles like ":foo", backticks, directives).
detect_all = re.compile(r'''
::(?=[^=])| # two :: (but NOT ::=)
:[a-zA-Z][a-zA-Z0-9]+| # :foo
`| # ` (seldom used by itself)
(?<!\.)\.\.[ \t]*\w+: # .. foo: (but NOT ... else:)
''', re.UNICODE | re.VERBOSE).finditer
py3 = sys.version_info >= (3, 0)  # True when running under Python 3
class Rule:
    """A rule for ignoring issues, loaded from susp-ignored.csv."""

    def __init__(self, docname, lineno, issue, line):
        # docname: document to which this rule applies
        # lineno:  line number in the original source; the rule only matches
        #          near that line (None -> don't care)
        # issue:   the markup fragment that triggered this rule
        # line:    text of the container element (single line only)
        self.docname, self.lineno = docname, lineno
        self.issue, self.line = issue, line
        self.used = False  # flipped once the rule actually suppresses an issue

    def __repr__(self):
        return '{0.docname},,{0.issue},{0.line}'.format(self)
class dialect(csv.excel):
    """Our dialect: uses only linefeed as newline."""
    # Overrides csv.excel's default '\r\n' so the log file stays LF-only.
    lineterminator = '\n'
class CheckSuspiciousMarkupBuilder(Builder):
    """
    Checks for possibly invalid markup that may leak into the output.
    """
    name = 'suspicious'
    def init(self):
        """Prepare the output CSV and load the ignore-rule database."""
        # create output file
        self.log_file_name = os.path.join(self.outdir, 'suspicious.csv')
        open(self.log_file_name, 'w').close()
        # load database of previously ignored issues
        self.load_rules(os.path.join(os.path.dirname(__file__), '..',
                                     'susp-ignored.csv'))
    def get_outdated_docs(self):
        """Check every document the environment found."""
        return self.env.found_docs
    def get_target_uri(self, docname, typ=None):
        """No real output documents are produced, so URIs are empty."""
        return ''
    def prepare_writing(self, docnames):
        """Nothing to prepare; required by the Builder interface."""
        pass
    def write_doc(self, docname, doctree):
        """Walk one document's doctree looking for suspicious fragments."""
        # set when any issue is encountered in this document
        self.any_issue = False
        self.docname = docname
        visitor = SuspiciousVisitor(doctree, self)
        doctree.walk(visitor)
    def finish(self):
        """Warn about ignore rules that never matched anything."""
        unused_rules = [rule for rule in self.rules if not rule.used]
        if unused_rules:
            self.warn('Found %s/%s unused rules:' %
                      (len(unused_rules), len(self.rules)))
            for rule in unused_rules:
                self.info(repr(rule))
        return
    def check_issue(self, line, lineno, issue):
        """Report *issue* unless an ignore rule covers it."""
        if not self.is_ignored(line, lineno, issue):
            self.report_issue(line, lineno, issue)
    def is_ignored(self, line, lineno, issue):
        """Determine whether this issue should be ignored."""
        docname = self.docname
        for rule in self.rules:
            if rule.docname != docname: continue
            if rule.issue != issue: continue
            # Both lines must match *exactly*. This is rather strict,
            # and probably should be improved.
            # Doing fuzzy matches with levenshtein distance could work,
            # but that means bringing other libraries...
            # Ok, relax that requirement: just check if the rule fragment
            # is contained in the document line
            if rule.line not in line: continue
            # Check both line numbers. If they're "near"
            # this rule matches. (lineno=None means "don't care")
            if (rule.lineno is not None) and \
                abs(rule.lineno - lineno) > 5: continue
            # if it came this far, the rule matched
            rule.used = True
            return True
        return False
    def report_issue(self, text, lineno, issue):
        """Log one issue to the CSV file and emit a build warning."""
        if not self.any_issue: self.info()
        self.any_issue = True
        self.write_log_entry(lineno, issue, text)
        if py3:
            self.warn('[%s:%d] "%s" found in "%-.120s"' %
                      (self.docname, lineno, issue, text))
        else:
            # Python 2: re-encode to the default encoding so warnings never
            # raise UnicodeEncodeError on exotic characters.
            self.warn('[%s:%d] "%s" found in "%-.120s"' % (
                self.docname.encode(sys.getdefaultencoding(),'replace'),
                lineno,
                issue.encode(sys.getdefaultencoding(),'replace'),
                text.strip().encode(sys.getdefaultencoding(),'replace')))
        self.app.statuscode = 1
    def write_log_entry(self, lineno, issue, text):
        """Append one row (docname, lineno, issue, text) to suspicious.csv."""
        if py3:
            f = open(self.log_file_name, 'a')
            writer = csv.writer(f, dialect)
            writer.writerow([self.docname, lineno, issue, text.strip()])
            f.close()
        else:
            # Python 2 csv wants bytes; write UTF-8 explicitly.
            f = open(self.log_file_name, 'ab')
            writer = csv.writer(f, dialect)
            writer.writerow([self.docname.encode('utf-8'),
                             lineno,
                             issue.encode('utf-8'),
                             text.strip().encode('utf-8')])
            f.close()
    def load_rules(self, filename):
        """Load database of previously ignored issues.
        A csv file, with exactly the same format as suspicious.csv
        Fields: document name (normalized), line number, issue, surrounding text
        """
        self.info("loading ignore rules... ", nonl=1)
        self.rules = rules = []
        try:
            if py3:
                f = open(filename, 'r')
            else:
                f = open(filename, 'rb')
        except IOError:
            # A missing rules file simply means "no ignores".
            return
        for i, row in enumerate(csv.reader(f)):
            if len(row) != 4:
                raise ValueError(
                    "wrong format in %s, line %d: %s" % (filename, i+1, row))
            docname, lineno, issue, text = row
            if lineno:
                lineno = int(lineno)
            else:
                lineno = None
            if not py3:
                docname = docname.decode('utf-8')
                issue = issue.decode('utf-8')
                text = text.decode('utf-8')
            rule = Rule(docname, lineno, issue, text)
            rules.append(rule)
        f.close()
        self.info('done, %d rules loaded' % len(self.rules))
def get_lineno(node):
    """Walk up *node*'s parent chain and return the first known line number.

    The starting node's own ``line`` is deliberately skipped (line numbers
    "go backwards" sometimes in docutils trees).  Returns None when no
    ancestor carries line information — including when the parent chain ends,
    a case in which the original version raised AttributeError by reading
    ``.line`` off a None parent.
    """
    lineno = None
    while lineno is None and node:
        node = node.parent
        if node is None:
            break  # ran off the top of the tree without finding a line number
        lineno = node.line
    return lineno
def extract_line(text, index):
    """Return the single line of *text* containing character *index*.

    *text* may be a multiline string; the returned line excludes its
    terminating newline.  For example, in ``"abc\ndefgh\ni"`` index 6 lies
    on ``"defgh"`` and index 10 on ``"i"``.
    """
    start = text.rfind('\n', 0, index) + 1
    end = text.find('\n', index)
    if end < 0:
        end = len(text)
    return text[start:end]
class SuspiciousVisitor(nodes.GenericNodeVisitor):
    """Doctree visitor that feeds text nodes through the suspicious-markup scan."""
    # Highest line number seen so far; used to smooth non-monotonic linenos.
    lastlineno = 0
    def __init__(self, document, builder):
        nodes.GenericNodeVisitor.__init__(self, document)
        self.builder = builder
    def default_visit(self, node):
        """Scan text-bearing nodes and forward each suspicious fragment."""
        if isinstance(node, (nodes.Text, nodes.image)): # direct text containers
            text = node.astext()
            # lineno seems to go backwards sometimes (?)
            self.lastlineno = lineno = max(get_lineno(node) or 0, self.lastlineno)
            seen = set() # don't report the same issue more than only once per line
            for match in detect_all(text):
                issue = match.group()
                line = extract_line(text, match.start())
                if (issue, line) not in seen:
                    self.builder.check_issue(line, lineno, issue)
                    seen.add((issue, line))
    unknown_visit = default_visit
    def visit_document(self, node):
        # Reset the line tracker for each new document.
        self.lastlineno = 0
    def visit_comment(self, node):
        # ignore comments -- too much false positives.
        # (although doing this could miss some errors;
        # there were two sections "commented-out" by mistake
        # in the Python docs that would not be caught)
        raise nodes.SkipNode
| kbengine/kbengine | kbe/src/lib/python/Doc/tools/extensions/suspicious.py | suspicious.py | py | 7,967 | python | en | code | 5,336 | github-code | 13 |
21419996698 | from darkflow.net.build import TFNet
import cv2
import numpy as np
# Darkflow/YOLO demo: load the network, open a sample movie, prepare an output
# writer, and build one distinct draw color per detection class.
options = {"model": "cfg/yolo.cfg", "load": "bin/yolo.weights", "threshold": 0.1}
tfnet = TFNet(options)
# Load the input video.
cap = cv2.VideoCapture("/content/darkflow/sample_movie/デモ.mp4")
# Prepare the output writer (same fps and frame size as the input).
output_file = "/content/darkflow/sample_movie/デモ_output.mp4"
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = int(cap.get(cv2.CAP_PROP_FPS))
size = (
    int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
    int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
)
vw = cv2.VideoWriter(output_file, fourcc, fps, size)
# The 20 PASCAL VOC class labels this YOLO model predicts.
class_names = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
               'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
               'dog', 'horse', 'motorbike', 'person', 'pottedplant',
               'sheep', 'sofa', 'train', 'tvmonitor']
num_classes = len(class_names)
class_colors = []
# Sweep hue in HSV space so each class gets a visually distinct BGR color.
for i in range(0, num_classes):
    hue = 255*i/num_classes
    col = np.zeros((1,1,3)).astype("uint8")
    col[0][0][0] = hue
    col[0][0][1] = 128
    col[0][0][2] = 255
    cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
    col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]), int(cvcol[0][0][2]))
    class_colors.append(col)
# Sanity checks: read one frame and report stream properties.
ret, frame = cap.read()
print(len(frame))
#if len(frame)':
#    print('true')
print(ret)
count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(count)
print(size)
| kanno0725/201106_kanno | test4.py | test4.py | py | 1,385 | python | en | code | 0 | github-code | 13 |
12732695580 | from django.conf import settings
from django.utils.html import format_html_join
from wagtail.core import hooks
@hooks.register("insert_editor_js")
def editor_js():
    """Inject extra JavaScript into the Wagtail editor (preview override)."""
    scripts = ("js/override_preview.js",)
    return format_html_join(
        "\n",
        '<script src="{0}{1}"></script>',
        ((settings.STATIC_URL, name) for name in scripts),
    )
| michael-caktus/headless_wagtail_test | lp_test/wagtail_hooks.py | wagtail_hooks.py | py | 367 | python | en | code | 0 | github-code | 13 |
2993932369 | # -*- coding: utf-8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# Import the client and models for the corresponding product module.
from tencentcloud.soe.v20180724 import soe_client, models
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
try:
    # Instantiate a credential object from the Tencent Cloud account's
    # secretId / secretKey (read from the environment).
    cred = credential.Credential(
        os.environ.get("TENCENTCLOUD_SECRET_ID"),
        os.environ.get("TENCENTCLOUD_SECRET_KEY"))
    # Instantiate an HTTP profile (optional; defaults are fine for most uses).
    httpProfile = HttpProfile()
    httpProfile.reqMethod = "POST" # request method (POST is the default)
    httpProfile.reqTimeout = 30 # request timeout in seconds (default 60)
    httpProfile.endpoint = "soe.tencentcloudapi.com" # region endpoint domain (defaults to nearest)
    httpProfile.keepAlive = True
    # Instantiate a client profile (optional; skip when defaults suffice).
    clientProfile = ClientProfile()
    clientProfile.signMethod = "TC3-HMAC-SHA256" # signing algorithm (default HmacSHA256)
    clientProfile.unsignedPayload = True
    clientProfile.httpProfile = httpProfile
    client = soe_client.SoeClient(cred, "", clientProfile)
    # Initialise an oral-evaluation session for the reference word "since".
    req = models.InitOralProcessRequest()
    req.SessionId = "stress_test_956938"
    req.RefText = "since"
    req.WorkMode = 0
    req.EvalMode = 1
    req.ScoreCoeff = 3.5
    resp = client.InitOralProcess(req)
    # Print the JSON-formatted response payload.
    print("%s" % resp.to_json_string())
except TencentCloudSDKException as err:
    print("%s" % err)
# BOJ 2292 "Honeycomb": given a cell number N, print which ring of the
# honeycomb it lies in (cell 1 is ring 1; each ring k adds 6*k cells).
target = int(input())
cells_covered = 1  # total cells contained within the rings seen so far
ring = 1           # current ring (the answer)
while target > cells_covered:
    cells_covered += 6 * ring
    ring += 1
print(ring)
| wndnjs2037/Algorithm | 백준/Bronze/2292. 벌집/벌집.py | 벌집.py | py | 757 | python | ko | code | 0 | github-code | 13 |
#!/usr/bin/env python3
# A program that prompts a user for two operators and an operation (plus or
# minus) and shows the result.  The user may enter q to exit the program.
#
# Fix over the previous version: non-numeric input used to crash with an
# unhandled ValueError at float(); it now re-prompts instead.
while True:
    print("\nWhat is the first operator? Or, enter q to quit: ")
    calc1 = input()
    if calc1.upper() == "Q":
        break
    print("\nWhat is the second operator? Or, enter q to quit: ")
    calc2 = input()
    if calc2.lower() == "q":
        break
    try:
        calc1 = float(calc1)
        calc2 = float(calc2)
    except ValueError:
        # Bad number: restart the loop instead of crashing.
        print("\n Not a valid entry. Restarting...")
        continue
    print("Enter an operation to perform on the two operators (+ or -): ")
    operation = input()
    if operation == "+":
        print("\n" + str(calc1) + " + " + str(calc2) + " = " + str(calc1 + calc2))
        break
    elif operation == '-':
        print("\n" + str(calc1) + " - " + str(calc2) + " = " + str(calc1 - calc2))
        break
    else:
        print("\n Not a valid entry. Restarting...")
| Binkledurg/mycode | broken01/ifixed.py | ifixed.py | py | 1,225 | python | en | code | 0 | github-code | 13 |
22334642425 | #Leetcode 1417. Reformat The String
class Solution:
    def reformat(self, s: str) -> str:
        """Rearrange *s* so letters and digits strictly alternate.

        Returns "" when no such arrangement exists (counts differ by 2+).
        Characters are emitted from the back of each group, matching the
        original pop-based ordering.
        """
        letters = [ch for ch in s if not ch.isdigit()]
        nums = [ch for ch in s if ch.isdigit()]
        if abs(len(letters) - len(nums)) >= 2:
            return ""
        pieces = []
        while nums and letters:
            pieces.append(letters.pop())
            pieces.append(nums.pop())
        out = "".join(pieces)
        # At most one character remains; digits lead, letters trail.
        if nums:
            out = nums[-1] + out
        elif letters:
            out = out + letters[-1]
        return out
71473329617 | import numpy as np
from onnx.reference.ops._op import OpRunUnaryNum
class Hardmax(OpRunUnaryNum):
    """Reference implementation of the ONNX Hardmax operator: one-hot of the
    argmax along the chosen axis."""

    def _run(self, x, axis=None):  # type: ignore
        # Fall back to the node attribute only when axis is genuinely absent:
        # the previous `axis = axis or self.axis` treated a legitimate axis
        # of 0 as missing because 0 is falsy.
        if axis is None:
            axis = self.axis  # type: ignore
        x_argmax = np.argmax(x, axis=axis)  # type: ignore
        # One-hot encode the argmax positions along the chosen axis.
        y = np.zeros_like(x)
        np.put_along_axis(
            y, np.expand_dims(x_argmax, axis=axis), 1, axis=axis  # type: ignore
        )
        return (y,)
| onnx/onnx | onnx/reference/ops/op_hardmax.py | op_hardmax.py | py | 426 | python | en | code | 15,924 | github-code | 13 |
16863736563 | from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
import sys
import argparse
from mapperlite import MapperLite
import struct
import hashlib
import pysam
import chicago_edge_scores as ces
#import BamTags
from bamtags import BamTags
from chicago_edge_links2 import read_mask_ranges, mask_test
from time import gmtime, strftime
debug=False  # module-wide switch for verbose diagnostic printing
def pairs_overlap(x,y):
    """Return True when the closed intervals spanned by x and y intersect.

    Each argument is a coordinate pair in either order.  Falls through
    (returning None) when the intervals are disjoint, matching the
    truthiness contract callers rely on.
    """
    lo1, hi1 = min(x), max(x)
    lo2, hi2 = min(y), max(y)
    # Two intervals intersect exactly when each one starts no later than
    # the other ends.
    if lo1 <= hi2 and lo2 <= hi1:
        return True
def segments_intersect(x, y):
    """Return the intersection (p, q) of two overlapping intervals.

    x and y are (start, end) pairs in either order.  Raises Exception when
    the intervals do not overlap; callers check with pairs_overlap first.
    """
    if not pairs_overlap(x, y):
        raise Exception
    a, b = min(x), max(x)
    c, d = min(y), max(y)
    # The original walked four overlapping cases whose last match always
    # reduced to (max of starts, min of ends); the "wtf" debug prints that
    # guarded impossible p > q states were unreachable and have been dropped.
    return (max(a, c), min(b, d))
tmsd_debug=False  # extra stderr tracing for tile_mask_score_delta only
def tile_mask_score_delta(i,j,binwidth,masked_segments,model,mask_iter_i,mask_iter_j,debug):
    """Correction to the tile (i, j) LLR score for masked sub-segments.

    Adds back the expected-pair contributions (n_bar - n_bar0 under *model*)
    of masked stretches overlapping tile row i and tile column j, and
    subtracts mask-to-mask double counting.  mask_iter_i/mask_iter_j are
    caller-maintained cursors into the sorted masked_segments list.
    Returns 0 on the diagonal (i == j).
    """
    if i==j: return(0)
    gap=(j-i)*binwidth
    left_tile_masked_segments = []
    right_tile_masked_segments = []
    mii0,mij0=mask_iter_i,mask_iter_j
    # Collect masked stretches clipped to the left tile [i*binwidth, (i+1)*binwidth).
    while mask_iter_i<len(masked_segments) and masked_segments[mask_iter_i][0]<(i+1)*binwidth:
        if tmsd_debug: print("tmsd:\tI",mask_iter_i,masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth),file=sys.stderr,sep="\t")
        if pairs_overlap( masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth)) :
            a,b = segments_intersect(masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth))
            if b-a < 0:
                print("tmsd:\tI",a,b,mask_iter_i,masked_segments[mask_iter_i],(i*binwidth,(i+1)*binwidth),file=sys.stderr,sep="\t")
                raise Exception
            left_tile_masked_segments.append( (a,b) )
        mask_iter_i+=1
    # Same for the right tile [j*binwidth, (j+1)*binwidth).
    while mask_iter_j<len(masked_segments) and masked_segments[mask_iter_j][0]<(j+1)*binwidth:
        if tmsd_debug: print("tmsd:\tJ",mask_iter_j,masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth),file=sys.stderr,sep="\t")
        if pairs_overlap( masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth)) :
            a,b = segments_intersect(masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth))
            if b-a < 0:
                print("tmsd:\tJ",a,b,mask_iter_j,masked_segments[mask_iter_j],(j*binwidth,(j+1)*binwidth),file=sys.stderr,sep="\t")
                raise Exception
            right_tile_masked_segments.append( (a,b) )
        mask_iter_j+=1
    score_delta = 0.0
    # Expected-pair corrections between each masked stretch and the opposite tile.
    for a,b in right_tile_masked_segments:
        score_delta += model.n_bar( binwidth, b-a, gap+a-j*binwidth ) - model.n_bar0( binwidth*(b-a) )
    for a,b in left_tile_masked_segments:
        score_delta += model.n_bar( binwidth, b-a, gap+(i+1)*binwidth-b ) - model.n_bar0( binwidth*(b-a) )
    # Remove mask-to-mask contributions counted twice above.
    for a,b in left_tile_masked_segments:
        for c,d in right_tile_masked_segments:
            if tmsd_debug: print("mask pair:",a,b,c,d,b-a,d-c,c-b,file=sys.stderr,sep="\t")
            score_delta -= model.n_bar( b-a, d-c, c-b ) - model.n_bar0( (b-a)*(d-c) )
    if tmsd_debug:
        print("tmsd:",(i*binwidth,(i+1)*binwidth),(j*binwidth,(j+1)*binwidth),score_delta,left_tile_masked_segments,right_tile_masked_segments,(mii0,mij0),masked_segments,file=sys.stderr,sep="\t")
    return(score_delta)
def pair2bin2D(pair, binwidth):
    """Map a coordinate pair to the indices of its 2-D tile of side binwidth."""
    first, second = pair
    return (int(first / binwidth), int(second / binwidth))
def make_scaffold_mask(scaffold,mapper,mask):
    """Lift per-original-contig mask ranges onto *scaffold* coordinates.

    For every contig placed on the scaffold, each masked range of its
    original contig is mapped through *mapper*; ranges landing on this
    scaffold are collected (orientation-normalised and clipped to the
    scaffold length) and returned as a sorted list of (start, end) tuples.
    """
    s_mask = {}
    segments={}
    slen = mapper.scaffold_length[scaffold]
    for seg in mapper.scaffold_contigs[scaffold]:
        # print("x:",seg)
        ocontig=mapper.contig_ocontig[seg]
        for m in mask.get(ocontig,[]):
            oscaffold ,z1a,z2a,strand,c1 = mapper.mapCoord(ocontig,m[0],m[1])
            # Skip ranges that map to a different scaffold or fail to map.
            if not scaffold==oscaffold: continue
            if not z1a: continue
            if strand==1:
                # segments.append( (z1a,min(slen,z2a)) )
                segments[ (z1a,min(slen,z2a)) ] =1
            else:
                # Reverse strand: coordinates come back swapped.
                segments[ (max(0,z2a),z1a) ] = 1
                # segments.append( (max(0,z2a),z1a) )
            # print(ocontig,seg,"\t\t",m,mapper.mapCoord(ocontig,m[0],m[1]))
    # for a,b in segments:
    #     print("\t",a,b,b-a,sep="\t")
    segments=list(segments.keys())
    segments.sort()
    return(segments)
def chicago_pairs(sca,mapper,bamlist,minq=20,mask={}):
    """Yield Chicago read pairs internal to scaffold *sca*.

    Iterates read1 alignments from each BAM in *bamlist* over the scaffold's
    contigs, filters duplicates, low-mapq reads (either mate below *minq*)
    and masked positions, maps both mates into scaffold coordinates, and
    yields only pairs whose two ends land on the same scaffold *sca*.

    NOTE(review): mask uses a mutable default argument; it is only read
    here, but callers should not rely on the shared default.
    """
    for seg in mapper.scaffold_contigs[sca]:
        # Recover the original contig name by stripping the trailing "_N" part.
        ref="_".join(seg.split("_")[:-1])
        for b in bamlist:
            for aln in b.fetch(until_eof=True,reference=ref):
                if not aln.is_read1: continue
                if aln.is_duplicate: continue
                if aln.mapq < minq : continue
                if BamTags.mate_mapq(aln) < minq : continue
                # print("#x",ref,mask.get(ref,[]))
                if mask_test(ref,aln.pos,mask) or mask_test(b.getrname(aln.rnext),aln.pnext,mask) : continue
                contig = b.getrname(aln.tid) # snam[aln.tid]
                ncontig= b.getrname(aln.rnext) if aln.rnext>=0 else -1
                # Map both mates into scaffold coordinates.
                scaffold ,z1a,z2a,z3a,c1 = mapper.mapCoord( contig, aln.pos, aln.pos+1 )
                nscaffold,z2p,x2p,z3p,c2 = mapper.mapCoord(ncontig, aln.pnext,aln.pnext+1 )
                if debug: print(("#x",contig,ncontig,aln.pos,aln.pnext,scaffold,nscaffold,sca,z1a,z2p,ref,mapper.ocontig_contigs.get(ref,[])))
                if scaffold==nscaffold and sca==scaffold:
                    #yield( sc,seg,contig,ncontig,scaffold,z1a,z2a,z3a,nscaffold,z2p,x2p,z3p )
                    yield( sca,z1a,z2p,c1,c2,seg,contig,ncontig,scaffold,z1a,z2a,z3a,nscaffold,z2p,x2p,z3p,aln.query_name )
hist_sample_interval = 1500  # bp between successive support-histogram samples
hist_bisize = 1.0  # histogram bin size (LLR units)
scaffold_end_filter=10000  # ignore samples within this many bp of scaffold ends
# Fine grained means for every read
def fine_grained_support(edges,nb,scaffold,pairs,model,buff,debug,t1,t2,gpf,minx,maxx,joins,logfile,binwidth,slen,mask=[],masked_segment_pairs_n_minus_n0=[],raw_hist=False,support_curve=False):
    """Sweep the per-read LLR support across *scaffold* and find weak segments.

    *edges* / *nb* are sorted event lists of (position, delta) pairs; running
    sums of them give the raw LLR and pair coverage at each position.  Mask
    corrections from *mask* and precomputed *masked_segment_pairs_n_minus_n0*
    adjust for unmappable stretches.  Segments where support dips below *t2*
    (after having exceeded *t1*) are logged as "rawLLR" candidates.

    Returns (segments, curve): the break segments and, when *support_curve*
    is set, the sampled (x, score) curve.

    NOTE(review): mask and masked_segment_pairs_n_minus_n0 use mutable
    default arguments; both are only read here, but callers should not rely
    on the shared defaults.
    """
    # print("zz:",scaffold,sum([b-a for a,b in mask]),[ (a,b,b-a) for a,b in mask])
    curve=[]
    if gpf: gpf.write("\n\n\n")
    # if True: print("fine",len(edges),debug)
    if len(edges)==0: return([],[])
    if edges[-1][0]<0 or edges[-1][0] > slen+1:
        print("unexpected coordinate {} not in range 0-{}".format(edges[-1][0],slen))
        raise Exception
    edges.sort()
    nb.sort()
    rs=0 # running sum for individual reads
    n=0
    tripped=False # have we reached the threshold where we want to start breaking (by construction support low on edges).
    last=0
    state=0
    stretch_start=0
    low_point=0.0
    ji=0
    gap_scores={}
    gap_coverages={}
    # slen = mapper.scaffold_length[scaffold]
    break_buffer = []
    last_trip=0
    a=0
    b=0
    c=0
    f=0 # index in masked segment pairs
    mask_correction_rs=0.0
    next_sample = hist_sample_interval /2
    for i in range(len(edges)):
        rs+=edges[i][1]
        n+=nb[i][1] # number of pairs spanning
        x=edges[i][0] # position
        # Running-sum part of the mask correction: fold in mask-pair deltas
        # whose trigger position has been passed.
        while f<len(masked_segment_pairs_n_minus_n0) and masked_segment_pairs_n_minus_n0[f][0]<x:
            mask_correction_rs -= masked_segment_pairs_n_minus_n0[f][1]
            f +=1
        #update iterators that keep track of which masked regions are "in range": a-b are in range and trailing x, b-c are in range and upcoming.
        while a<len(mask) and mask[a][1]<x-maxx: a+=1
        while b<len(mask) and mask[b][0]<x: b+=1
        while c<len(mask) and mask[c][0]<x+maxx: c+=1
        # -----------------------------------------
        #          a         bx        c
        mask_correction = 0.0
        # rmi = right mask index
        for rmi in range(b,min(c+1,len(mask))):
            ma,mb = mask[rmi]
            if x>ma: continue
            left_limit = max(0,ma-maxx)
            if (x-left_limit)<0: continue
            mask_correction_delta = model.n_bar( mb-ma, x-left_limit, ma-x ) - model.n_bar0( (mb-ma)*(x-left_limit ) )
            mask_correction += mask_correction_delta
            if debug: print(x,ma-x,a,b,c,left_limit,"Right",mask_correction_delta,sep="\t")
        # left mask index
        for lmi in range(a,b):
            ma,mb = mask[lmi]
            if x<mb:
                # x falls inside this masked stretch: split the correction at x.
                if ma<x and x<mb:
                    right_limit = min(slen,mb+maxx)
                    #left_limit = min(slen,mb-maxx)
                    left_limit = max(0,ma-maxx)
                    mask_correction_delta1 = model.n_bar( mb-x, x-left_limit, 0 ) - model.n_bar0( (mb-x)*(x-left_limit) )
                    try:
                        mask_correction_delta2 = model.n_bar( x-ma, right_limit-x, mb-x ) - model.n_bar0( (x-ma)*(right_limit-x) )
                    except Exception as e:
                        #wtf: scaffold: 7396, x: 1006292, ma: 686903, mb: 1006300, rl: 886903, ll: 486903
                        print("wtf: scaffold: {scaffold}, x: {x}, ma: {ma}, mb: {mb}, rl: {right_limit}, ll: {left_limit}".format(scaffold=scaffold,x=x,ma=ma,right_limit=right_limit,mb=mb,left_limit=left_limit))
                        raise e
                    mask_correction += mask_correction_delta1 + mask_correction_delta2
                    if debug: print(x,x-mb,a,b,c,left_limit,right_limit,"Spanned",mask_correction_delta1,mask_correction_delta2,sep="\t")
                continue
            right_limit = min(slen,mb+maxx)
            if right_limit - x < 0: continue
            mask_correction_delta = model.n_bar( mb-ma, right_limit-x, x-mb ) - model.n_bar0( (mb-ma)*(right_limit-x ) )
            mask_correction += mask_correction_delta
            if debug: print(x,x-mb,a,b,c,right_limit,"Left",mask_correction_delta,sep="\t")
        # take our running sum score and see if we should cut here, fine grained not clipping
        try:
            score=model.cutScore(slen,x,n,rs,rangeCutoff=maxx,minSep=minx)+mask_correction+mask_correction_rs
            # NOTE(review): the corrections are added again here, on top of the
            # line above — they end up counted twice.  Verify this is intended.
            score+=mask_correction+mask_correction_rs
            if debug: print("finescore:",scaffold,x,a,b,c,len(mask),score,mask_correction ,mask_correction_rs,score+mask_correction+mask_correction_rs,sep="\t")
        except Exception as e:
            print("Exception computing cutScore for scaffold {} at {}. i={}".format(scaffold,x,i),edges[:10])
            #Exception computing cutScore for scaffold 8351 at 12290. i=2 [(11306, -8.997670875606678, 'a', 'a'), (11686, -8.578118407318865, 'a', 'a'), (12290, 8.578118407318865, 'a', 'b'), (12297, 8.997670875606678, 'a', 'b')]
            # print(i,edges[i],nb[i],rs,n,x,scaffold,slen,len(edges))
            raise e
        # print("#xxxxx",x,next_sample,raw_hist)
        # print(support_curve)
        if support_curve:
            curve.append((x,score))
        # print("##yield")
        # yield (x,score)
        # Sample the score into the raw histogram, skipping scaffold ends.
        if (not raw_hist==False) and x>next_sample:
            if min(x,slen-x)>scaffold_end_filter:
                next_sample += hist_sample_interval
                hist_bin=int(score/hist_bisize)*hist_bisize
                raw_hist[ hist_bin ] = raw_hist.get(hist_bin,0)+1
        # print("#hist bin",hist_bin,score,raw_hist[ hist_bin ])
        # sys.stdout.flush()
        # Obsolete? each gap between contigs?
        while ji<len(joins) and x>joins[ji][0]:
            gap_scores[ji]    =score
            gap_coverages[ji] =n
            ji+=1
        if score>t1:
            tripped=True
            last_trip=x
        # Entering a low-support stretch (state 0 -> 1).
        if tripped and score<t2 and state==0:
            stretch_start=x
            state=1
            low_point =score
            low_x = x
        # Reached beginning of region for candidate break (state = 0)
        if state==1 and score>t2:
            if logfile:
                break_buffer.append( (scaffold,stretch_start,x,low_x,low_point,slen) )
            # print(scaffold,stretch_start,x,slen,low_point,"break")
            state=0
        if state==1:
            if score<low_point:
                low_point=score
                low_x=x
        if debug: print("dd:",scaffold,edges[i][0],score,rs,state,x,stretch_start,score,n,edges[i][2])
        if gpf: gpf.write("{}\t{}\t{}\t{}\n".format(edges[i][0],score,rs,n))
        last=edges[i][0]
    segments = []
    # Keep only candidate breaks not in the trailing low-support region.
    for scaffold,stretch_start,x,low_x,low_point,slen in break_buffer:
        if x < last_trip:
            logfile.write("{} {} {} {} {} {} rawLLR\n".format(scaffold,stretch_start,x,low_x,low_point,slen))
            segments.append((scaffold,stretch_start,x,low_x,low_point))
    return segments,curve
# for ji in range(len(joins)):
# print("\t".join(map(str,["gapscore:",ji]+list(joins[ji])+[gap_scores.get(ji,0),gap_coverages.get(ji,0)])))
def pairs2support(scaffold,
                  pairs, # internal to scaffold
                  model,
                  slen=0,
                  masked_segments=[],
                  mapper=None,
                  buff=0,
                  debug=False,
                  t1=20.0,
                  t2=5.0,
                  gpf=False, # gnu plot file handle?
                  minx=1000,
                  maxx=1e7,
                  joins=[],
                  logfile=False,
                  binwidth=1000,
                  nreject=2, # how many rows or colums to toss out
                  raw_hist=False,
                  clipped_hist=False,
                  support_curve=False):
    """Scan one scaffold's read pairs and log low-support candidate breaks.

    Builds two support statistics over the scaffold:
      * a fine-grained per-read LLR (delegated to fine_grained_support),
        logged as "rawLLR" segments, and
      * a "clipped" LLR over binwidth x binwidth tiles, where the *nreject*
        highest-scoring rows/columns are discarded before summing, logged as
        "clippedLLR" segments.
    Returns the fine-grained support curve.

    NOTE(review): masked_segments and joins use mutable default arguments;
    both appear to be only read here, but callers should not rely on the
    shared defaults.
    """
    # slen=mapper.scaffold_length[scaffold]
    # if debug: print("#masked:",scaffold,len(masked_segments),masked_segments[:10])
    # print("#",scaffold,masked_segments,file=logfile,sep="\t")
    logfile.flush()
    edges=[]  # pairs
    nb=[]     # buffer to keep track of how many pairs cover
    tile_scores={}
    tile_counts={}
    tile_reads={}
    maxtile=0
    # masked_segments=[]
    # Pass 1: turn each read pair into LLR edge events and tile accumulators.
    for p in pairs:
        if len(p)>=16: # old style, mostly ignore the additional info now
            if p[1]<p[2]:
                a,b,c1,c2,w,z=p[1],p[2],p[3],p[4],p[6],p[7]
            else:
                a,b,c1,c2,w,z=p[2],p[1],p[4],p[3],p[7],p[6]
            # masked_segments = p[17]
            tid=p[16]
        else:
            # a,b = p[0],p[1]
            # Order the coordinates
            a=min( p[0],p[1])
            b=max( p[0],p[1])
            c1,c2="a","a"
            tid="x"
        if a>slen or b> slen:
            print("how could a read be at x > slen?",a,b,slen)
            raise Exception
        # Throw away really far and short innies
        if abs(b-a) > maxx: continue
        if abs(b-a) < minx: continue
        # insert size log liklihood
        ll=model.lnF(b-a)
        if debug: print("pt:",scaffold,a,b,b-a,tid,ll-model.lnFinf)
        #For the old-style exact LLR score, size of "buff" is taken off
        # to be conservative about support
        edges.append( tuple([a+buff    ,  ll,c1,"a"]) )
        edges.append( tuple([b-buff +1 , -ll,c2,"b"]) )
        nb.append( tuple([a+buff    , 1]) )
        nb.append( tuple([b-buff +1 ,-1]) )
        #For the new-style clipped LLR score add to appropriate tile
        tile = pair2bin2D((a,b),binwidth) # tile is a tuple rows, colums
        maxtile = max(maxtile,tile[0],tile[1]) # furthest tiles seen for size?
        tile_scores[tile] = tile_scores.get(tile,0.0)+ll
        tile_counts[tile] = tile_counts.get(tile,0)+1
        # for debuggin? should we remove when not debugging?
        tile_reads[tile] = tile_reads.get(tile,[]) + [tid]
    # Precompute, for every pair of nearby masked segments, the expected-pair
    # excess (n_bar - n_bar0) as position-keyed deltas for the fine scan.
    masked_segment_pairs_n_minus_n0 = []
    for i in range(len(masked_segments)):
        a,b = masked_segments[i]
        for j in range(i+1,len(masked_segments)):
            c,d = masked_segments[j]   # pair of masked segments:  a---b   c-----d
            if c-b > maxx: continue    # gap = c-b
            if b-a<0:
                print("#wtf?",scaffold,(a,b),(c,d),i,j,file=logfile,sep="\t")
                logfile.flush()
                raise Exception
            if d-c<0:
                print("#wtf?",scaffold,(a,b),(c,d),i,j,file=logfile,sep="\t")
                logfile.flush()
                raise Exception
            if c-b<0:
                print("#wtf?",scaffold,(a,b),(c,d),i,j,file=logfile,sep="\t")
                logfile.flush()
                raise Exception
            n_bar = ces.model.n_bar(b-a,d-c,c-b)
            n_bar0= ces.model.n_bar0((b-a)*(d-c))
            if debug: print("X",i,j,(a,b),(c,d),b-a,d-c,c-b,n_bar,n_bar0,sep="\t")
            masked_segment_pairs_n_minus_n0.append( (b, (n_bar-n_bar0)) )
            masked_segment_pairs_n_minus_n0.append( (c,-(n_bar-n_bar0)) )
    masked_segment_pairs_n_minus_n0.sort()
    # Fine-grained (per-read) support scan; collects "rawLLR" segments.
    fine_grain_segments,fine_grain_support_curve=fine_grained_support(edges,nb,scaffold,pairs,model,buff,debug,t1,t2,gpf,minx,maxx,joins,logfile,binwidth,slen,masked_segments,masked_segment_pairs_n_minus_n0=masked_segment_pairs_n_minus_n0,raw_hist=raw_hist,support_curve=support_curve)
    if debug:
        print("w:",scaffold,slen,len(fine_grain_segments),len(edges),sep="\t")
        for a,b in masked_segments:
            print(a,b)
            if a>slen:
                print("a>slen",a,slen)
                raise Exception
            if b>slen:
                print("b>slen",b,slen)
                raise Exception
    # clipped LLR score:
    tile_bumps=[]
    # print("#maxtile=",maxtile,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
    # logfile.flush()
    mask_iter_i=0
    # Score every tile (i, j) within maxx of the diagonal; each off-diagonal
    # tile contributes a +score event at its row start and a -score event at
    # its column start ("bumps").
    for i in range(maxtile+1):
        # print("#i=",i,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
        # logfile.flush()
        while mask_iter_i<len(masked_segments) and masked_segments[mask_iter_i][1]<i*binwidth: mask_iter_i+=1
        j=i
        mask_iter_j=mask_iter_i
        while ((j-i)*binwidth<maxx) and j<=maxtile:
            tile=(i,j)
            # print("#tile=",(i,j),strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
            # logfile.flush()
            tscore = tile_scores.get(tile,0.0)
            score = model.tileScore(binwidth,tile,tile_counts.get(tile,0),tscore,rangeCutoff=maxx,minSep=minx)
            # print("#score=",score,masked_segments,i,j,mask_iter_i,mask_iter_j,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
            # logfile.flush()
            mask_score_delta = tile_mask_score_delta(i,j,binwidth,masked_segments,model,mask_iter_i,mask_iter_j,debug)
            score+= mask_score_delta
            # print("#mask_score_delta=",mask_score_delta,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
            # logfile.flush()
            if debug:
                print("tile:",scaffold,tile[0],tile[1],tile[0]*binwidth,tile[1]*binwidth,tscore,tile_counts.get(tile,0.0),score,mask_score_delta,sep="\t")
                for read in tile_reads.get(tile,[]):
                    print("tileread:",tile,read)
            if not i==j:
                tile_bumps.append( (i*binwidth,i,j, score, 1) )
                tile_bumps.append( (j*binwidth,i,j,-score,-1) )
            j+=1
            while mask_iter_j<len(masked_segments) and masked_segments[mask_iter_j][1]<j*binwidth: mask_iter_j+=1
    print("#done making tile bumps. len(tile_bumps)=",len(tile_bumps),strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
    logfile.flush()
    if debug:
        for tile in tile_scores.keys():
            print("tile:",scaffold,tile[0],tile[1],tile[0]*binwidth,tile[1]*binwidth,tile_scores[tile],tile_counts[tile],model.tileScore(binwidth,tile,tile_counts[tile],tile_scores[tile],rangeCutoff=maxx,minSep=minx),sep="\t")
            for read in tile_reads[tile]:
                print("tileread:",tile,read)
    tile_bumps.sort()
    print("#done sorting tile bumps",strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
    logfile.flush()
    tripped=False
    row_sums={}
    col_sums={}
    row_counts={}
    col_counts={}
    break_buffer = []
    stretch_start=0
    # NOTE(review): minx (the minimum-separation parameter) is reused below
    # as "position of the running minimum"; the parameter value is no longer
    # available past this point.  Verify this shadowing is intended.
    minx=0
    state=0
    low_point=0
    ii=0
    last_trip=0
    # Sweep the sorted bumps: at each position the active row/column sums
    # give the clipped support, with the nreject best rows/cols discarded.
    while ii<len(tile_bumps):
        x,i,j,scoreD,dn = tile_bumps[ii]
        row_sums[i] = row_sums.get(i,0.0) + scoreD
        col_sums[j] = col_sums.get(j,0.0) + scoreD
        row_counts[i] = row_counts.get(i,0) + dn
        col_counts[j] = col_counts.get(j,0) + dn
        if dn==-1 and row_counts[i]==0: del row_sums[i]
        if dn==-1 and col_counts[j]==0: del col_sums[j]
        if ii%100000==0:
            print("#progress: ii= {} / {}".format(ii,len(tile_bumps)),x,i,j,scoreD,strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
            logfile.flush()
        # Absorb all bumps sharing this position before scoring it.
        while ii<len(tile_bumps)-1 and tile_bumps[ii+1][0]==x:
            ii+=1
            x,i,j,scoreD,dn = tile_bumps[ii]
            row_sums[i] = row_sums.get(i,0.0) + scoreD
            col_sums[j] = col_sums.get(j,0.0) + scoreD
            row_counts[i] = row_counts.get(i,0) + dn
            col_counts[j] = col_counts.get(j,0) + dn
            if dn==-1 and row_counts[i]==0: del row_sums[i]
            if dn==-1 and col_counts[j]==0: del col_sums[j]
        total = sum(row_sums.values())
        row_vals = list(row_sums.values())
        col_vals = list(col_sums.values())
        row_vals.sort()
        col_vals.sort()
        without_best_row = sum(row_vals[:-nreject]) #total - max(row_sums.values())
        without_best_col = sum(col_vals[:-nreject]) #total - max(col_sums.values())
        trimmed_total = min(without_best_row,without_best_col,total)
        score=trimmed_total
        if (not clipped_hist==False):
            if min(x,slen-x)>scaffold_end_filter:
                hist_bin=int(score/hist_bisize)*hist_bisize
                clipped_hist[ hist_bin ] = clipped_hist.get(hist_bin,0)+1
        if score>t1:
            tripped=True
            last_trip=x
        if tripped and score<t2 and state==0:
            stretch_start=x
            state=1
            low_point =score
            minx = x
        if debug: print("trimmed_support",x,trimmed_total,total,without_best_row,without_best_col,tripped,maxtile*binwidth,scaffold,tripped,score<t2,state,low_point,minx,stretch_start,last_trip)
        if state==1 and score>t2:
            if logfile:
                break_buffer.append( (scaffold,stretch_start,x,minx,low_point,slen) )
            # print(scaffold,stretch_start,x,slen,low_point,"break")
            state=0
        if state==1:
            if score<low_point:
                low_point=score
                minx=x
        ii+=1
    print("#done building breaks buffer",strftime("%Y-%m-%d %H:%M:%S"),file=logfile,sep="\t")
    logfile.flush()
    # Flush a low-support stretch still open at the end of the scaffold.
    if state==1:
        if logfile:
            break_buffer.append( (scaffold,stretch_start,x,minx,low_point,slen) )
    # Log clipped breaks, annotated with the worst overlapping rawLLR score.
    for scaffold,stretch_start,x,minx,low_point,slen in break_buffer:
        if (x < last_trip) or (minx < last_trip):
            min_fine_graph_segment_overlap_score=False
            for fg_scaffold,fg_stretch_start,fg_x,fg_minx,fg_low_point in fine_grain_segments:
                if pairs_overlap((fg_stretch_start,fg_x),(stretch_start,x)):
                    if (not min_fine_graph_segment_overlap_score) or (min_fine_graph_segment_overlap_score > fg_low_point):
                        min_fine_graph_segment_overlap_score = fg_low_point
            logfile.write("{} {} {} {} {} {} {} clippedLLR\n".format(scaffold,stretch_start,x,minx,low_point,slen,min_fine_graph_segment_overlap_score))
    return fine_grain_support_curve
if __name__=="__main__":
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-d','--debug',default=False,action="store_true",help="Turn on debugging ouput")
    parser.add_argument('-p','--progress',default=False,action="store_true",help="Print progress info")
    parser.add_argument('-l','--layout',default=False,help="File containing scaffolding layout.")
    parser.add_argument('-s','--segments',default=False,help="File containing scaffolding segments.")
    # parser.add_argument('-L','--length',default=False,help="File containing lenghts.")
    parser.add_argument('-m','--mask',default=False,help="File containing segments to mask.")
    parser.add_argument('-g','--plotfiles',default=False,help="Plot file name prefix.")
    parser.add_argument('-f','--logfile',default=False,help="Output file for storing score segments.")
    parser.add_argument('-q','--mapq',type=int,default=55)
    parser.add_argument('-t','--t1',type=float,default=50.0,help="Don't break in the trailing regions at the end of scaffolds where support never exceeds this number.")
    parser.add_argument('-T','--t2',type=float,default=25.0,help="Report all segments where support dips below this threshold, if they are not in the trailing ends.")
    parser.add_argument('-w','--binwidth',type=float,default=3000.0)
    parser.add_argument('-n','--nreject',type=int,default=2)
    parser.add_argument('-c','--my_chunk',type=int,default=1)
    parser.add_argument('-C','--nchunks',type=int,default=32)
    parser.add_argument('--minx',type=int,default=1000, help=" ")
    parser.add_argument('--maxx',type=int,default=200000,help=" ")
    parser.add_argument('-S','--scaffold',default=False)
    # parser.add_argument('-b','--bamfiles',required=True)
    parser.add_argument('-b','--bamfile',action="append")
    parser.add_argument('-M','--model')
    args = parser.parse_args()
    if args.progress: print("#",args)

    # Load the insert-size model: the model file holds a Python expression
    # with the fit parameters. NOTE: eval() is only acceptable here because
    # the model file is trusted, locally generated input.
    fmodel=open( args.model )
    contents = fmodel.read()
    try:
        fit_params=eval(contents)
    except Exception:
        # NOTE: on failure fit_params stays undefined, so the call below
        # will raise NameError; the message at least names the bad file.
        print("couldn't set model parameters", args.model)
    fmodel.close()  # bug fix: was `fmodel.close` (attribute access only -- the file was never closed)
    ces.set_exp_insert_size_dist_fit_params(fit_params)

    # Optional mask of segments to exclude from support computation.
    if args.mask:
        # NOTE(review): this file handle is never closed explicitly;
        # harmless for a one-shot script but worth tidying.
        mask_ranges = read_mask_ranges( open(args.mask) )
    else:
        mask_ranges={}
    #
    # Read in a hirise layout (from "^p:" lines)
    #
    mapper = MapperLite()
    mapper.load_layout(open(args.layout))

    # Either process a single named scaffold, or the subset of scaffolds
    # whose md5-based hash falls into this worker's chunk (for sharding the
    # work across --nchunks parallel jobs).
    if args.scaffold:
        my_scaffolds={args.scaffold:1}
    else:
        my_scaffolds={}
    scaffold_hashes={}
    for s in list(mapper.scaffolds.keys()):
        scaffold_hashes[s]=struct.unpack("<L", hashlib.md5(s.encode("utf-8")).digest()[:4])[0]%args.nchunks
        if scaffold_hashes[s]==args.my_chunk:
            my_scaffolds[s]=1
            if args.debug: print("my scaffold:",s)
    bamlist = [ pysam.Samfile(bamfile,"rb") for bamfile in args.bamfile ]
    segment_logfile=False
    if args.logfile: segment_logfile = open(args.logfile,"wt")
    for sc in sorted(my_scaffolds.keys()):
        ncontigs=len(mapper.scaffold_contigs[sc])
        slen=mapper.scaffold_length[sc]
        # Single-contig scaffolds have no joins to evaluate.
        if not ncontigs>1: continue
        print("sc:",sc,ncontigs,slen)
        # print(sc,mapper.scaffold_contigs[sc])
        fp=False
        contigs = sorted(mapper.scaffold_contigs[sc],key=lambda x: mapper.contigx[x])
        ii=0
        # Midpoints of the gaps between consecutive contigs (candidate
        # break locations), accounting for contig strand.
        gap_locations=[]
        for i in range(len(contigs)-1):
            con = contigs[i]
            x= max(mapper.contigx[con]+mapper.contig_strand[con]*mapper.contig_length[con],mapper.contigx[con])
            con2 = contigs[i+1]
            y= min(mapper.contigx[con2]+mapper.contig_strand[con2]*mapper.contig_length[con2],mapper.contigx[con2])
            gap_locations.append((int((x+y)/2),con,con2))
        if args.plotfiles:
            # Write a gnuplot-compatible trace of the contig tiling.
            fn="{}{}".format(args.plotfiles,sc)
            fp=open(fn,"wt")
            print("#plot \"{}\" i 2 u 1:2 w steps, \"\" i 1 u 1:($2/20) lt 3 pt 5 ps 0.7, \"\" i 0 u 1:(($2-5)*100) w steps lt 3, -500 lt 3".format(fn))
            ii=0
            fp.write("0\t0\n")
            for con in contigs:
                ii+=1
                ii=ii%2
                if args.debug: print(con,mapper.contigx[con],mapper.contig_strand[con],mapper.contig_length[con],slen)
                if mapper.contig_strand[con]==1:
                    fp.write("{}\t{}\n".format( mapper.contigx[con],2*ii-1 ))
                    fp.write("{}\t{}\n".format( mapper.contigx[con]+mapper.contig_length[con],0 ))
                else:
                    fp.write("{}\t{}\n".format( mapper.contigx[con]-mapper.contig_length[con],2*ii-1 ))
                    fp.write("{}\t{}\n".format( mapper.contigx[con],0 ))
            fp.write("\n\n\n")
        masked_segments = make_scaffold_mask(sc,mapper,mask_ranges)
        pairs2support(sc,chicago_pairs(sc,mapper,bamlist,minq=args.mapq,mask=mask_ranges),ces.model,masked_segments=masked_segments,slen=mapper.scaffold_length[sc],buff=1,debug=args.debug,gpf=fp,joins=gap_locations,minx=args.minx,maxx=args.maxx,logfile=segment_logfile,t1=args.t1,t2=args.t2,binwidth=args.binwidth,nreject=args.nreject)
        if fp: fp.close()
| DovetailGenomics/HiRise_July2015_GR | scripts/chicago_support_bootstrap.py | chicago_support_bootstrap.py | py | 30,342 | python | en | code | 28 | github-code | 13 |
71808371858 | from vb2py.vbfunctions import *
from vb2py.vbdebug import *
from vb2py.vbconstants import *
import ExcelAPI.XLW_Workbook as P01
import proggen.M02_Public as M02
#import proggen.M02_global_variables as M02GV
#import proggen.M03_Dialog as M03
#import proggen.M06_Write_Header as M06
#import proggen.M06_Write_Header_LED2Var as M06LED
#import proggen.M06_Write_Header_Sound as M06Sound
#import proggen.M06_Write_Header_SW as M06SW
#import proggen.M07_COM_Port as M07
#import proggen.M08_ARDUINO as M08
import proggen.M09_Language as M09
#import proggen.M09_Select_Macro as M09SM
#import proggen.M09_SelectMacro_Treeview as M09SMT
#import proggen.M10_Par_Description as M10
#import proggen.M20_PageEvents_a_Functions as M20
import proggen.M25_Columns as M25
#import proggen.M27_Sheet_Icons as M27
#import proggen.M28_divers as M28
import proggen.M30_Tools as M30
#import proggen.M31_Sound as M31
#import proggen.M37_Inst_Libraries as M37
#import proggen.M60_CheckColors as M60
#import proggen.M70_Exp_Libraries as M70
import proggen.M80_Create_Mulitplexer as M80
import mlpyproggen.Prog_Generator as PG
""" https://wellsr.com/vba/2019/excel/vba-playsound-to-play-system-sounds-and-wav-files/
# VB2PY (CheckDirective) VB directive took path 1 on VBA7
-------------------------------------------------------------------------
"""
# winmm.dll PlaySound() dwFlags values (Windows Multimedia API).
__SND_SYNC = 0x0        # play synchronously (call blocks until the sound ends)
__SND_ASYNC = 0x1       # play asynchronously (call returns immediately)
__SND_NODEFAULT = 0x2   # do not fall back to the default system sound
__SND_NOSTOP = 0x10     # do not interrupt a sound that is already playing
__SND_ALIAS = 0x10000   # name is a registry system-event alias
__SND_FILENAME = 0x20000  # name is a .wav file path
# VB2PY (UntranslatedCode) Argument Passing Semantics / Decorators not supported: ThisSound='Beep' - ByVal
# VB2PY (UntranslatedCode) Argument Passing Semantics / Decorators not supported: ThisValue=VBMissingArgument - ByVal
# VB2PY (UntranslatedCode) Argument Passing Semantics / Decorators not supported: ThisCount=1 - ByVal
# VB2PY (UntranslatedCode) Argument Passing Semantics / Decorators not supported: Wait=False - ByVal
def BeepThis2(ThisSound='Beep', ThisValue=VBMissingArgument, ThisCount=1, Wait=False):
    """Play a named system sound or .wav file via winmm PlaySound, returning ThisValue.

    Translated from VBA (see the wellsr.com reference in the module header).
    ThisSound is mapped to a registry sound alias or resolved to a .wav file;
    ThisCount repeats the sound and Wait forces synchronous playback.
    """
    # NOTE(review): this early return (marked "#*HL") disables the entire
    # body below -- every call to BeepThis2 is currently a no-op.
    return #*HL
    fn_return_value = None
    sPath = String()
    flags = int()
    sMedia = '\\Media\\'
    # VB semantics: if ThisValue was omitted, echo back ThisSound instead.
    if IsMissing(ThisValue):
        ThisValue = ThisSound
    fn_return_value = ThisValue
    # Repeated sounds must play synchronously or they would overlap.
    if ThisCount > 1:
        Wait = True
    flags = __SND_ALIAS
    sPath = StrConv(ThisSound, vbProperCase)
    # Map friendly names onto registry sound-alias names.
    if (sPath == 'Beep'):
        Beep()
        return fn_return_value
    elif (sPath == 'Asterisk') or (sPath == 'Exclamation') or (sPath == 'Hand') or (sPath == 'Notification') or (sPath == 'Question'):
        sPath = 'System' + sPath
    elif (sPath == 'Connect') or (sPath == 'Disconnect') or (sPath == 'Fail'):
        sPath = 'Device' + sPath
    elif (sPath == 'Mail') or (sPath == 'Reminder'):
        sPath = 'Notification.' + sPath
    elif (sPath == 'Text'):
        sPath = 'Notification.SMS'
    elif (sPath == 'Message'):
        sPath = 'Notification.IM'
    elif (sPath == 'Fax'):
        sPath = 'FaxBeep'
    elif (sPath == 'Select'):
        sPath = 'CCSelect'
    elif (sPath == 'Error'):
        sPath = 'AppGPFault'
    elif (sPath == 'Close') or (sPath == 'Maximize') or (sPath == 'Minimize') or (sPath == 'Open'):
        # ok
        pass
    elif (sPath == 'Default'):
        sPath = '.' + sPath
    elif (sPath == 'Chimes') or (sPath == 'Chord') or (sPath == 'Ding') or (sPath == 'Notify') or (sPath == 'Recycle') or (sPath == 'Ringout') or (sPath == 'Tada'):
        # These are shipped as .wav files under %SystemRoot%\Media.
        sPath = Environ('SystemRoot') + sMedia + sPath + '.wav'
        flags = __SND_FILENAME
    else:
        # Not a known alias: treat as a .wav file, searching the given path,
        # then the active workbook's folder, then %SystemRoot%\Media.
        if LCase(Right(ThisSound, 4)) != '.wav':
            ThisSound = ThisSound + '.wav'
        sPath = ThisSound
        if Dir(sPath) == '':
            sPath = P01.ActiveWorkbook.Path + '/' + ThisSound
        if Dir(sPath) == '':
            sPath = Environ('SystemRoot') + sMedia + ThisSound
        flags = __SND_FILENAME
    flags = flags + IIf(Wait, __SND_SYNC, __SND_ASYNC)
    while ThisCount > 0:
        PlaySound(sPath, 0, flags)
        ThisCount = ThisCount - 1
    return fn_return_value
# VB2PY (UntranslatedCode) Argument Passing Semantics / Decorators not supported: ThisSound='Beep' - ByVal
# VB2PY (UntranslatedCode) Argument Passing Semantics / Decorators not supported: ThisValue=VBMissingArgument - ByVal
def BeepThis1(ThisSound='Beep', ThisValue=VBMissingArgument):
    """Sound the default system beep and echo back a value.

    Returns ThisValue, unless it was omitted (the VB "missing argument"
    sentinel), in which case ThisSound is returned instead.
    """
    #-------------------------------------------------------------------------
    result = ThisSound if IsMissing(ThisValue) else ThisValue
    Beep()
    return result
def __Test_BeepThis1():
    """Ad-hoc manual test of the sound helpers."""
    # NOTE(review): despite its name this exercises BeepThis2, not BeepThis1.
    # The nested BeepThis2() calls inside VBGetMissingArgument are vb2py
    # translation artifacts (extra no-arg invocations made only to obtain
    # the "missing argument" sentinel).
    #BeepThis2 "Default"
    #BeepThis2 "Asterisk"
    #BeepThis2 "Fax"
    BeepThis2('Windows Information Bar.wav', VBGetMissingArgument(BeepThis2(), 1), VBGetMissingArgument(BeepThis2(), 2), True)
# VB2PY (UntranslatedCode) Option Explicit
# VB2PY (UntranslatedCode) Public Declare PtrSafe Function PlaySound Lib "winmm.dll" Alias "PlaySoundA" (ByVal lpszName As String, ByVal hModule As LongPtr, ByVal dwFlags As Long) As Long
| haroldlinke/pyMobaLedLib | python/proggen/M31_Sound.py | M31_Sound.py | py | 4,975 | python | en | code | 3 | github-code | 13 |
def soma_quadrados(n):
    """Return 1**2 + 2**2 + ... + n**2 using the closed-form formula (O(1))."""
    return n * (n + 1) * (2 * n + 1) // 6

def main():
    """Read n from stdin and print the sum of squares, or reject n <= 0."""
    n = int(input('Digite um n > 0: '))
    if n > 0:
        print(soma_quadrados(n))
    else:
        print('input inválido!')

# Guarding the entry point keeps the prompt from running on import
# (the original executed input() at module level).
if __name__ == "__main__":
    main()
def FindAngle (hour, minute):
    """Return the smaller angle (in degrees) between the hour and minute hands.

    hour is normalized with ``hour % 12`` so both 12-hour and 24-hour style
    inputs work; the original implementation could return a negative value
    for hour > 12 (e.g. FindAngle(13, 0) gave -30 instead of 30).
    Results for hour in 1..12 are unchanged.
    """
    # Minute hand: 360/60 = 6 degrees per minute.
    minute_angle = (360 / 60) * minute
    # Hour hand: 360/12 = 30 degrees per hour, drifting with the minutes.
    hour_angle = (360 / 12) * ((hour % 12) + minute / 60)
    diff = abs(minute_angle - hour_angle)
    # The two hands split the circle into diff and 360-diff; report the smaller.
    return min(diff, 360 - diff)

print(FindAngle(12,45))
24129383424 | # Instructions :
# Create a class to handle paginated content in a website. A pagination is used to divide long lists of content in a series of pages.
# The Pagination class will accept 2 parameters:
# items (default: []): A list of contents to paginate.
# pageSize (default: 10): The amount of items to show in each page.
# So for example we could initialize our pagination like this:
# alphabetList = "abcdefghijklmnopqrstuvwxyz".split('')
# p = Pagination(alphabetList, 4)
# The Pagination class will have a few methods:
# getVisibleItems() : returns a list of items visible depending on the pageSize
# So for example we could use this method like this:
# p.getVisibleItems()
# # ["a", "b", "c", "d"]
# You will have to implement various methods to go through the pages such as:
# prevPage()
# nextPage()
# firstPage()
# lastPage()
# goToPage(pageNum)
# Here’s a continuation of the example above using nextPage and lastPage:
# alphabetList = "abcdefghijklmnopqrstuvwxyz".split('')
# p = Pagination(alphabetList, 4)
# p.getVisibleItems()
# # ["a", "b", "c", "d"]
# p.nextPage()
# p.getVisibleItems()
# # ["e", "f", "g", "h"]
# p.lastPage()
# p.getVisibleItems()
# # ["y", "z"]
# Notes
# The second argument (pageSize) could be a float, in that case just convert it to an int (this is also the case for the goToPage method)
# The methods used to change page should be chainable, so you can call them one after the other like this: p.nextPage().nextPage()
# Please set the p.totalPages and p.currentPage attributes to the appropriate number as there cannot be a page 0.
# If a page is outside of the totalPages attribute, then the goToPage method should go to the closest page to the number provided
# (e.g. there are only 5 total pages, but p.goToPage(10) is given: the p.currentPage should be set to 5; if 0 or a negative number is given, p.currentPage should be set to 1).
class Pagination():
    """Split a list of items into fixed-size, 1-indexed pages.

    Navigation methods return ``self`` so they can be chained, e.g.
    ``p.nextPage().getVisibleItems()``. Float page sizes and page numbers
    are truncated to int; out-of-range page numbers are clamped to
    [1, numberOfPages].
    """
    def __init__(self, items = None, pagesize = 10):
        # A mutable default argument ([]) is shared across calls, so the
        # default list is created here instead of in the signature.
        items = [] if items is None else items
        self.items = items
        self.pagesize = int(pagesize)  # float page sizes are truncated
        # Build the 1-indexed page map by slicing; the original emptied the
        # caller's list via `del items[...]` as a side effect (bug).
        self.pages = {}
        for page_num, start in enumerate(range(0, len(items), self.pagesize), start=1):
            self.pages[page_num] = items[start:start + self.pagesize]
        self.currentPage = 1
        self.numberOfPages = len(self.pages)
    def getVisibleItems(self):
        """Print the items on the current page; chainable."""
        print(self.pages[self.currentPage])
        return self
    def prevPage(self):
        """Move one page back, clamped at page 1; chainable."""
        self.currentPage = max(1, self.currentPage - 1)
        return self
    def nextPage(self):
        """Move one page forward, clamped at the last page; chainable."""
        self.currentPage = min(self.numberOfPages, self.currentPage + 1)
        return self
    def firstPage(self):
        """Jump to page 1; chainable."""
        self.currentPage = 1
        return self
    def lastPage(self):
        """Jump to the last page; chainable."""
        self.currentPage = self.numberOfPages
        return self
    def goToPage(self, number):
        """Jump to page int(number), clamped to the valid range; chainable."""
        self.currentPage = min(max(int(number), 1), self.numberOfPages)
        return self
# Ad-hoc smoke test: paginate the alphabet 4 letters per page (4.9 is
# truncated to 4) and chain every navigation method once, printing the
# visible page after each move.
alphabetList = [*'abcdefghijklmnopqrstuvwxyz']
p = Pagination(alphabetList, 4.9)
p.getVisibleItems().nextPage().getVisibleItems().prevPage().getVisibleItems().lastPage().getVisibleItems().firstPage().getVisibleItems().goToPage(2.2).getVisibleItems()
# p.nextPage()
# p.getVisibleItems()
# p.prevPage()
# p.getVisibleItems()
# p.lastPage()
# p.getVisibleItems()
# p.firstPage()
# p.getVisibleItems()
# p.goToPage(2.2)
# p.getVisibleItems()
| ydb5755/DI_Bootcamp | Week-8/Day-4/DailyChallenge/Pagination.py | Pagination.py | py | 3,672 | python | en | code | 0 | github-code | 13 |
21616011612 | import json
from bson.dbref import DBRef
from bson import json_util
import sys
import pymongo
import logging
from fuzzywuzzy import process
#Create and configure logger
# Module-level setup: file logger plus a shared MongoDB (Azure Cosmos DB)
# client used by every function below.
logging.basicConfig(filename="server.log",
                    format='%(asctime)s %(message)s',
                    filemode='a')
#Creating an object
logger=logging.getLogger()
#Setting the threshold of logger to DEBUG
logger.setLevel(logging.DEBUG)
# SECURITY NOTE(review): the connection string below embeds live database
# credentials in source control; it should be loaded from an environment
# variable or secret store instead, and the exposed key rotated.
uri = "mongodb://218ffa09-0ee0-4-231-b9ee:zTV4cwDG0vM49J2GFsw72JzwOD79Bv3dPU8fbVLb5pbh3p0CmTBYcvhrFKTjtl1s7hgYSfRbMOrsVve6hfvhag==@218ffa09-0ee0-4-231-b9ee.documents.azure.com:10255/?ssl=true&replicaSet=globaldb"
client = pymongo.MongoClient(uri)
logger.debug("Obtained the client")
# All collections (userInfo, userMailInfo) live in the `test` database.
mydb = client.test
def read_fromDB(jsonData):
    """Return one 10-item page of non-deleted users as a JSON array string.

    jsonData['page'] is the 1-based page number; the internal `_id` and
    `user_id` fields are excluded by the projection.
    """
    num = int(jsonData['page'])
    skips = 10 * (num - 1)
    return json.dumps(list(mydb.userInfo.find({"userDeleted":False},{'_id' : 0,'user_id':0}).skip(skips).limit(10)), default=json_util.default)
def read_fromDBSpecfic(jsonData):
    """Fuzzy-match a user by name from a list of query tokens.

    jsonData is iterated as a sequence of candidate name strings; each token
    longer than 3 characters is fuzzy-matched (fuzzywuzzy) against all
    non-deleted user names. Returns the best-matching user as a JSON object
    string, or '{}' if no token scored above the threshold.
    """
    logger.debug("This is the JsonData")
    logger.debug(jsonData)
    allList = list(mydb.userInfo.find({"userDeleted":False},{'name':1}))
    newList = [ d["name"] for d in allList]
    # maxFound starts at 89, so only matches with score >= 90 are accepted;
    # if it is still 89 after the loop, no token matched well enough.
    maxFound = 89
    obtainedName = ""
    for item in jsonData:
        # Very short tokens (<= 3 chars) produce noisy matches; skip them.
        if(len(item) <= 3):
            continue
        highest = process.extractOne(item,newList)
        logger.debug(highest)
        # A perfect score is unbeatable -- stop searching early.
        if(highest[1] == 100):
            maxFound = highest[1]
            obtainedName = highest[0]
            break
        if(highest[1]>maxFound):
            maxFound = highest[1]
            obtainedName = highest[0]
    logger.debug("Obtained Name is")
    logger.debug(obtainedName)
    logger.debug("Max Ratio found for Obtained Name")
    logger.debug(maxFound)
    if(maxFound == 89):
        logger.debug("Could not find any user")
        return json.dumps({},default = json_util.default)
    foundUser = dict(mydb.userInfo.find_one({'name':obtainedName,"userDeleted":False},{'_id' : 0,'user_id':0}))
    return json.dumps(foundUser,default = json_util.default)
def add_usertoDB(jsonData):
    """Insert a new user document (userDeleted=False) and return a success JSON string.

    jsonData must supply: name, department, building, division, emailaddress,
    floor, cubicle and user_id.
    """
    mydb.userInfo.insert({'name':jsonData['name'],'department':jsonData['department'],'building':jsonData['building'],'division':jsonData['division'],'email':jsonData['emailaddress'],'floor':jsonData['floor'],'cubicle':jsonData['cubicle'],"user_id":jsonData["user_id"],"userDeleted":False})
    logger.debug("Sucessfully added")
    return json.dumps({"status": "Success","statusreason": "addSuccess"})
def delete_userfromDB(jsonData):
    """Soft-delete (userDeleted=True) the named user and return a success JSON string."""
    founduser = mydb.userInfo.find_one({"userDeleted":False,"name":jsonData["name"]},{"_id":1})
    # NOTE(review): `"user_id":1` here is part of the *filter*, so the update
    # only matches documents whose user_id equals 1. It looks like a
    # projection dict pasted into the filter by mistake -- confirm intent.
    mydb.userInfo.update_many({"_id":founduser["_id"],"user_id":1},{"$set":{"userDeleted":True}})
    return json.dumps({"status": "Success","statusreason": "deleteSuccess"})
def update_user(jsonData):
    """Update a user's profile fields and invalidate related mail-info docs.

    Looks the user up by name among non-deleted users, rewrites the profile
    fields from jsonData, then soft-deletes userMailInfo documents that
    reference the user via DBRef. Returns a success JSON string.
    """
    founduser = mydb.userInfo.find_one({"userDeleted":False,"name":jsonData["name"]},{"_id":1})
    # NOTE(review): as in delete_userfromDB, `"user_id":1` is used inside the
    # filters below and restricts the updates to user_id == 1 -- it looks
    # like a projection mistakenly used as a filter; confirm intent.
    mydb.userInfo.update_many({"_id":founduser["_id"],"user_id":1},{"$set":{'department':jsonData['department'],'building':jsonData['building'],'division':jsonData['division'],'email':jsonData['emailaddress'],'floor':jsonData['floor'],'cubicle':jsonData['cubicle']}})
    newDbref = DBRef("mydb.userInfo",founduser["_id"])
    mydb.userMailInfo.update_many({"otherdbref":newDbref,"user_id":1},{"$set":{'userDeleted':True}})
    return json.dumps({"status": "Success","statusreason": "updateSucess"})
#Production Level Testing code
def clear_DB():
    """DESTRUCTIVE: drop the userInfo and userMailInfo collections (test use only)."""
    mydb.userInfo.drop()
    mydb.userMailInfo.drop()
    return json.dumps({"status": "Success","statusreason": "deleteSuccess"})
#
| SecEveryday/FlaskApp | dbaccesslibUserInfo.py | dbaccesslibUserInfo.py | py | 3,622 | python | en | code | 1 | github-code | 13 |
21690314697 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Replaces the old Settings model with WelcomeText: timestamped welcome
    # messages ordered newest-first.

    dependencies = [
        ('aggregateur', '0018_settings_main_settings'),
    ]

    operations = [
        migrations.CreateModel(
            name='WelcomeText',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('content', models.TextField(max_length=1024)),
                # auto_now_add: stamped once at creation, then immutable.
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'ordering': ['-date'],
            },
        ),
        migrations.DeleteModel(
            name='Settings',
        ),
    ]
| Brachamul/elan-democrate | aggregateur/migrations/0019_auto_20151022_1926.py | 0019_auto_20151022_1926.py | py | 762 | python | en | code | 2 | github-code | 13 |
28447880020 | import csv
import os
import pickle
import time
from collections import defaultdict
import random
import pandas as pd
from scipy import sparse
import numpy as np
import logging
from spacy.cli.init_model import read_vectors
from src.utlis import filter_pair_by_class, batch_item_similarity_matrix
logging.basicConfig(level=logging.DEBUG)
from src.preprocess_ml import create_positive_pair, create_movie_genre_table, create_genre_table, load_data
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_path = os.path.abspath(os.path.join(dir_path, os.pardir))
def load_modification_score_data(args, tag_vec, pid_to_itemId_array, num_modif=200):
    """Load item/tag score tables plus add- and remove-tag modification pairs.

    For the 'ali' dataset the binary tag vectors double as scores (every item
    is tagged); for MovieLens datasets precomputed score pickles and a tag
    table CSV are loaded instead. A random (seeded) subset of num_modif
    add/remove pairs is selected, and for each pair a one-hot modification
    row over the tag vocabulary is built.

    Returns: (pid_to_tid_score, pid_with_score_array, add_modification_pair,
    add_gradient_modification, remove_modification_pair,
    remove_gradient_modification, pid_with_score_to_itemId_array,
    modification_to_str).
    """
    if args.data == 'ali':
        # For ali data, we use binary value as score, and because all items have tag list, therefore, all items have score
        pid_to_tid_score = tag_vec
        # pid_with_score_array is all mid
        pid_to_pidt_dict = load_pickle(args.data, 'pid_to_pidt_dict')
        pid_with_score_array = np.zeros((len(pid_to_pidt_dict, )))
        for idx, (k, v) in enumerate(pid_to_pidt_dict.items()):
            pid_with_score_array[idx] = v
        # Sanity check: the tagged-item ids form a dense 0..N-1 range.
        assert pid_with_score_array.max()+1 == len(pid_with_score_array)
        tag_to_tid_dict = load_pickle('ali', 'tag_to_tid_dict')
    else:
        pid_to_tid_score = load_pickle(args.data, 'pid_to_tid_score')
        pid_with_score_array = load_pickle(args.data, 'pid_with_score_array')
        tag_to_tid_dict = {}
        # standard_tag_table.csv rows are (tid, tag, ...).
        tag_table_data = load_data(parent_path + '/data/{}/pro_sg/standard_tag_table.csv'.format(args.data))
        for data in tag_table_data:
            tag_to_tid_dict[data[1]] = int(data[0])
    # create add tag item pair and modification array
    add_modification_pair = load_pickle(args.data, 'add_modification_pair')
    random.Random(args.seed).shuffle(add_modification_pair)
    add_modification_pair = np.array(add_modification_pair)[:num_modif] # random test a small part for speedup
    print(add_modification_pair.shape)
    # One-hot row per pair: column = the tag id being added (pair[:, 1]).
    add_gradient_modification = np.zeros((add_modification_pair[:, 0].shape[0], tag_vec.shape[1]))
    add_gradient_modification_idx = list(zip(list(range(add_modification_pair[:, 0].shape[0])),
                                             list(add_modification_pair[:, 1])))
    add_gradient_modification[tuple(np.array(add_gradient_modification_idx).T)] = 1
    # create remove tag item pair and modification array
    remove_modification_pair = load_pickle(args.data, 'remove_modification_pair')
    random.Random(args.seed).shuffle(remove_modification_pair)
    remove_modification_pair = np.array(remove_modification_pair)[:num_modif] # random test a small part for speedup
    print(remove_modification_pair.shape)
    remove_gradient_modification = np.zeros((remove_modification_pair[:, 0].shape[0], tag_vec.shape[1]))
    # NOTE(review): the `add_...idx` name is reused for the remove branch
    # (copy-paste); functionally fine since it is consumed immediately below.
    add_gradient_modification_idx = list(zip(list(range(remove_modification_pair[:, 0].shape[0])),
                                             list(remove_modification_pair[:, 1])))
    remove_gradient_modification[tuple(np.array(add_gradient_modification_idx).T)] = 1
    pid_with_score_array = pid_with_score_array.astype(int)
    pid_with_score_to_itemId_array = pid_to_itemId_array[pid_with_score_array].astype(int)
    # pid_to_itemId_array uses -1 for "no item"; all scored pids must map.
    assert (pid_with_score_to_itemId_array > 0).all()
    # Inverse of tag_to_tid_dict, for pretty-printing modifications.
    modification_to_str = {}
    for tag, tid in tag_to_tid_dict.items():
        modification_to_str[tid] = tag
    return pid_to_tid_score, pid_with_score_array, add_modification_pair, add_gradient_modification, \
           remove_modification_pair, remove_gradient_modification, pid_with_score_to_itemId_array, modification_to_str
def new_load_data(args):
    """Load all preprocessed artifacts for training on args.data.

    Supported datasets: 'ml-25m', 'ml-20m', 'ali' (anything else exits).
    Loads pickled id maps and tag vectors, the user-item interaction matrix,
    builds a tag-similarity matrix with test pairs masked out (-1), derives
    class-filtered positive pairs and a sparse train interaction matrix, and
    loads pretrained word embeddings.
    """
    print("Loading dataset: {}.".format(args.data))
    pid_to_pidt_dict = load_pickle(args.data, 'pid_to_pidt_dict')
    tag_vec = load_pickle(args.data, 'tag_vec')
    # ali tag vectors are stored sparse; densify for downstream math.
    tag_vec = tag_vec.toarray() if args.data == 'ali' else tag_vec
    itemId_to_pid_dict = load_pickle(args.data, 'itemId_to_pid_dict')
    w2v_slice_array = load_pickle(args.data, 'w2v_slice_array')
    tid_over_used_word_matrix = load_pickle(args.data, 'tid_over_used_word_matrix')
    num_items = len(itemId_to_pid_dict)
    # Per-item category table (genres for MovieLens, categories for ali).
    if args.data in ['ml-25m', 'ml-20m']:
        genre_dict = create_genre_table(args.data)
        category_table = create_movie_genre_table(genre_dict, itemId_to_pid_dict, pid_to_pidt_dict, args.data)
    elif args.data == 'ali':
        sp_tagged_item_cate_table = load_pickle('ali', 'sp_tagged_item_cate_table')
        category_table = sp_tagged_item_cate_table.toarray()
    else:
        category_table = None
        print("{} doesn't exist.".format(args.data))
        exit(1)
    user_interaction_data = load_interaction_data(parent_path + '/data/{}/pro_sg/user_item_interaction.csv'.format(args.data), num_items, args.data)
    tag_distance_vec_full = batch_item_similarity_matrix(tag_vec, batch_size=5000)
    test_pairs = load_pickle(args.data, 'test_pair')
    # Mask held-out test pairs so they can never be picked as positives.
    tag_distance_vec_full[(test_pairs[:, 0], test_pairs[:, 1])] = -1
    print("test pairs num: {}".format(len(test_pairs)))
    # Invert pid -> pidt into both a dict and a dense lookup array.
    pidt_to_pid_dict = {}
    for k,v in pid_to_pidt_dict.items():
        pidt_to_pid_dict[v] = k
    pidt_to_pid_array = np.zeros(len(pidt_to_pid_dict))
    for pidt,pid in pidt_to_pid_dict.items():
        pidt_to_pid_array[pidt] = pid
    # Dense pid -> itemId lookup; -1 marks pids with no item.
    pid_to_itemId_array = -np.ones(max(list(itemId_to_pid_dict.values()))+1)
    for itemId, pid in itemId_to_pid_dict.items():
        pid_to_itemId_array[pid] = itemId
    positive_array = create_positive_pair(tag_distance_vec_full, level=args.modification_level, type='less_than', include_zero_dist=args.include_zero_dist)
    filtered_positive_array = filter_pair_by_class(positive_array, category_table)
    # mask
    num_midt = len(pid_to_pidt_dict)
    num_mid = len(itemId_to_pid_dict)
    tail_mid = pidt_to_pid_array[positive_array[:,1]]
    head_midt = positive_array[:,0]
    # Sparse binary matrix of training (head tagged-item, tail item) pairs.
    train_midt_to_mid_interaction = sparse.csr_matrix((np.ones_like(head_midt), (head_midt, tail_mid)),
                                                      dtype='float64', shape=(num_midt, num_mid))
    # word2vec
    print("Loading word mebedding data...")
    word_dict, w2v_data = load_word_embedding(dataset=args.data, debug=False)
    print("Finish Data Loading!")
    return user_interaction_data, test_pairs, filtered_positive_array, pid_to_pidt_dict, pidt_to_pid_dict, \
           w2v_data, tag_vec, category_table, w2v_slice_array, tid_over_used_word_matrix,\
           train_midt_to_mid_interaction, pid_to_itemId_array
def load_word_embedding(dataset, debug=False):
    """Load pretrained word embeddings for the given dataset.

    'ali' loads Chinese weibo vectors via read_vectors (word_dict is None);
    anything else loads 300-d GloVe vectors from a tab-separated text file
    and returns a word -> row-index dict alongside the matrix.
    With debug=True, returns (None, zeros((10000, 300))) without any I/O.
    """
    if debug:
        return None, np.zeros((10000, 300))
    if dataset == 'ali':
        cn_w2v = parent_path + '/data/sgns.weibo.bigram'
        vectors, iw, wi, dim = read_vectors(cn_w2v)
        data = np.concatenate([v.reshape(1, -1) for v in vectors.values()], axis=0)
        word_dict = None
    else:
        word_dict = {}
        rows = []
        # `with` guarantees the handle is closed; the original used
        # open(...).readlines(), leaking the file object.
        with open(parent_path + '/data/new_glove.6B.300d.txt') as f:
            for idx, line in enumerate(f):
                # Each line: "<word>\t<v1> <v2> ... <v300>".
                word, vec = line.strip('\n').split('\t')
                vec_nums = vec.split(' ')
                word_dict[word] = idx
                temp_ = [float(i) for i in vec_nums]
                assert len(temp_) == 300
                rows.append(temp_)
        data = np.array(rows)
        assert data.shape[1] == 300
        print("Loaded data. #shape = " + str(data.shape))
        print(" #words = %d " % (len(word_dict)))
    return word_dict, data
def load_pickle(dataset_name, var):
    """Unpickle and return `<parent>/data/<dataset_name>/pro_sg/<var>.pkl`."""
    path = parent_path + '/data/' + dataset_name + '/pro_sg/' + var + '.pkl'
    with open(path, 'rb') as handle:
        return pickle.load(handle)
def load_interaction_data(csv_file, n_items, dataset="ml-25m"):
    """Read a uid/pid interaction CSV into a binary sparse user-item matrix.

    The CSV must have 'uid' and 'pid' columns; the result is a float64 CSR
    matrix of shape (max uid + 1, n_items) with a 1 for every interaction.
    (`dataset` is accepted for interface compatibility but unused.)
    """
    interactions = pd.read_csv(csv_file)
    n_users = interactions['uid'].max() + 1
    user_idx = interactions['uid']
    item_idx = interactions['pid']
    ones = np.ones_like(user_idx)
    return sparse.csr_matrix((ones, (user_idx, item_idx)),
                             dtype='float64', shape=(n_users, n_items))
def compute_sparsity(X):
    """Return the percentage of zero entries in array X (0.0 to 100.0)."""
    nonzero_fraction = np.count_nonzero(X) / float(X.size)
    return 100.0 * (1.0 - nonzero_fraction)
def dump_vectors(X, outfile, words):
    """Write word vectors to `outfile`, one "word v1 v2 ... " line per word.

    The output format matches the original exactly: tokens separated by
    single spaces, with a trailing space before each newline. X must have
    one row per entry in `words`.
    """
    print("shape", X.shape)
    assert len(X) == len(words)
    # `with` closes the file even if a write raises (the original only
    # closed it on the success path).
    with open(outfile, 'w') as fw:
        for word, row in zip(words, X):
            fw.write(word + " ")
            for value in row:
                fw.write(str(value) + " ")
            fw.write("\n")
32719505873 | import pandas as pd
import os
# Aggregate SF police incidents into per-(Category, DayOfWeek) counts and
# write the result for downstream visualization.
df = pd.read_csv("SF Visulisation/Police_Department_Incidents_-_Previous_Year__2016_.csv")
# count() tallies non-null values per remaining column within each group;
# any one of them (IncidntNum below) serves as the group size.
df = df.groupby(['Category', 'DayOfWeek']).count()
df.reset_index(inplace = True)
df = df[['Category', 'DayOfWeek', 'IncidntNum']]
df.rename(columns = {"IncidntNum": "Count"}, inplace = True)
df.to_csv("SF Visulisation/crime_by_day.csv")
| alanshiau717/FIT3179 | data_wranging/crime_by_day.py | crime_by_day.py | py | 368 | python | en | code | 0 | github-code | 13 |
42652906446 | import os,json
import pickle
from torch.utils.data import Dataset
from torchvision import transforms
import torch
import torch.nn as nn
from PIL import Image
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision.models as models
from collections import defaultdict
from utils import *
class VisualSemanticEmbedding(nn.Module):
    """Global image feature extractor built on pretrained ResNet-18.

    Runs the ResNet-18 trunk (conv1 .. layer4 + avgpool) and flattens the
    pooled output to (batch, feature_dim); the final fc classifier is
    deliberately skipped.
    """
    def __init__(self):
        super(VisualSemanticEmbedding, self).__init__()
        # ImageNet-pretrained weights; used purely as a fixed feature extractor.
        self.model = models.resnet18(pretrained=True)
    def forward(self, img):
        # image feature
        img = self.model.conv1(img)
        img = self.model.relu(self.model.bn1(img))
        img = self.model.maxpool(img)
        img = self.model.layer1(img)
        img = self.model.layer2(img)
        img = self.model.layer3(img)
        img = self.model.layer4(img)
        img = self.model.avgpool(img)
        img = img.view(img.size(0),-1)  # flatten pooled features to (batch, -1)
        return img
class MultimodalDataset(Dataset):
    '''
    COCO-style image/caption dataset pairing precomputed image features
    with tokenized captions. (The original docstring described the CUB /
    Oxford-102 datasets, but this code reads a COCO captions annotation
    file with 'images' and 'annotations' sections.)

    Arguments:
        image_dir (string): directory containing the image files.
        captions_json (string): COCO-format captions annotation file.
        image_size (tuple): size images are resized to before encoding.
        embed_ndim (int): unused here; kept for interface compatibility.
        image_json (string): optional pickle of precomputed image features
            keyed by image id; when given, images are not re-encoded.
    '''
    def __init__(self, image_dir, captions_json, image_size=(64,64), embed_ndim=1024, image_json=None):
        super().__init__()
        self.image_dir = image_dir
        with open(captions_json, 'r') as f:
            captions_data = json.load(f)
        self.transform = transforms.Compose([
            transforms.ToTensor(),
        ])
        if image_json == None:
            # No precomputed features: run every image through the ResNet
            # encoder now (slow) and cache the features to ./COCO.pkl so
            # later runs can pass image_json instead.
            self.img_encoder = VisualSemanticEmbedding()
            self.image_size = image_size
            self.image_ids = []
            self.image = {}
            for image_data in captions_data['images']:
                image_id = image_data['id']
                filename = image_data['file_name']
                self.image_ids.append(image_id)
                image_path = os.path.join(self.image_dir, filename)
                with open(image_path, 'rb') as f:
                    with Image.open(f).resize(self.image_size,Image.BILINEAR) as image:
                        image = self.transform(image.convert('RGB'))
                # Encode a single-image batch, then drop the batch dim and
                # detach so no autograd graph is retained in the cache.
                img = self.img_encoder(image.unsqueeze(0))
                img = img.squeeze().detach()
                self.image[image_id] = img
            with open("./COCO.pkl", "wb") as fp: #Pickling
                pickle.dump(self.image, fp, protocol = pickle.HIGHEST_PROTOCOL)
        else:
            self.image_ids = []
            for image_data in captions_data['images']:
                image_id = image_data['id']
                self.image_ids.append(image_id)
            with open(image_json,'rb') as f:
                self.image = pickle.load(f)
        # Map each image id to the list of its tokenized captions.
        self.image_id_to_captions = defaultdict(list)
        for caption_data in captions_data['annotations']:
            image_id = caption_data['image_id']
            self.image_id_to_captions[image_id].append(prepare_text(caption_data['caption']))
    def __len__(self):
        # WARNING: this number is somewhat arbitrary, since we do not
        # necessarily use all instances in an epoch
        return len(self.image_id_to_captions)
    def __getitem__(self, index):
        image_id = self.image_ids[index]
        cls_txts = self.image_id_to_captions[image_id]
        # Pick one of the image's captions uniformly at random per access
        # (a 1-element tensor works as a list index via __index__).
        id_txt = torch.randint(len(cls_txts), (1,))
        txt = cls_txts[id_txt].squeeze()
        img = self.image[image_id].squeeze()
        return {'img': img, 'txt': txt}
| jia1995/char-CNN-RNN_pytorch | dataset.py | dataset.py | py | 3,734 | python | en | code | 0 | github-code | 13 |
31501333995 | from control_msgs.msg import JointTrajectoryAction, JointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
import geometry_msgs.msg
import sensor_msgs.msg
import numpy as np
import threading
import rospy
class TopicLogger:
    """Log timestamps (and, via subclasses, payloads) of messages on a ROS
    topic into fixed-length buffers.

    Logging stops when the buffer fills, stop() is called, a configured
    stop time is passed, or rospy shuts down; the reason is recorded in
    self.stopped_reason and done_event is set.
    """
    def __init__(self,topic_name,message_type,log_length,subscribe_buffer_length=2):
        """
        subscribe_buffer_length not correct
        """
        # NOTE(review): per the note above, rospy's buff_size argument is a
        # byte count rather than a message count, so 2 is almost certainly
        # not the intended value -- confirm against the rospy API docs.
        self.log_length = log_length
        self.time = np.zeros((self.log_length,),np.uint64)
        self.idx = 0
        self.done = False
        self.unregister_on_finish = True
        self.last_message = None
        self.stop_time = None
        self.stop_time_lock = threading.Lock() #arbitrate access to last_message
        self.subscriber = rospy.Subscriber(topic_name, message_type, self.handler, buff_size=subscribe_buffer_length)
        self.done_event = threading.Event() #this event remains cleared until logging is done
        self.start_event = threading.Event() #this lock remains cleared until logging is started
        self.request_stop_event = threading.Event()
        self.first_run = True #gets set to False the first time the handler is run
        self.stopped_reason = "not stopped"
    def block_until_done(self):
        """Block until logging finishes; polls so rospy shutdown is noticed."""
        while not rospy.is_shutdown() and not self.done_event.is_set():
            self.done_event.wait(0.5)
    def block_until_start(self):
        """Block until the first message has been logged (or rospy shuts down)."""
        while not rospy.is_shutdown() and not self.start_event.is_set():
            self.start_event.wait(0.5)
    def stop_after(self,nsec):
        """Stop logging once a message stamped later than `nsec` nanoseconds arrives."""
        self.stop_time_lock.acquire()
        self.stop_time = nsec
        self.stop_time_lock.release()
    def stop(self):
        """Request that logging stop at the next received message."""
        self.request_stop_event.set()
    def handler(self,message):
        #this handler logs time, manages unsubscribing, manipulates self.idx and calls another handler that may do other things in subclassed version
        if self.done:
            if self.unregister_on_finish:
                raise AssertionError("handler got called, but should have been unsubscribed from topic")
            return
        self.last_message = message
        self.time[self.idx] = message.header.stamp.to_nsec()
        self.handler_specific(message)
        self.idx += 1
        self.stop_time_lock.acquire()
        stop_time = self.stop_time
        self.stop_time_lock.release()
        is_past_stop_time = (stop_time is not None and stop_time < message.header.stamp.to_nsec())
        if self.idx == self.log_length or self.request_stop_event.is_set() or is_past_stop_time or rospy.is_shutdown():
            #this block runs exactly once
            self.done = True
            if self.unregister_on_finish:
                self.subscriber.unregister()
            self.done_event.set()
            if self.idx == self.log_length:
                self.stopped_reason = "Log Filled"
            elif self.request_stop_event.is_set():
                self.stopped_reason = "Got Stop Signal"
            elif is_past_stop_time:
                self.stopped_reason = "Past Stop Time"
            elif rospy.is_shutdown():  # bug fix: was `rospy_is_shutdown()` -- NameError when this branch ran
                self.stopped_reason = "rospy sent shutdown"
            return
        if self.first_run:
            self.start_event.set()
            self.first_run = False
    def handler_specific(self,message):
        """Subclass hook: record the payload of `message` at row self.idx."""
        pass
class FT_Logger(TopicLogger):
    """Log force/torque wrenches (fx,fy,fz,tx,ty,tz) from the PR2 right
    gripper F/T sensor topic into a (log_length, 6) array."""
    def __init__(self,log_length,subscribe_buffer_length=2):
        # Pre-allocate the wrench log; row self.idx is filled per message.
        self.ft = np.zeros((log_length,6),np.float64)
        TopicLogger.__init__(self,"/ft/r_gripper_motor", geometry_msgs.msg.WrenchStamped,
                log_length=log_length,subscribe_buffer_length=subscribe_buffer_length)
    def handler_specific(self,message):
        # Pack force then torque components into the current log row.
        f = message.wrench.force
        t = message.wrench.torque
        self.ft[self.idx,:] = (f.x,f.y,f.z) + (t.x, t.y, t.z)
class Joint_Logger(TopicLogger):
    """Log the positions of a fixed set of joints from /joint_states
    messages into a (log_length, len(joints)) array (NaN when missing)."""
    def __init__(self,log_length,joints):
        """
        joints is an iterable of joint names
        """
        self.joints = joints
        self.pos = np.zeros((log_length,len(self.joints)),np.float64)
        TopicLogger.__init__(self,"joint_states",sensor_msgs.msg.JointState,
                log_length=log_length,subscribe_buffer_length=2)
    def handler_specific(self,message):
        # Build a name -> index map once per message instead of calling
        # message.name.index() for every joint (the original was
        # O(joints * names) per message -- the "Shlemiel the painter"
        # pattern its own comment flagged).
        name_to_idx = {name: i for i, name in enumerate(message.name)}
        for (i,joint) in enumerate(self.joints):
            try:
                v = message.position[name_to_idx[joint]]
            except KeyError:
                rospy.loginfo("Joint %s missing from log entry %i"%(joint,self.idx) )
                v = np.NaN
            self.pos[self.idx,i] = v
| baxelrod/pr2_calibrated_ft | scripts/TopicLogger.py | TopicLogger.py | py | 4,794 | python | en | code | 1 | github-code | 13 |
26570386455 | """Chat receiver."""
import traceback
from websockets.exceptions import ConnectionClosedError
from chat import chat_events
from events import DummyEvent
from init import EVENT_QUEUE
from log import LOG
class ChatReceiver(object):
    """Receives raw IRC data from Twitch chat and queues parsed events."""

    def __init__(self, connection) -> None:
        """Init.

        Args:
            connection: object exposing ``socket`` (websocket),
                ``_connect()`` and ``send_server_message()``.
        """
        super(ChatReceiver, self).__init__()
        self.connection = connection
        # Bug fix: previously only assigned inside run(); get_data() clears it
        # on KeyboardInterrupt, which could raise AttributeError if that
        # happened before run() started.
        self.running = False

    async def get_data(self) -> str:
        """Get the data from the server.

        Returns:
            data (str): Received data from the server; '' when nothing could
            be read. Returns None (falsy, skipped by run()) after a
            reconnect attempt.
        """
        try:
            raw_data = (
                (await self.connection.socket.recv()).strip('\r\n').strip()
            )
        except KeyboardInterrupt:
            LOG.info('Exiting.')
            self.running = False
            raw_data = ''
        except ConnectionResetError:
            LOG.warning('Connection reset: reconnecting.')
            await self.connection._connect()
            return
        except BrokenPipeError:
            LOG.warning('Pipe broken: reconnecting.')
            await self.connection._connect()
            return
        except ConnectionClosedError:
            LOG.warning('Connection closed prematurely: reconnecting.')
            await self.connection._connect()
            return
        except Exception as e:
            # Bug fix: message previously read "failed failed".
            LOG.error(
                'Attempt to get data failed: {}'.format(
                    getattr(e, 'message', repr(e)),
                )
            )
            traceback.print_exc()
            raw_data = ''
        return raw_data

    async def run(self) -> None:
        """Main functionality of the bot.

        This is run automatically when the thread is started.
        """
        LOG.debug('Running chat receiver loop.')
        self.running = True
        while self.running:
            # Get and decode the data sent from the server.
            raw_data = await self.get_data()
            if not raw_data:
                continue
            # The raw_data may contain multiple server messages,
            # so separate them.
            for data in filter(None, raw_data.split('\r\n')):
                # Respond to the keep-alive message so the session is not
                # terminated.
                if 'PING' in data:
                    LOG.debug('PING <> PONG')
                    await self.connection.send_server_message(
                        'PONG :tmi.twitch.tv\n'
                    )
                    continue
                event_type = await chat_events.get_event_type(data)
                # Mapping of chat message type to Event class.
                event_mapping = {
                    '353': chat_events.ExistingUsersEvent,
                    '366': chat_events.NamesEndEvent,
                    'ACK': chat_events.AckEvent,
                    'CAP': chat_events.CapEvent,
                    'JOIN': chat_events.JoinEvent,
                    'NOTICE': chat_events.NoticeEvent,
                    'PART': chat_events.PartEvent,
                    'PRIVMSG': chat_events.PubmsgEvent,
                    'ROOMSTATE': chat_events.RoomstateEvent,
                    'USERNOTICE': chat_events.UsernoticeEvent,
                    'USERSTATE': chat_events.UserstateEvent,
                    'WHISPER': chat_events.WhisperEvent,
                }
                # Unknown event types fall back to DummyEvent.
                event = await event_mapping.get(event_type, DummyEvent)().init(
                    data
                )
                LOG.debug('Adding event to queue')
                await EVENT_QUEUE.put(event)
        LOG.debug('ChatMessagereceiver no longer running.')
| amorphousWaste/twitch_bot_public | twitch_bot/chat/chat_receiver.py | chat_receiver.py | py | 3,657 | python | en | code | 0 | github-code | 13 |
74150534416 |
import os
import shutil
import pickle
import copy
import torch
import pytorch_lightning as pl
import neurogym as ngym
from pytorch_lightning.callbacks import ModelCheckpoint
from neurogym.wrappers import PassAction, PassReward, Noise
from ttrnn.trainer import Supervised, A2C, MetaA2C
from ttrnn.dataset import NeurogymTrialEnvDataset, NeurogymDataLoader
from ttrnn.tasks.driscoll2022 import Driscoll2022, MemoryPro
from ttrnn.tasks.harlow import HarlowMinimal, Harlow1D, HarlowMinimalDelay, HarlowMinimalRT
from ttrnn.tasks.wrappers import DiscreteToBoxWrapper, RingToBoxWrapper, ParallelEnvs
from ttrnn.callbacks import TaskPerformance, TrajectoryPlot, TaskPlot
import sys
# Optional CLI argument: RNG seed (defaults to 0).
seed = int(sys.argv[1]) if len(sys.argv) > 1 else 0
pl.seed_everything(seed)
# Keyword arguments forwarded to the RNN (rnn_type='leakyRNN' below).
rnn_params = {
    # 1 fixation + 2 objects x 11 dims + 3 one-hot actions + 1 reward.
    # NOTE(review): breakdown inferred from obj_dim=11 and the
    # PassAction/PassReward wrappers below -- confirm against the env.
    'input_size': 1 + 11 * 2 + 3 + 1,
    'hidden_size': 256,
    # 'output_size': 3,
    # 'nonlinearity': 'relu',
    'bias': True,
    'trainable_h0': False,
    'batch_first': True,
    # 'init_config': {
    #     'default': ('normal_', {'mean': 0.0, 'std': 1 / (32 ** 0.5)}),
    # },
    # Hidden-state noise is disabled; parameters kept for easy re-enabling.
    'noise_config': {'enable': False, 'noise_type': 'normal', 'noise_params': {'mean': 0.0, 'std': 0.05}},
    'dt': 10,
    'tau': 20,
    'trainable_tau': False,
}
# import pdb; pdb.set_trace()
# --- Alternative task configurations kept for reference ---
# task = 'PerceptualDecisionMaking-v0'
# task = MemoryPro(
#     dim_ring=4,
#     dt=20,
#     timing={'fixation': 200, 'stimulus': 1000, 'delay': 1000, 'decision': 400},
# )
# task = HarlowMinimal(
#     dt=100, obj_dim=5, obj_mode="kb", obj_init="normal", orthogonalize=True,
#     # rewards={'fail': -1.0},
#     inter_trial_interval=2, num_trials_before_reset=6,
# )
# task = Harlow1D(
#     dt=100, obj_dim=1, obj_dist=3, obj_mode="kb", obj_init="randint",
#     rewards={'fail': -1.0},
#     inter_trial_interval=0, num_trials_before_reset=6,
# )
# Active task: delayed Harlow object choice, reset every 6 trials.
task = HarlowMinimalDelay(
    dt=100,
    obj_dim=11,
    obj_mode="kb",
    obj_init="normal",
    orthogonalize=False,
    normalize=True,
    abort=True,
    rewards={'abort': -0.1, 'correct': 1.0, 'fail': 0.0},
    timing={'fixation': 200, 'stimulus': 400, 'delay': 200, 'decision': 200},
    num_trials_before_reset=6,
    r_tmax=-1.0,
)
# task = HarlowMinimalRT(
#     dt=100,
#     obj_dim=5,
#     obj_mode="kb",
#     obj_init="normal",
#     orthogonalize=True,
#     abort=True,
#     rewards={'abort': -0.1, 'correct': 1.0, 'fail': -1.0},
#     timing={'fixation': 400, 'decision': 1600},
#     num_trials_before_reset=6,
# )
env_kwargs = {}
wrappers = [
(Noise, {'std_noise': 0.1}),
(PassAction, {'one_hot': True}),
(PassReward, {}),
(ParallelEnvs, {'num_envs': 8})
] # [(RingToBoxWrapper, {})] # [(DiscreteToBoxWrapper, {})]
# wrappers = []
ckpt_dir = "/home/fpei2/learning/harlow-rnn-analysis/runs/harlowdelaynotorth_rnn256/"
overwrite = True
if overwrite:
if os.path.exists(ckpt_dir):
shutil.rmtree(ckpt_dir)
loggers = [
pl.loggers.CSVLogger(save_dir=ckpt_dir),
# pl.loggers.WandbLogger(project='ttrnn-dev'),
]
callbacks = [
# TaskPerformance(log_every_n_epochs=250, threshold=0.6),
# TrajectoryPlot(log_every_n_epochs=5),
# TaskPlot(log_every_n_epochs=5),
ModelCheckpoint(dirpath=ckpt_dir, monitor="train/loss", save_top_k=8, every_n_epochs=2500),
]
# Decorate the task with the configured wrappers, in list order.
if len(wrappers) > 0:
    for wrapper, wrapper_kwargs in wrappers:
        task = wrapper(task, **wrapper_kwargs)
# Keep an untouched copy of the wrapped task (see pickling TODO below).
backup = copy.deepcopy(task)
model = A2C(
    env=task,
    env_kwargs=env_kwargs,
    rnn_type='leakyRNN',
    rnn_params=rnn_params,
    actor_type='linear',
    critic_type='linear',
    encoder_type='none',
    optim_type='RMSprop',
    optim_params={'lr': 7.5e-4}, #, 'weight_decay': 1e-6},
    epoch_len=20,
    reset_state_per_episode=False,
    trials_per_episode=6,
    discount_gamma=0.91,
    critic_beta=0.4,
    entropy_beta=0.001,
    entropy_anneal_len=30000,
)
trainer = pl.Trainer(
    max_epochs=30000,
    callbacks=callbacks,
    # accelerator='gpu',
    # devices=1,
    # num_nodes=1,
    log_every_n_steps=100,
    enable_progress_bar=True,
    enable_model_summary=True,
    enable_checkpointing=True,
    logger=loggers,
    gradient_clip_val=0.5,
)
trainer.fit(model=model) # , train_dataloaders=train_dataloader, val_dataloaders=val_dataloader)
# TODO: pickle `task` for later loading
# with open(os.path.join(ckpt_dir, "task.pkl"), 'wb') as f:
#     backup.obj1_builder = None
#     backup.obj2_builder = None
#     pickle.dump(backup, f)
# import pdb; pdb.set_trace() | felixp8/ttrnn | scripts/rl_example.py | rl_example.py | py | 4,583 | python | en | code | 0 | github-code | 13 |
17047673474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandIotdeviceChangeModifyModel(object):
    """Request model for ant.merchant.expand.iotdevice.change.modify.

    Every field is exposed as a plain read/write property backed by a
    ``_<name>`` attribute. The properties and the to/from-dict converters
    are all driven by the single _FIELD_NAMES table below.
    """

    # Ordered field names; drives __init__, the generated properties and
    # the serialization helpers.
    _FIELD_NAMES = (
        'device_sn',
        'gmt_created',
        'order_id',
        'policy_type',
        'settled_alipay_id',
        'shop_address',
        'shop_city',
        'shop_district',
        'shop_industry',
        'shop_latitude',
        'shop_linkman_mobile',
        'shop_linkman_name',
        'shop_longitude',
        'shop_name',
        'shop_open_time',
        'shop_province',
        'signed_alipay_id',
        'supplier_sn',
    )

    def __init__(self):
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize every truthy field, recursing into nested models."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AntMerchantExpandIotdeviceChangeModifyModel()
        for name in AntMerchantExpandIotdeviceChangeModifyModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o


def _install_model_properties():
    # Generate one property per field so the public attribute surface is
    # identical to the hand-written original (e.g. ``model.device_sn``).
    for _name in AntMerchantExpandIotdeviceChangeModifyModel._FIELD_NAMES:
        def _getter(self, _attr='_' + _name):
            return getattr(self, _attr)

        def _setter(self, value, _attr='_' + _name):
            setattr(self, _attr, value)

        setattr(AntMerchantExpandIotdeviceChangeModifyModel, _name,
                property(_getter, _setter))


_install_model_properties()
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AntMerchantExpandIotdeviceChangeModifyModel.py | AntMerchantExpandIotdeviceChangeModifyModel.py | py | 10,161 | python | en | code | 241 | github-code | 13 |
28157494330 | #External libs
import ast
import boto3
import json
import sys
import os
from botocore.exceptions import ClientError
#Establish our boto resources (module-level singletons shared by all functions)
client = boto3.client('lambda')
session = boto3.session.Session()
# Region is taken from the ambient AWS credentials/config chain.
region = session.region_name
ec2Client = boto3.client('ec2')
def import_config(lambda_name, alias=False):
    '''
    Uses the lambda name to grab existing lambda configuration
    and import it into a config file

    args:
        lambda_name: the name of the lambda you want to import
        alias: the alias, defaults to $LATEST if not present

    returns:
        (config_dict, function_arn) tuple
    '''
    print('Attempting to import configuration')
    #Create an empty config dict, start with the bare minimum
    config_dict = {
        "initializers": {
            "name": "",
            "description": "",
            "region": region,
            "handler": "",
            "role": ""
        },
        "provisioners": {
            "runtime": "",
            "timeout": 0,
            "mem_size": 0
        }
    }
    #If the user didn't pass an alias we want to use $LATEST
    alias_used = alias if alias else '$LATEST'
    try:
        lambda_config = client.get_function(
            FunctionName=lambda_name,
            Qualifier=alias_used
        )
    except ClientError as error:
        print(error.response)
        sys.exit(1)
    # NOTE(fix): the original returned from a ``finally`` block, which both
    # swallowed SystemExit and referenced ``function_arn`` before assignment
    # on error paths. Returns now happen only after a successful lookup.
    config = lambda_config['Configuration']
    if 'FunctionName' not in config:
        print("No Lambda found! Please check your config")
        sys.exit(1)
    #Grab our initializers
    config_dict['initializers']['name'] = config['FunctionName']
    config_dict['initializers']['handler'] = config['Handler']
    config_dict['initializers']['description'] = config['Description']
    config_dict['initializers']['role'] = config['Role'].split('/')[-1]
    #Grab our provisioners
    config_dict['provisioners']['timeout'] = config['Timeout']
    config_dict['provisioners']['mem_size'] = config['MemorySize']
    config_dict['provisioners']['runtime'] = config['Runtime']
    #VPC work: only record VPC info when the lambda actually runs inside one
    sg_ids = config['VpcConfig']['SecurityGroupIds']
    if len(sg_ids) != 0:
        config_dict['vpc_setting'] = {
            "vpc_name": config['VpcConfig']['VpcId'],
            "security_group_ids": sg_ids
        }
    #Tracing config (PassThrough is the AWS default, so only record others)
    trace_config = config['TracingConfig']
    if trace_config['Mode'] != "PassThrough":
        config_dict['initializers']['tracing_mode'] = trace_config['Mode'].lower()
    #Dead letter queue, ARN shaped like arn:aws:<type>:region:acct:<name>
    if 'DeadLetterConfig' in config:
        dead_letter_split = config['DeadLetterConfig']['TargetArn'].split(':')
        config_dict['dead_letter_config'] = {"type": dead_letter_split[2], "target_name": dead_letter_split[-1]}
    #Environment variables
    if 'Environment' in config:
        config_dict['variables'] = config['Environment']['Variables']
    #Write the alias if needed
    if alias_used != "$LATEST":
        config_dict['initializers']['alias'] = alias
    #Strip any qualifier off the ARN (keep the first 7 ':'-separated fields)
    function_arn = ":".join(config['FunctionArn'].split(':')[:7])
    return config_dict, function_arn
def import_triggers(lambda_name, alias=False):
    '''
    Uses the lambda name to grab triggers from existing lambda config

    args:
        lambda_name: name of the lambda to inspect
        alias: optional alias to qualify the policy lookup

    returns:
        (principal, resource) tuple; both empty strings when no policy exists
    '''
    print("Attempting to retrieve lambda triggers")
    try:
        if not alias:
            get_lambda_policy = client.get_policy(
                FunctionName=lambda_name
            )
        else:
            get_lambda_policy = client.get_policy(
                FunctionName=lambda_name,
                Qualifier=alias
            )
    except ClientError:
        # Lambdas without triggers have no resource policy; not an error.
        print("No policy found")
        return '', ''
    # NOTE(fix): the original returned from ``finally``, which would mask any
    # exception raised while parsing the policy document below.
    policy = ast.literal_eval(get_lambda_policy['Policy'])
    statement_dict = policy['Statement'][0]
    principal = statement_dict['Principal']['Service'].split('.')[0]
    resource = statement_dict['Condition']['ArnLike']['AWS:SourceArn'].split(":")[-1]
    return principal, resource
def get_sg_name(sg_id):
    '''
    Grabs the NAME of the security group; names are friendlier to read
    than the sg-xxxx identifiers.

    args:
        sg_id: the security group ids, as returned from import_config
    '''
    print('Attempting to retrieve security group names')
    try:
        sg_info = ec2Client.describe_security_groups(
            GroupIds=sg_id
        )
    except ClientError as error:
        print(error.response)
        sys.exit(1)
    # NOTE(fix): the original returned from ``finally``, which referenced
    # ``group_info`` (unassigned on failure) and swallowed SystemExit.
    group_info = sg_info['SecurityGroups'][0]
    print("Retrieved name for groups %s" % group_info['GroupName'])
    return group_info['GroupName']
def get_vpc_name(vpc_id):
    '''
    Grabs the NAME of the VPC from its 'Name' tag.

    args:
        vpc_id: the id of the VPC, returned from import_config
    '''
    print('Attempting to retrieve VPC name')
    try:
        vpc_info = ec2Client.describe_vpcs(
            VpcIds=[vpc_id]
        )
    except ClientError as error:
        print(error.response)
        sys.exit(1)
    vpc = vpc_info['Vpcs'][0]
    # Search every tag for the 'Name' key. NOTE(fix): the original only
    # inspected Tags[0] and also returned from ``finally``, which raised
    # NameError on the failure path instead of exiting cleanly.
    for tag in vpc.get('Tags', []):
        if tag['Key'] == 'Name':
            print('Retrieved VPC name %s' % tag['Value'])
            return tag['Value']
    print("No VPC found, make sure your VPC is tagged 'Key': 'Name', 'Value': 'Your-VPC'")
    sys.exit(1)
def get_tags(lambda_arn):
    '''
    Grabs a tags dict for the current lambda

    args:
        lambda_arn: the arn returned from the import config function
    '''
    print('Attempting to retrieve tags')
    try:
        tags = client.list_tags(Resource=lambda_arn)
    except ClientError as error:
        print(error.response)
        sys.exit(1)
    # NOTE(fix): return moved out of ``finally`` so a failed call no longer
    # masks SystemExit with a NameError.
    return tags['Tags']
########### Entrypoint ###########
def import_lambda(lambda_name, alias):
    '''
    The main entry point of the module: assemble the full config dict for an
    existing lambda (tags, VPC names, triggers included).

    args:
        lambda_name: the name of the lambda
        alias: alias of the lambda
    '''
    config, arn = import_config(lambda_name=lambda_name, alias=alias)
    tags = get_tags(arn)
    if tags:
        config['tags'] = tags
    vpc = config.get('vpc_setting')
    if vpc is not None:
        # Swap ids for human-readable names.
        vpc['vpc_name'] = get_vpc_name(vpc['vpc_name'])
        vpc['security_group_ids'] = get_sg_name(vpc['security_group_ids'])
    method, source = import_triggers(lambda_name=lambda_name, alias=alias)
    if method:
        config['trigger'] = {"method": method, "source": source}
    return config
25982341602 | import sys
from PyQt6.QtWidgets import *
from PyQt6.QtGui import QPixmap, QImage, QClipboard
import qrcode
import pay_by_square
from PIL import ImageQt
from PyQt6 import uic, QtWidgets
from dialog import Dialog
from parser import getAccount
from parser import configChecker
class UI(QMainWindow):
    """Main window: builds a pay-by-square QR code for the configured account
    and lets the user copy it to the clipboard or save it as PNG."""

    def openDialog(self):
        """Open the (modal) settings dialog."""
        # NOTE(fix): the original also created a bare, never-shown
        # QtWidgets.QDialog() here (self.window); it served no purpose.
        self.ui = Dialog()
        self.ui.setModal(True)
        self.ui.show()

    def __init__(self):
        super(UI, self).__init__()
        uic.loadUi('GUI.ui', self)
        # Wire up the buttons; copy/save stay hidden until a QR exists.
        self.copyBtn.clicked.connect(self.copyQR)
        self.saveBtn.clicked.connect(self.saveQR)
        self.settingsBtn.clicked.connect(lambda: self.openDialog())
        self.copyBtn.setVisible(False)
        self.saveBtn.setVisible(False)
        self.labelQR = self.findChild(QLabel, 'labelQR')
        self.generateBtn.clicked.connect(self.generate)
        self.show()

    def generate(self):
        """Generate the pay-by-square QR code from the form values and the
        account stored in the config file."""
        account = getAccount()  # (iban, swift, currency) per usage below
        code = pay_by_square.generate(
            iban=account[0],
            swift=account[1],
            currency=account[2],
            amount=self.value.value(),
            variable_symbol=self.inputVS.text(),
            specific_symbol=self.inputSS.text(),
            constant_symbol=self.inputCS.text(),
        )
        img = qrcode.make(code)
        image = ImageQt.ImageQt(img)
        self.imgQR = QImage(image)
        self.pixmap = QPixmap.fromImage(self.imgQR)
        self.labelQR.setPixmap(self.pixmap)
        self.copyBtn.setVisible(True)
        self.saveBtn.setVisible(True)

    def copyQR(self):
        """Put the current QR pixmap on the system clipboard."""
        clipboard = QApplication.clipboard()
        clipboard.setPixmap(self.pixmap)

    def saveQR(self):
        """Ask for a destination file and save the current QR pixmap."""
        filePath, _ = QFileDialog.getSaveFileName(self, "Save Image", "", "PNG(*.png);;All Files(*.*) ")
        if filePath == "":
            return
        self.pixmap.save(filePath)
if __name__ == "__main__":
app = QApplication(sys.argv)
#Checks if config file is not missing and whether it contains valid options
errCode = configChecker()
if errCode > 0:
msg = QMessageBox()
msg.setWindowTitle("Missing config data")
if errCode == 1:
msg.setText("Config file was not found.\n A new config file has been created.")
elif errCode == 2:
msg.setText("Some (or all) of config data not found.\n Account data were updated to default settings.")
x = msg.exec()
window = UI()
app.exec() | radoslavpalenik/SEPA-QR-generator | main.py | main.py | py | 2,631 | python | en | code | 0 | github-code | 13 |
2641273373 | email = input("Enter your Email ID: ")
domain = '@gmail.com'
ledo = len(domain)
lema = len(email)
sub = email[lema-ledo:]
if sub == domain:
if ledo != lema:
print("It is a valid Email ID: ")
else:
print("This is a invalid Email ID: ")
else:
print("This email ID is either not valid or belongs to some other domain. ")
| avrajit-das/Python-Programming | Email_Checker.py | Email_Checker.py | py | 326 | python | en | code | 0 | github-code | 13 |
39151922455 | import cv2
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
from tqdm import tqdm
from multiprocessing import Pool
from itertools import product
import pandas as pd
from well_matrix import Well
import datetime
from datetime import datetime
import os
from sklearn.cluster import KMeans
from collections import Counter
from well_matrix_creation.preprocess_tray import rotate_n_flip, cor_lum, crop_to_plate, find_wells, read_wells, map_columns
from well_matrix_creation.find_clusters import find_clusters
from well_matrix_creation.subgrouper import Subgrouper
class PreprocessedTray:
    """One tray photo: locates the plate, corrects it, and extracts wells."""

    def __init__(self, path, well_feature_vec_size, should_save_key_states_well=True, img_rot=None):
        self.path = path
        self.img_rot = img_rot
        self.date_obj = None
        self.plate_rot = None
        self.plate_cor = None
        self.circles = None
        self.well_images = None
        self.mapper = None
        self.assigned_map = None
        self.well_feature_vec_size = well_feature_vec_size
        self.should_save_key_states_well = should_save_key_states_well
        self.img = cv2.imread(self.path)
        self.name_to_date()

    def name_to_date(self):
        """Parse the capture timestamp out of the image file name."""
        file_name = self.path.split(os.sep)[-1]
        self.date_obj = datetime.strptime(file_name, '%Y%m%d_%H%M%S.jpg')

    def process(self):
        """Crop, orient and luminance-correct the plate, then read the wells."""
        plate = crop_to_plate(self.img)
        if self.img_rot is None:
            raise Exception("Image rotation not defined")
        if self.img_rot:
            self.plate_rot = rotate_n_flip(plate, rot=cv2.ROTATE_90_COUNTERCLOCKWISE)
        else:
            self.plate_rot = rotate_n_flip(plate)
        self.plate_cor = cor_lum(self.plate_rot)
        gray_plate_cor = cv2.cvtColor(self.plate_cor, cv2.COLOR_BGR2GRAY)
        self.circles = find_wells(gray_plate_cor)
        # self.mapper holds the (x, y) coordinate of each extracted well.
        self.well_images, self.mapper = read_wells(gray_plate_cor, self.circles)
        self.well_objects = [
            Well(well_img, self.well_feature_vec_size, type='COVID')
            for well_img in self.well_images
        ]

    def to_frame(self):
        """Return a one-row DataFrame mapping (x, y) position -> Well object,
        or None when the tray has not been processed yet."""
        if self.well_images is None:
            return None
        return pd.DataFrame({
            tuple(coord): [well]
            for coord, well in zip(self.mapper, self.well_objects)
        })
def multi_analysis(path, well_feature_vec_size, should_save_key_states_well, img_rot):
    """
    Process the tray image at *path* and return the processed tray object.
    """
    tray = PreprocessedTray(path, well_feature_vec_size, should_save_key_states_well, img_rot=img_rot)
    tray.process()
    return tray
def multi_analysis_wrap(args):
    """
    Wrapper for multiprocessing.
    """
    # Pool.imap_unordered delivers a single argument tuple; unpack it here.
    return multi_analysis(*args)
def compute_well_matrix_dfs(folder_path, well_feature_vec_size, should_save_key_states_well, well_metadata, max_processes):
    """
    Reads the well image dataset and returns a DataFrame of Well objects,
    one column per analyzed well position.
    """
    if folder_path[-1] != '/':
        folder_path += '/'
    # Iterates through every image in the folder
    img_file_list = glob(folder_path+'*.jpg')
    well_trays = []
    print('Preprocessing tray image')
    with Pool(processes=max_processes) as p:
        with tqdm(total=len(img_file_list)) as pbar:
            for i, well_tray in enumerate(p.imap_unordered(multi_analysis_wrap,
                        product(img_file_list, [well_feature_vec_size], [should_save_key_states_well], [well_metadata.A1_top]))):
                well_trays.append(well_tray)
                pbar.update()
    print('Finding clusters')
    main_df = find_clusters(well_trays)
    main_df.columns = map_columns(main_df)
    # Drop all wells that aren't being analyzed.
    # NOTE(fix): the positional ``axis`` argument to DataFrame.drop was
    # deprecated in pandas 1.0 and removed in pandas 2.0; use columns=.
    wells_analyzed = well_metadata.sample_wells
    main_df.drop(columns=main_df.columns.difference(wells_analyzed), inplace=True)
    return main_df
def subgroup_main_df(folder_path, main_df, group_data):
    """Map every well in main_df to its subgroup via a Subgrouper."""
    return Subgrouper(main_df, group_data, folder_path).map_all()
| sidguptacode/ML_AT_Interpretation | agglutination-detection/well_matrix_creation/compute_well_matrix.py | compute_well_matrix.py | py | 4,172 | python | en | code | 0 | github-code | 13 |
18304161525 | import pexpect
import sys
import ipaddress
def process_wordlist(filepath, shell):
    """Try each password from the wordlist against the spawned ssh session.

    Returns the pexpect shell on successful login, None when the list is
    exhausted or the host denies access.
    """
    # NOTE(fix): the file handle was previously never closed.
    with open(filepath, 'r') as fd:
        for line in fd:
            # NOTE(fix): strip the trailing newline -- sendline() appends its
            # own, and the doubled newline also submitted an empty password.
            password = line.rstrip('\n')
            print("trying password:", password)
            shell.sendline(password)
            response = shell.expect([r'#\$', '(yes/no)?', '[Tt]erminal type', '[Pp]ermission denied'], timeout=5)
            if response == 0:
                return shell
            if response == 1:
                print('Continue connecting to unknown host')
                shell.sendline('yes')
                shell.expect(r'[#\$] ')
                return shell
            if response == 2:
                print('Login OK... need to send terminal type.')
                shell.sendline('vt100')
                shell.expect(r'[#\$] ')
            if response == 3:
                print('Permission denied on host. Can\'t login')
                return None
    return None
def is_valid_ip(ip_addr):
    """Return True when *ip_addr* parses as a valid IPv4/IPv6 address."""
    try:
        ipaddress.ip_address(ip_addr)
        return True
    except ValueError:
        # NOTE(fix): was a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.
        print("Not a valid ip")
        return False
def connect_ssh_session(hostname, ip_addr, wordlist='./test_file'):
    """Spawn ``ssh hostname@ip_addr`` and brute-force the password prompt.

    args:
        hostname: remote user name
        ip_addr: target address (validated by the caller)
        wordlist: path to the password list; the default preserves the
            previously hard-coded './test_file'.
    """
    target = "ssh " + hostname + "@" + ip_addr
    print('target=', target)
    shell = pexpect.spawn(target)
    shell.expect('[Pp]assword:')
    print(shell.after)
    return process_wordlist(wordlist, shell)
if __name__=='__main__':
    # Usage: ssh_login.py <hostname> <ip_addr>
    hostname = sys.argv[1]
    ip_addr = sys.argv[2]
    if not is_valid_ip(ip_addr):
        print("This is not a valid ip")
        exit()
    connection = connect_ssh_session(hostname, ip_addr)
    if connection is not None:
        # Hand the authenticated session over to the user.
        connection.interact()
    exit()
| bhrtdas/Scripts | login-scripts-master/login-scripts-master/ssh_login.py | ssh_login.py | py | 1,613 | python | en | code | 0 | github-code | 13 |
26137422688 | import os
import sys
import struct
import ctypes
import ctypes.util
import functools
# Shared-library name loaded at runtime through ctypes.
LIBNAME = 'libramses.so'
# C-side physical address type (64-bit unsigned).
_physaddr_t = ctypes.c_ulonglong
# Sentinel physical address: all bits set (ulonglong wrap of -1).
BADADDR = _physaddr_t(-1).value
class RamsesError(Exception):
    """Raised when a libramses call fails; carries the library's error string."""
@functools.total_ordering
class DRAMAddr(ctypes.Structure):
    """A DRAM location: (channel, dimm, rank, bank, row, column).

    Behaves as an ordered, hashable, indexable value type supporting
    component-wise + and -. Ordering and hashing use numeric_value.
    """
    _fields_ = [('chan', ctypes.c_ubyte),
                ('dimm', ctypes.c_ubyte),
                ('rank', ctypes.c_ubyte),
                ('bank', ctypes.c_ubyte),
                ('row', ctypes.c_ushort),
                ('col', ctypes.c_ushort)]

    def __str__(self):
        return ('({0.chan:1x} {0.dimm:1x} {0.rank:1x} '
                '{0.bank:1x} {0.row:4x} {0.col:3x})').format(self)

    def __repr__(self):
        return '{0}({1.chan}, {1.dimm}, {1.rank}, {1.bank}, {1.row}, {1.col})'.format(
            type(self).__name__, self)

    def __eq__(self, other):
        if not isinstance(other, DRAMAddr):
            return NotImplemented
        return self.numeric_value == other.numeric_value

    def __lt__(self, other):
        if not isinstance(other, DRAMAddr):
            return NotImplemented
        return self.numeric_value < other.numeric_value

    def __hash__(self):
        return self.numeric_value

    def __len__(self):
        return len(self._fields_)

    def __getitem__(self, key):
        names = [fld[0] for fld in self._fields_]
        if isinstance(key, int):
            return getattr(self, names[key])
        if isinstance(key, slice):
            return tuple(getattr(self, name) for name in names[key])
        raise TypeError('{} object cannot be indexed by {}'.format(
            type(self).__name__, type(key).__name__))

    def same_bank(self, other):
        """True when both addresses fall in the same (chan, dimm, rank, bank)."""
        return ((self.chan, self.dimm, self.rank, self.bank) ==
                (other.chan, other.dimm, other.rank, other.bank))

    @property
    def numeric_value(self):
        """Pack the components into one integer (col lowest, chan highest)."""
        parts = ((self.col, 0), (self.row, 16), (self.bank, 32),
                 (self.rank, 40), (self.dimm, 48), (self.chan, 52))
        return sum(value << shift for value, shift in parts)

    def __add__(self, other):
        if not isinstance(other, DRAMAddr):
            return NotImplemented
        return type(self)(*(a + b for a, b in zip(self, other)))

    def __sub__(self, other):
        if not isinstance(other, DRAMAddr):
            return NotImplemented
        return type(self)(*(a - b for a, b in zip(self, other)))
def _assert_lib():
    """Load the native RAMSES library on first use (idempotent)."""
    if _lib is None:
        init_lib()
class _MappingProps(ctypes.Structure):
    """ctypes mirror of the C mapping-properties struct; layout must match the library."""
    _fields_ = [('granularity', _physaddr_t),
                ('bank_cnt', ctypes.c_uint),
                ('col_cnt', ctypes.c_uint),
                ('cell_size', ctypes.c_uint)]
class _Mapping(ctypes.Structure):
    """ctypes mirror of the C address-mapping struct (function pointers kept as void*)."""
    _fields_ = [('map', ctypes.c_void_p),
                ('map_reverse', ctypes.c_void_p),
                ('twiddle_gran', ctypes.c_void_p),
                ('flags', ctypes.c_int),
                ('arg', ctypes.c_void_p),
                ('props', _MappingProps)]
class MemorySystem(ctypes.Structure):
    """ctypes mirror of the C memory-system struct plus Python convenience wrappers."""
    _fields_ = [('mapping', _Mapping),
                ('nremaps', ctypes.c_size_t),
                ('remaps', ctypes.c_void_p),
                ('nallocs', ctypes.c_size_t),
                ('allocs', ctypes.c_void_p)]
    def load(self, s):
        """Parse a memory-system description string into this struct.

        Raises RamsesError with the library's decoded error message on failure.
        """
        _assert_lib()
        cs = ctypes.c_char_p(s.encode('utf-8'))
        r = _lib.ramses_msys_load(cs, ctypes.byref(self), None)
        if r:
            raise RamsesError('ramses_msys_load error: ' +
                              _lib.ramses_msys_load_strerr(r).decode('ascii'))
    def load_file(self, fname):
        """Load a memory-system description from the file at *fname*."""
        with open(fname, 'r') as f:
            return self.load(f.read())
    def granularity(self, pagesize):
        """Return the mapping granularity for the given page size."""
        _assert_lib()
        return _lib.ramses_msys_granularity(ctypes.byref(self), pagesize)
    def resolve(self, phys_addr):
        """Resolve a physical address to a DRAMAddr."""
        _assert_lib()
        return _lib.ramses_resolve(ctypes.byref(self), phys_addr)
    def resolve_reverse(self, dram_addr):
        """Map a DRAMAddr back to a physical address."""
        _assert_lib()
        return _lib.ramses_resolve_reverse(ctypes.byref(self), dram_addr)
    def __del__(self):
        # Free C-side allocations; the NameError guard covers interpreter
        # shutdown, where the module global `_lib` may already be gone.
        try:
            if _lib is not None:
                _lib.ramses_msys_free(ctypes.byref(self))
        except NameError:
            pass
class _TranslateArg(ctypes.Union):
    """Union argument handed to translation callbacks (pointer / physaddr / int)."""
    _fields_ = [('p', ctypes.c_void_p),
                ('pa', _physaddr_t),
                ('val', ctypes.c_int)]
# C callback signature: translate one address -> physical address.
_TranslateFunc = ctypes.CFUNCTYPE(
    _physaddr_t, ctypes.c_void_p, ctypes.c_int, _TranslateArg
)
# C callback signature: translate a range of pages into an output buffer.
_TranslateRangeFunc = ctypes.CFUNCTYPE(
    ctypes.c_size_t,
    ctypes.c_void_p, ctypes.c_size_t, ctypes.c_void_p, ctypes.c_int, _TranslateArg
)
class _Translation(ctypes.Structure):
    """ctypes mirror of the C translation struct (callbacks + page shift + opaque arg)."""
    _fields_ = [('translate', _TranslateFunc),
                ('translate_range', _TranslateRangeFunc),
                ('page_shift', ctypes.c_int),
                ('arg', _TranslateArg)]
def _nulltrans():
    """Return a fresh zero-initialised _Translation (null callbacks, zero arg).

    Was a lambda assignment (PEP 8 E731); a def keeps the same call interface.
    """
    return _Translation(_TranslateFunc(0), _TranslateRangeFunc(0),
                        0, _TranslateArg(0))
class _VMMap:
def __enter__(self):
return self
def __exit__(self, e_type, e_val, traceb):
return False
def translate(self, addr):
return self.trans.translate(addr, self.trans.page_shift, self.trans.arg)
def translate_range(self, addr, page_count):
obuf = ctypes.create_string_buffer(page_count * ctypes.sizeof(ctypes.c_ulonglong))
cnt = self.trans.translate_range(
addr, page_count, obuf, self.trans.page_shift, self.trans.arg
)
unpacker = struct.iter_unpack('Q', obuf.raw)
return [next(unpacker)[0] for _ in range(cnt)]
class Pagemap(_VMMap):
    """Translator backed by the kernel's /proc/<pid>/pagemap interface."""
    def __init__(self, pid=None):
        target = 'self' if pid is None else str(pid)
        self.pagemap_path = os.path.join('/proc', target, 'pagemap')
        self.trans = None
        self.fd = -1
    def __enter__(self):
        """Open the pagemap file and set up the native translation."""
        _assert_lib()
        self.fd = os.open(self.pagemap_path, os.O_RDONLY)
        self.trans = _nulltrans()
        _lib.ramses_translate_pagemap(ctypes.byref(self.trans), self.fd)
        return self
    def __exit__(self, e_type, e_val, traceb):
        """Release the file descriptor and drop the translation."""
        os.close(self.fd)
        self.fd = -1
        self.trans = None
        return False
class Heurmap(_VMMap):
    """Translator using the library's heuristic (contiguous-memory) mapping."""
    def __init__(self, cont_bits, base):
        _assert_lib()
        self.trans = _nulltrans()
        _lib.ramses_translate_heuristic(ctypes.byref(self.trans), cont_bits, base)
# Module init code
try:
    # Directory containing this module; root for the library search paths.
    _MODULE_DIR = os.path.abspath(os.path.dirname(sys.modules[__name__].__file__))
except AttributeError:
    # No __file__ (e.g. frozen/embedded interpreter): fall back to the CWD.
    _MODULE_DIR = os.getcwd()
_SEARCH_PATHS = [os.path.realpath(os.path.join(_MODULE_DIR, x)) for x in
                 ['.', '..']
                 ]
# Lazily-loaded ctypes handle to the RAMSES shared library (see init_lib()).
_lib = None
def init_lib(extra_paths=None):
    """Load the RAMSES shared library and declare its C function signatures.

    Tries *extra_paths* (if given) followed by the module's default search
    paths; if no candidate works, lets the system loader resolve LIBNAME.
    Sets the module-global ``_lib`` handle.
    """
    global _lib
    if extra_paths is not None:
        search_paths = list(extra_paths) + _SEARCH_PATHS
    else:
        search_paths = _SEARCH_PATHS
    for p in search_paths:
        try:
            _lib = ctypes.cdll.LoadLibrary(os.path.join(p, LIBNAME))
            break
        except OSError:
            # This candidate directory did not work; try the next one.
            # (was `except OSError as e: pass` with `e` never used)
            continue
    else:
        # No candidate path worked; let the system loader find LIBNAME.
        _lib = ctypes.cdll.LoadLibrary(LIBNAME)
    # Declare argument/return types so ctypes marshals values correctly.
    _lib.ramses_msys_load.restype = ctypes.c_int
    _lib.ramses_msys_load.argtypes = [ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p]
    _lib.ramses_msys_load_strerr.restype = ctypes.c_char_p
    _lib.ramses_msys_load_strerr.argtypes = [ctypes.c_int]
    _lib.ramses_resolve.restype = DRAMAddr
    _lib.ramses_resolve.argtypes = [ctypes.c_void_p, _physaddr_t]
    _lib.ramses_resolve_reverse.restype = _physaddr_t
    _lib.ramses_resolve_reverse.argtypes = [ctypes.c_void_p, DRAMAddr]
    _lib.ramses_msys_granularity.restype = ctypes.c_size_t
    _lib.ramses_msys_granularity.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
    _lib.ramses_translate_heuristic.restype = None
    _lib.ramses_translate_heuristic.argtypes = [ctypes.c_void_p, ctypes.c_int, _physaddr_t]
    _lib.ramses_translate_pagemap.restype = None
    _lib.ramses_translate_pagemap.argtypes = [ctypes.c_void_p, ctypes.c_int]
# End module init code
| vusec/ramses | pyramses/__init__.py | __init__.py | py | 8,561 | python | en | code | 9 | github-code | 13 |
29402813688 | import datetime
import csv
import logging
from .models import Project
def do_upload_projects_from_csv(request):
    """Upload project data from csv into the database.

    The given file should be in legal csv format and include the field names
    in its first row.  Returns a short status string for the caller.
    """
    # Get the csv file name parameter from the request.
    file_name = request.POST['fname']
    try:
        csv_file = open(file_name)
    except OSError:  # FileNotFoundError and IOError are both OSError
        return "Wrong file or file path."
    with csv_file:
        csvfile = csv.reader(csv_file, delimiter=',')
        # Skip the first row; it is expected to hold the field names.
        next(csvfile)
        try:
            # Insert every remaining row into the database.
            for row in csvfile:
                # NOTE(review): every column is passed as a lookup kwarg, so this
                # only matches fully-identical rows; presumably survey_id alone
                # should be the lookup key — confirm intended behaviour.
                obj, created = Project.objects.update_or_create(
                    survey_id=row[0],
                    team=row[1],
                    survey_type=row[2],
                    survey_status=row[3],
                    created_date=datetime.datetime.strptime(row[4].strip(), '%Y-%m-%d %H:%M:%S').date(),
                    project_name=row[5],
                    survey_url=row[6],
                )
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; user-facing message kept unchanged.
            logging.info(f"User {request.user} had a problem uploading project from CSV file.")
            return "Problem on uploading projects, please check the file format."
        # The `with` block closes the file; the old redundant finally/close is gone.
    return "Success"
| maozmussel/Demo | myworkspace/upload_data_from_csv.py | upload_data_from_csv.py | py | 1,509 | python | en | code | 0 | github-code | 13 |
8107288810 | from django.urls import path, include
from . import views
# URL routes: page views plus a JSON marker API endpoint; `name` enables reverse().
urlpatterns = [
    path('', views.home, name='home'),
    path('register', views.register, name='register'),
    path('login', views.user_login, name='login'),
    path('logout', views.user_logout, name='logout'),
    path('hello', views.hello, name='hello'),
    path('api/markers', views.sendJsonMarkers, name='sendJsonMarkers'),
    path('language/new', views.new_language, name='new_language'),
    path('profile', views.view_profile, name='profile'),
]
17088445804 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MsgSendErrorData import MsgSendErrorData
class AlipayOpenPublicMessagePreviewSendResponse(AlipayResponse):
    """Response wrapper exposing the ``error_datas`` list of MsgSendErrorData."""
    def __init__(self):
        super(AlipayOpenPublicMessagePreviewSendResponse, self).__init__()
        self._error_datas = None

    @property
    def error_datas(self):
        return self._error_datas

    @error_datas.setter
    def error_datas(self, value):
        # Only list inputs are accepted; dicts are converted to model objects.
        if isinstance(value, list):
            self._error_datas = [
                item if isinstance(item, MsgSendErrorData)
                else MsgSendErrorData.from_alipay_dict(item)
                for item in value
            ]

    def parse_response_content(self, response_content):
        """Populate error_datas from the parsed gateway response, if present."""
        response = super(AlipayOpenPublicMessagePreviewSendResponse, self).parse_response_content(response_content)
        if 'error_datas' in response:
            self.error_datas = response['error_datas']
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayOpenPublicMessagePreviewSendResponse.py | AlipayOpenPublicMessagePreviewSendResponse.py | py | 1,102 | python | en | code | 241 | github-code | 13 |
15597690753 |
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import glob
def format_loss_columns(df, col=""):
    """Convert a stringified torch loss column (e.g. "tensor(0.123, ...)") to float.

    Strips the leading 7-character wrapper, keeps the text up to the first
    comma, and casts to float.  Mutates *df* in place and returns it.
    Fixes: pointless ``df[f'{col}']`` f-string indexing and repeated lookups.
    """
    prefix_len = len('tensor-')  # 7 chars — same length as the "tensor(" prefix being stripped
    stripped = df[col].str[prefix_len:]
    df[col] = stripped.str.split(',').str[0].astype('float')
    return df
# PATHS: output directory for plots and input directory of per-network CSVs.
save_dir = "/home/ainedineen/blurry_vision/pytorch_untrained_models/imagenet/fit_receptive_field/plot_rf_params-Mar23"
path_to_dfs = "/home/ainedineen/blurry_vision/pytorch_untrained_models/imagenet/fit_receptive_field/rf_model_params-Mar23"
# Load every per-network RF-parameter CSV and tag rows with network/epoch.
all_models= []
for file in glob.glob(f"{path_to_dfs}/*.csv"):
    print(f'Filename is {file}')
    # File names look like <network>_e<epoch>_rf_model_params.csv.
    len_file_suffix = len('_rf_model_params.csv')
    network_name = file[:-len_file_suffix]
    network_name= network_name.split("/")[-1]
    epoch = network_name.split("_e")[-1]
    epoch_suffix = "_e" + epoch
    print(epoch_suffix)
    print(network_name) #still has epoch suffix e.g. _e60
    print('network_name')
    network_name_lessEpoch = network_name[:-len(epoch_suffix)]
    print(network_name_lessEpoch)
    rf_param_df = pd.read_csv(file)
    rf_param_df.drop(columns=['Unnamed: 0'], inplace=True)
    rf_param_df['network']= network_name_lessEpoch
    rf_param_df['network_epoch'] = network_name
    rf_param_df['epoch']= int(epoch)
    print(rf_param_df.columns)
    all_models.append(rf_param_df)
# Concatenate all per-network frames into one DataFrame.
allModels_df = pd.concat(all_models, axis=0, ignore_index=True)
print(allModels_df)
# Convert the stringified tensor loss columns to floats.
allModels_df = format_loss_columns(allModels_df,'gabor_model_loss')
allModels_df = format_loss_columns(allModels_df,'dog_model_loss')
print(allModels_df.columns)
# Index(['filename', 'dog_sig1', 'dog_sig2', 'dog_amp1', 'dog_amp2',
#        'dog_center', 'dog_image_size', 'dog_target_std', 'gabor_sigma',
#        'gabor_theta', 'gabor_Lambda', 'gabor_psi', 'gabor_gamma',
#        'gabor_center', 'gabor_image_size', 'gabor_target_std',
#        'dog_model_loss', 'gabor_model_loss', 'best_model', 'network', 'epoch'],
#       dtype='object')
# To compare loss for DOG vs GAB - need to melt the df! to have both in their own column
# use filename!
# # $\sigma$ is the sigma/standard deviation of the Gaussian envelop
# print(rf_param_df['best_model'])
# print(rf_param_df.columns)
# rf_param_df['Network_epoch'] = 'supRN50_conv1_21_g0_60e_e60'
# rf_param_df_gab = rf_param_df[rf_param_df['best_model']=='gab']
# print(rf_param_df_gab['gabor_sigma'])
# # HUE Gab vs DOG...
# # Plot distribution of RFs as Violin Plots?
# rf_param_df_dog = rf_param_df[rf_param_df['best_model']=='dog']
# print(len(rf_param_df_dog['dog_sig1']))
# # gabor_sigma
# Output loss in readable format:
# print(type(rf_param_df['gabor_model_loss'][0]))
# print(rf_param_df['gabor_model_loss'])
# print(rf_param_df['dog_model_loss'])
# # Plot the gaussians......
# # plot
# # hue for which was best
# Change aspect ratio!
# hue_order=subset_of_nets, order=order_of_cat, errorbar=None, palette=sns.color_palette("Paired"), ,
# Violin plot of gabor sigma for every RF, split by which model fitted best.
gabor_best_models = allModels_df[allModels_df['best_model']=='gab']
ax = sns.catplot(data=allModels_df, kind="violin",x="gabor_sigma", y="network_epoch" , hue='best_model',aspect=6)
# plt.ylabel("Shape Bias (%)")
# plt.xlabel("Category")
plt.title(f'Plot Gabor Sigma for all RFs (Seperated by ideal model - DOG vs Gabor Patch)')
# fig.set_figwidth(20)
plt.savefig(f'{save_dir}/Gabor_sigma_plot-violin.png', bbox_inches="tight")
print(allModels_df['gabor_sigma'].describe())
print(gabor_best_models['network_epoch'].value_counts())
# Per-epoch sigma distributions at the two epochs of interest.
for chosen_epoch in [35,60]:
    # chosen_epoch = 35
    allModels_df_epoch35 = allModels_df[allModels_df['epoch']==chosen_epoch]
    ax = sns.catplot(data=allModels_df_epoch35, kind="violin",x="gabor_sigma", y="network_epoch" ,aspect =2.5)
    # plt.ylabel("Shape Bias (%)")
    # plt.xlabel("Category")
    plt.title(f'Plot Gabor Sigma for all RFs at epoch {chosen_epoch}')
    # fig.set_figwidth(20)
    plt.savefig(f'{save_dir}/gabor_sigma_allRFs_epoch-{chosen_epoch}_plot-violin.png', bbox_inches="tight")
# chosen_epoch = 35
print(allModels_df['network'].unique())
# ['supRN50_conv1_21_g0_30e_g4_30e' 'supRN50_conv1_21_g4_30e_g0_30e'
#  'supRN50_conv1_21_g0_60e' 'supRN50_conv1_21_g4_60e']
# Map raw experiment identifiers to readable network names.
rename_chosen_models_exp={
    'supRN50_conv1_21_g0_30e_g4_30e':'HighRes-Blur',
    'supRN50_conv1_21_g4_30e_g0_30e':'Blur-HighRes',
    'supRN50_conv1_21_g0_60e':'HighRes',
    'supRN50_conv1_21_g4_60e':'Blur',
}
subset_of_nets = ['HighRes','Blur-HighRes','Blur']
def rename_and_select_rows_from_df(df, rename_models_dict, subset_of_nets):
    """Rename network identifiers via regex replacement and keep selected networks.

    Mutates *df* in place (regex replace), renames the 'network' column to
    'Network', and returns only the rows whose Network is in *subset_of_nets*.
    """
    # Bug fix: previously replaced using the module-global
    # `rename_chosen_models_exp`, silently ignoring this parameter.
    df.replace(regex=rename_models_dict, inplace=True)
    df = df.rename(columns={"network": "Network"})
    df = df.loc[df['Network'].isin(subset_of_nets)]
    return df
# Restrict to the named networks and plot RF size over training time.
allModels_df_subset = rename_and_select_rows_from_df(allModels_df, rename_chosen_models_exp, subset_of_nets)
print(allModels_df_subset)
ax = sns.catplot(data=allModels_df_subset, kind="violin",y="gabor_sigma", hue="Network", hue_order=subset_of_nets,x='epoch', aspect=1.7, palette=sns.color_palette("Blues_r"))
plt.title(f'Size of Receptive Fields for Networks over Time')
# fig.set_figwidth(20)
# ax.legend(title="Network", loc='center left', bbox_to_anchor=(1, 0.50))
plt.ylabel("Size of Receptive Field ($\sigma$ of modelled Gabor Patch)")
plt.xlabel("Epoch")
plt.savefig(f'{save_dir}/compare_gabor_sigma_allRFs_epoch-35-60_plot-violin.png', bbox_inches="tight")
# ax = sns.catplot(data=rf_param_df, kind="violin",y="gabor_model_loss", x="Network_epoch", hue="best_model", )
# # plt.ylabel("Shape Bias (%)")
# # plt.xlabel("Category")
# plt.title(f'Plot loss for Gabor model - depending on whicg model was preferred')
# # fig.set_figwidth(20)
# plt.savefig(f'{save_dir}/Gabor_loss_plot-violin.png', bbox_inches="tight")
# ax = sns.catplot(data=rf_param_df, kind="violin",y="dog_model_loss", x="Network_epoch", hue="best_model", )
# # plt.ylabel("Shape Bias (%)")
# # plt.xlabel("Category")
# plt.title(f'Plot loss for DOG model - depending on whicg model was preferred')
# # fig.set_figwidth(20)
# plt.savefig(f'{save_dir}/DOG_loss_plot-violin.png', bbox_inches="tight")
# # MELT THIS DF to get #
25518348784 | import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import r2_score, mean_absolute_error
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
import mlflow
data = pd.read_csv("./datasets/laptops.csv", encoding='latin-1')
# Remove extra unnecessary details from the Product column (drop text after "(").
data["Product"] = data["Product"].str.split("(").apply(lambda x: x[0])
# Extract CPU vendor, CPU type and CPU speed into separate columns.
data["Cpu_Speed"] = data["Cpu"].str.split(" ").apply(
    lambda x: x[-1]).str.replace("GHz", "")
data["Cpu_Vender"] = data["Cpu"].str.split(" ").apply(lambda x: x[0])
# NOTE(review): `x[1] == "Celeron" and "Pentium" and "Xeon"` does NOT compare
# against "Pentium"/"Xeon" — non-empty strings are truthy, so the condition is
# just `x[1] == "Celeron"`. Confirm whether Pentium/Xeon were meant to match.
data["Cpu_Type"] = data["Cpu"].str.split(" ").apply(lambda x: x[1:4] if x[1] == "Celeron" and "Pentium" and "Xeon" else (
    x[1:3] if (x[1] == "Core" or x[0] == "AMD") else x[0]))
data["Cpu_Type"] = data["Cpu_Type"].apply(lambda x: ' '.join(x))
# Extract the storage type from the Memory column; keep the numeric part.
split_mem = data['Memory'].str.split(' ', 1, expand=True)
data['Storage Type'] = split_mem[1]
data['Memory'] = split_mem[0]
data["Ram"] = data["Ram"].str.replace("GB", "")
df_mem = data['Memory'].str.split('(\\d+)', expand=True)
data['Memory'] = pd.to_numeric(df_mem[1])
data.rename(columns={'Memory': 'Memory (GB or TB)'}, inplace=True)
# Normalise TB values (1 and 2) to gigabytes.
data['Memory (GB or TB)'] = data['Memory (GB or TB)'].apply(
    lambda x: 1024 if x == 1 else x)
data['Memory (GB or TB)'] = data['Memory (GB or TB)'].apply(
    lambda x: 2048 if x == 2 else x)
data.rename(columns={'Memory (GB or TB)': 'Storage (GB)'}, inplace=True)
data["Weight"] = data["Weight"].str.replace("kg", "")
# Extract GPU vendor and GPU type into separate columns.
gpu_distribution_list = data["Gpu"].str.split(" ")
data["Gpu_Vender"] = data["Gpu"].str.split(" ").apply(lambda x: x[0])
data["Gpu_Type"] = data["Gpu"].str.split(" ").apply(lambda x: x[1:])
data["Gpu_Type"] = data["Gpu_Type"].apply(lambda x: ' '.join(x))
# Derive binary IPS and Touchscreen features from ScreenResolution.
data['Touchscreen'] = data['ScreenResolution'].apply(
    lambda x: 1 if 'Touchscreen' in x else 0)
data['Ips'] = data['ScreenResolution'].apply(lambda x: 1 if 'IPS' in x else 0)
# Categorising the operating system
def cat_os(inp):
    """Collapse raw OpSys values into three categories: Windows, MacOS, other.

    Replaces the chained `==` / `or` comparisons with membership tests.
    """
    if inp in ('Windows 10', 'Windows 7', 'Windows 10 S'):
        return 'Windows'
    if inp in ('macOS', 'Mac OS X'):
        return 'MacOS'
    return 'Others/No OS/Linux'
data['OpSys'] = data['OpSys'].apply(cat_os)
# Keep only the useful columns (drop the rest).
data = data.reindex(
    columns=[
        "Company",
        "TypeName",
        "Inches",
        "Touchscreen",
        "Ips",
        "Cpu_Vender",
        "Cpu_Type",
        "Ram",
        "Storage (GB)",
        "Storage Type",
        "Gpu_Vender",
        "Gpu_Type",
        "Weight",
        "OpSys",
        "Price_euros"])
# Cast the numeric columns parsed from strings: Ram, Storage, Weight.
data["Ram"] = data["Ram"].astype("int")
data["Storage (GB)"] = data["Storage (GB)"].astype("int")
data["Weight"] = data["Weight"].astype("float")
# Split the data into features and target (price).
X = data.drop(columns=['Price_euros'])
y = data['Price_euros']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.15, random_state=2)
# Hyper-parameters come from the command line: n_estimators max_samples max_features max_depth.
n_estimators = int(sys.argv[1])
max_samples = float(sys.argv[2])
max_features = float(sys.argv[3])
max_depth = None if sys.argv[4] == 'None' else int(sys.argv[4])
with mlflow.start_run():
    # One-hot encode the categorical columns; pass the rest through untouched.
    Transformer = ColumnTransformer(
        transformers=[
            ('col_tnf', OneHotEncoder(
                sparse=False, handle_unknown='ignore'), [
                0, 1, 5, 6, 9, 10, 11, 13])], remainder='passthrough')
    Regressor = RandomForestRegressor(
        n_estimators=n_estimators,
        max_samples=max_samples,
        max_features=max_features,
        max_depth=max_depth)
    pipe = Pipeline([
        ('transformer', Transformer),
        ('Regressor', Regressor)
    ])
    pipe.fit(X_train, y_train)
    y_pred = pipe.predict(X_test)
    # Bug fix: results were previously bound to the names `r2_score` and
    # `mean_absolute_error`, shadowing the imported sklearn functions.
    r2 = r2_score(y_test, y_pred)
    mae = mean_absolute_error(y_test, y_pred)
    # Log hyper-parameters, metrics and the fitted pipeline to MLflow.
    mlflow.log_param("n_estimators", n_estimators)
    mlflow.log_param("max_samples", max_samples)
    mlflow.log_param("max_features", max_features)
    mlflow.log_param("max_depth", max_depth)
    mlflow.log_metric("r2_score", r2)
    mlflow.log_metric("mae", mae)
    mlflow.sklearn.log_model(pipe, "Predictor")
    print('R2 score', r2)
    print('MAE', mae)
| ysfesr/Laptop-Price-Prediction | Project/train.py | train.py | py | 4,822 | python | en | code | 3 | github-code | 13 |
30567836553 | import random
import Characters
import PlayerStrings
import BossStrings
import DropStrings
import CharactersGenerator
import InteractionParameters
import FightCycle
import FightStrings
class BossList():
    """Boss name pools by difficulty; names are removed from a pool once used."""
    list_easy = [BossStrings.Palich.name, BossStrings.Chaikovskii.name,
                 BossStrings.Viv.name, BossStrings.Sasha.name, BossStrings.Tvar.name,
                 BossStrings.Randomich.name, BossStrings.Kitty.name]
    list_medium = [BossStrings.Inkvisizia.name, BossStrings.DocLeha.name,
                   BossStrings.DrunkLeha.name, BossStrings.Mel.name,
                   BossStrings.Redhead.name, BossStrings.Sledovatel.name]
    list_hard = [BossStrings.Doner.name, BossStrings.BlackStas.name,
                 BossStrings.Dron.name, BossStrings.Glad.name, BossStrings.Shiva.name]
def boss_difficult_choice(win_rate):
    """Pick the next boss name for the player's win count.

    Bosses are drawn without replacement from the easy/medium/hard pools;
    at 9 wins the final boss is selected.  Stores the result in the module
    global ``boss_name``.
    """
    global boss_name
    if win_rate < 3:
        boss_name = random.choice(BossList.list_easy)
        BossList.list_easy.remove(boss_name)
    elif win_rate < 6:  # 3..5 — the old `and win_rate >= 3` was redundant in the elif chain
        boss_name = random.choice(BossList.list_medium)
        BossList.list_medium.remove(boss_name)
    elif win_rate < 9:  # 6..8
        boss_name = random.choice(BossList.list_hard)
        BossList.list_hard.remove(boss_name)
    elif win_rate == 9:
        # Bug fix: was `[BossStrings.Makar.name]` (a one-element list), which
        # could never compare equal to the string boss names used elsewhere.
        boss_name = BossStrings.Makar.name
def boss_prelude_skill_activation(boss_name):
    # Activate the boss's pre-fight ability (if any) and set the module-global
    # `prelude_skill_message` to False or to the message describing the effect.
    global prelude_skill_message
    prelude_skill_message = False
    # Palich: silences the player.
    if boss_name == BossStrings.Palich.name:
        CharactersGenerator.player.silence = True
        prelude_skill_message = BossStrings.Palich.prelude_skill
    # Redhead: poisons the player.
    elif boss_name == BossStrings.Redhead.name:
        CharactersGenerator.player.poison = True
        prelude_skill_message = BossStrings.Redhead.prelude_skill
    # Sledovatel (the Investigator): reacts to drugs and high damage.
    elif boss_name == BossStrings.Sledovatel.name:
        drugs = DropStrings.Buffs.marki_name, \
            DropStrings.Items.madam_name, \
            DropStrings.Items.shiga_name
        cross_check = [x for x in drugs if x in CharactersGenerator.player.all_items]
        # Reduce the player's damage if it exceeds 500.
        if CharactersGenerator.player.damage > 500:
            CharactersGenerator.player.damage_down_procent \
                (InteractionParameters.Boss.sledovatel_damage_down)
            prelude_skill_message = BossStrings.Sledovatel.damage_down_skill()
        # Raise the boss's stacking "wanted" meter if the player carries drugs.
        if CharactersGenerator.player.mitya_elexir_count > 0 or len(cross_check) > 0:
            CharactersGenerator.player.police_wanted += \
                InteractionParameters.Boss.sledovatel_drugs
            prelude_skill_message += '\n' + BossStrings.Sledovatel.drugs_check()
    # Dron: gains skill meter per item the player carries (+extra for dron meat).
    elif boss_name == BossStrings.Dron.name:
        CharactersGenerator.boss.skill_meter_level += \
            len(CharactersGenerator.player.all_items) * 5
        prelude_skill_message = BossStrings.Dron.bratishki_interaction()
        if DropStrings.Buffs.dron_meat_name in CharactersGenerator.player.all_items:
            CharactersGenerator.boss.skill_meter_level += \
                CharactersGenerator.boss.skill_meter_level_up
            prelude_skill_message += '\n' + BossStrings.Dron.dron_meat_interaction()
    # Doner: buffed when the player holds the "everlast" item.
    elif boss_name == BossStrings.Doner.name and \
        DropStrings.Buffs.everlast_name in CharactersGenerator.player.all_items:
        CharactersGenerator.boss.health_up_procent(InteractionParameters.Boss.doner_everlast)
        CharactersGenerator.boss.damage_up_procent(InteractionParameters.Boss.doner_everlast)
        prelude_skill_message = BossStrings.Doner.everlast_interaction()
    # BlackStas: gains damage per elixir when the player is Mitya.
    elif boss_name == BossStrings.BlackStas.name and \
        CharactersGenerator.player.name == PlayerStrings.Mitya.name:
        CharactersGenerator.boss.damage_up(CharactersGenerator.player.mitya_elexir_count *
                                           CharactersGenerator.player.mitya_damage_up_skill_value)
        prelude_skill_message = BossStrings.BlackStas.mitya_interaction
def chance(x):
    """Return True with probability (x - 1)%, i.e. when a 1-100 roll is below *x*.

    Equivalent to the original ``randint(1, 100) in range(1, x)`` membership
    test (range(1, x) contains exactly the rolls 1..x-1), stated directly.
    """
    return random.randint(1, 100) < x
def bleeding():
    """Apply bleeding damage to player and/or boss and queue the fight messages."""
    # The player bleeds only if afflicted and not immune.
    if CharactersGenerator.player.bleeding == True and \
        CharactersGenerator.player.immunity == False:
        CharactersGenerator.player.health_down(Characters.Pers.bleeding_damage)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.bleeding(True))
    # The boss bleeds unconditionally if afflicted; queue the message.
    if CharactersGenerator.boss.bleeding == True:
        CharactersGenerator.boss.health_down(Characters.Pers.bleeding_damage)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.bleeding(False))
def poison():
    """Apply percentage poison damage; poison strength grows each tick."""
    # The player takes poison damage only if afflicted and not immune.
    if CharactersGenerator.player.poison == True and \
        CharactersGenerator.player.immunity == False:
        CharactersGenerator.player.health_down_procent(Characters.Pers.poison_damage)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.poison(True))
    # The boss takes poison damage if afflicted; queue the message.
    if CharactersGenerator.boss.poison == True:
        CharactersGenerator.boss.health_down_procent(Characters.Pers.poison_damage)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.poison(False))
    # Poison damage increases by 10 for the rest of the fight.
    Characters.Pers.poison_damage += 10
def regeneration():
    """Heal any combatant with regeneration and queue the fight messages."""
    # Player regeneration: heal and queue the message.
    if CharactersGenerator.player.regeneration > 0:
        CharactersGenerator.player.health_up(Characters.Pers.regeneration_value)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.regeneration(True))
    # Boss regeneration: heal and queue the message.
    if CharactersGenerator.boss.regeneration > 0:
        CharactersGenerator.boss.health_up(Characters.Pers.regeneration_value)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.regeneration(False))
def lifesteal():
    """Heal the player by their lifesteal percentage of the last attack's damage."""
    global lifesteal_heal
    # Apply lifesteal only if the player has any.
    if CharactersGenerator.player.lifesteal > 0:
        lifesteal_heal = FightCycle.player_attack_damage * \
            CharactersGenerator.player.lifesteal // 100
        CharactersGenerator.player.health_up(lifesteal_heal)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.lifesteal())
def boss_returnal():
    """Reflect part of the player's attack back when the boss has a return value."""
    global returnal_damage
    # Apply the reflection only if the boss has one.
    if CharactersGenerator.boss.returnal_value > 0:
        returnal_damage = FightCycle.player_attack_damage * \
            CharactersGenerator.boss.returnal_value // 100
        CharactersGenerator.player.health_down(returnal_damage)
        FightCycle.Attack_messages.messages_pool.append(FightStrings.Banners.returnal(False))
9255633778 | from tkinter import *
from admin import Admin
from admin_gui import admin_gui
from manager import Manager
from managergui import Manager_Gui
from user import User
from usergui import User_Gui
class System_Gui(Tk):
    """Login window for the estate management system.

    Looks up the entered username and opens the matching role-specific GUI
    (admin, manager or plain user), destroying this window first.
    """
    def __init__(self, estate_system):
        super().__init__()
        self.estate_system = estate_system
        self.title("Login")
        self.configure(bg="#eee",
                       height=500,
                       width=500)
        self.add_username_label()
        self.add_username_entry()
        self.add_submit_button()
    def add_username_label(self):
        # NOTE(review): widgets are created without an explicit master, so they
        # attach to the default root (this window, the only Tk) — confirm intended.
        self.username_label = Label()
        self.username_label.configure(text="Enter Username")
        self.username_label.pack()
    def add_username_entry(self):
        self.username_entry = Entry()
        self.username_entry.pack()
    def add_submit_button(self):
        self.submit_button = Button()
        self.submit_button.configure(text="Submit")
        self.submit_button.pack()
        self.submit_button.bind("<ButtonRelease-1>", self.submit_button_clicked)
    def submit_button_clicked(self, event):
        """Open the GUI matching the entered username's role."""
        un = self.username_entry.get()
        for user in self.estate_system.users:
            if user.username == un:
                # isinstance checks ordered Admin -> Manager -> User,
                # presumably because Admin/Manager subclass User — confirm.
                if isinstance(user, Admin):
                    self.destroy()
                    self.estate_system.current_user = user
                    admingui = admin_gui(self.estate_system)
                    admingui.mainloop()
                elif isinstance(user, Manager):
                    self.destroy()
                    self.estate_system.current_user = user
                    managergui = Manager_Gui(self.estate_system)
                    managergui.mainloop()
                elif isinstance(user, User):
                    self.destroy()
                    self.estate_system.current_user = user
                    usergui = User_Gui(self.estate_system)
                    usergui.mainloop()
| cwilson98/projects | Estate Management System/systemgui.py | systemgui.py | py | 1,970 | python | en | code | 0 | github-code | 13 |
16502343137 | """
1. 엑셀 데이터 읽고 쓰기
2. 엑셀 데이터 편집하기
3. 엑셀 데이터 출력하기
4. XML 기상청 날씨 데이터 지역별 파싱 및 출력
5. 본인 거주 지역 날씨 정보 XML 파싱 및 출력
"""
import xml.etree.ElementTree as ET
from urllib.request import urlopen
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
# Korea Meteorological Administration RSS URL
URL = 'http://www.weather.go.kr/weather/lifenindustry/sevice_rss.jsp'
html = urlopen(URL)
bs = BeautifulSoup(html,'html.parser')
province = [] # holds each region's weather info (a list of DataFrames)
# Saves one city's weather information.
def residence_weather(city):
    """Write the rows of weather.xlsx whose city name equals *city* to '<city>_weather.xlsx'."""
    info = pd.read_excel(r'weather.xlsx')
    # Vectorised boolean mask replaces the old manual enumerate/iloc loop;
    # astype(str) mirrors the previous `city == str(data)` comparison.
    new_datas = info[info['도시명'].astype(str) == city]
    # Save the selected rows to a new Excel file.
    new_datas.to_excel('./'+city+'_weather.xlsx',sheet_name='Sheet1',index=False)
# 한 도시의 날씨 정보를 얻는다.
def getInfo(city_info):
    """Build a DataFrame of one city's daily weather entries.

    Each <data> element contributes one row: province, city, then the date,
    weather, min temp, max temp and rain-probability fields.
    """
    header = ['시/도','도시명','날짜','날씨','최저 기온','최고 기온','강수 확률']
    province_text = city_info.find('province').text
    city_text = city_info.find('city').text
    rows = []
    for entry in city_info.find_all('data'):
        fields = entry.text.strip().split('\n')
        # fields[1:5] + [fields[6]] == indices 1..6 except the unused index 5.
        rows.append([province_text, city_text] + fields[1:5] + [fields[6]])
    return pd.DataFrame(rows, columns=header)
def city_weather(url):
    """Fetch one region's XML feed and append each city's weather to `province`."""
    # Parse this region's XML feed.
    global province
    city = urlopen(url)
    xml = BeautifulSoup(city,'lxml')
    # Each <location> holds dates (am/pm), weather, min/max temperature, rain chance.
    city_info = xml.find_all('location')
    for i in city_info:
        province.append(getInfo(i)) # store each city's weather (a DataFrame) in `province`
# 3. 엑셀로 정리된 지역별 날씨를 출력하는 함수이다.
def print_weather():
    """Print the per-region weather table saved in weather.xlsx."""
    info = pd.read_excel('weather.xlsx')
    # Print all the information.
    print(info)
    # Print only the city name, date, min temperature and max temperature.
    ex_info = info[['도시명','날짜','최저 기온','최고 기온']]
    print(ex_info)
# 엑셀 파일을 편집하는 함수이다.
def edit_excel():
    """Add a day-of-month column extracted from the date and save a new workbook."""
    df = pd.read_excel('weather.xlsx')
    # Characters 8-9 of the date string hold the day of the month.
    df['일'] = df['날짜'].str.slice(start=8,stop=10)
    # (Removed a no-op `df.head()` whose return value was discarded.)
    df.to_excel('edit_weather.xlsx')
# Fetch each region's weather and save it into one Excel sheet
# (currently disabled: the block below is a no-op string literal).
"""
for i in range(2,11):
    city_xml = bs.find('a',{'id':'dfs_rss'+str(i)}).get('href')
    city_weather(city_xml) # 시도군별 링크 전송
# DataFrame type의 각 지역의 날씨정보들을 모두 합친다.
df = pd.concat(province, axis=0,ignore_index=True)
# 하나의 엑셀파일로 만든다.
df.to_excel('weather.xlsx',index=False)
name = input('거주지를 입력하세요 : ')
residence_weather(name)
print_weather()
"""
# Only the Excel-editing step runs in this version of the script.
edit_excel()
| So-chankyun/Crawling_Study | week3/extract_weather.py | extract_weather.py | py | 4,088 | python | ko | code | 0 | github-code | 13 |
class Solution:
    def increasingTriplet(self, nums: List[int]) -> bool:
        """Return True if nums has indices i < j < k with nums[i] < nums[j] < nums[k].

        Single linear scan tracking the two smallest increasing-chain
        endpoints seen so far: O(n) time, O(1) extra space.
        """
        if len(nums) < 3:
            return False
        lowest = middle = math.inf
        for value in nums:
            if value <= lowest:
                lowest = value
            elif value <= middle:
                middle = value
            else:
                # value extends a chain lowest < middle < value.
                return True
        return False
| aakanksha-j/LeetCode | 334. Increasing Triplet Subsequence/linear_scan_constant_space_1.py | linear_scan_constant_space_1.py | py | 565 | python | en | code | 0 | github-code | 13 |
# Read an integer array and a delta; count elements equal to min(array) + |delta|.
array = list(map(int, input("Введите массив:").split()))
delta = input("Введите delta:")
try:
    delta = int(delta)
except ValueError:
    print("Ошибка")
    exit()
c = abs(delta)
a = min(array)
# sum() over a generator avoids materialising the list the old len([...]) built.
b = sum(1 for x in array if x == a + c)
print(b)
3139265197 | import os
from django_template.setting_basic import BASE_DIR
DEBUG = False  # production settings: DEBUG must stay off here
# -- add --
import logging
import django.utils.log
import logging.handlers
# -- modify --
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s %(levelname)s %(pathname)s %(filename)s %(module)s %(funcName)s %(lineno)d: %(message)s'
        },
        'simple': {
            'format': '%(asctime)s %(levelname)s : %(message)s'
        },
        # log formats
    },
    'filters': {
    },
    'handlers': {
        'default': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'log/all.log',  # log output file
            'maxBytes': 50000 * 1024,
            'backupCount': 20,
            'formatter': 'simple',
        },
        'request_handler': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'log/request.log',
            'maxBytes': 50000 * 1024,
            'backupCount': 20,
            'formatter': 'simple',
        },
        'error': {
            'level': 'ERROR',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'log/error.log',
            'maxBytes': 50000 * 1024,
            'backupCount': 20,
            'formatter': 'standard',
        },
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'simple'
        },
        'mydb': {
            'level': 'DEBUG',
            'class': 'apps.user.handlers.DatabaseHandler',
        },
    },
    'loggers': {
        'django': {
            'handlers': ['console', 'default', 'mydb'],
            'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
            'propagate': True
        },
        'django.security.DisallowedHost': {
            'handlers': ['error', 'mydb'],
            'propagate': False,
        },
        'django.request': {
            'handlers': ['error'],
            'level': 'WARN',
            'propagate': False
        },
        'django.print': {
            'handlers': ['console', 'error'],
            'level': 'DEBUG',
            'propagate': False
        },
        'log_request': {
            'handlers': ['request_handler'],
            'level': 'DEBUG'
        }
    }
}
# NOTE(review): wildcard ALLOWED_HOSTS in a production settings file — confirm intended.
ALLOWED_HOSTS = ['*']
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'django',
        'USER': 'django',
        # NOTE(review): hard-coded DB credentials; consider env vars / a secrets store.
        'PASSWORD': 'django_template',
        'HOST': '192.168.1.35',
        'PORT': '9306',
    },
}
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "collectedstatic")
# Sessions: refresh on every request, expire on browser close or after 2 hours.
SESSION_SAVE_EVERY_REQUEST = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 7200
| mo891916/django_template | django_template/setting_prod.py | setting_prod.py | py | 2,808 | python | ar | code | 1 | github-code | 13 |
38796447020 | import pandas as pd
import numpy as np
np.random.seed(2)
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
def normalize_data(X_train, test):
    """Scale pixel values from the raw [0, 255] range down to [0, 1].

    Applies the same scaling to both the training and the test data and
    returns the pair (X_train, test).
    """
    scale = 255.0
    return X_train / scale, test / scale
def reshape_data(X_train, test):
    """Reshape flat pixel rows into image tensors.

    Each row of the input DataFrames holds 784 pixels; the output arrays
    have shape (samples, 28, 28, 1): 28px height, 28px width and one
    color channel.
    """
    image_shape = (-1, 28, 28, 1)
    return X_train.values.reshape(image_shape), test.values.reshape(image_shape)
def split_train_and_validation_data(X_train, Y_train):
    """Hold out 10% of the training data as a validation split.

    Uses a fixed random seed so the split is reproducible; returns
    (X_train, X_val, Y_train, Y_val) as produced by train_test_split.
    """
    return train_test_split(X_train, Y_train, test_size=0.1, random_state=2)
# Load the raw MNIST CSV splits (train has a 'label' column, test does not).
train = pd.read_csv('data/raw/train.csv')
test = pd.read_csv('data/raw/test.csv')
# getting the label (the "response") from the train data
Y_train = train['label']
# label encoding: digits 0-9 become one-hot vectors of length 10
Y_train = to_categorical(Y_train, num_classes=10)
# removing the label from the train data
X_train = train.drop(labels=["label"], axis=1)
# Scale pixels to [0, 1] and reshape rows into (samples, 28, 28, 1) tensors.
X_train, test = normalize_data(X_train, test)
X_train, test = reshape_data(X_train, test)
# Hold out 10% of the training data for validation (fixed random seed).
X_train, X_val, Y_train, Y_val = split_train_and_validation_data(
    X_train, Y_train
)
# Persist the processed arrays for the training step.
np.savez(
    'data/processed/processed_data',
    X_train=X_train,
    X_val=X_val,
    Y_train=Y_train,
    Y_val=Y_val
)
| lievi/cnn_tutorial | src/data/process_mnist.py | process_mnist.py | py | 1,475 | python | en | code | 0 | github-code | 13 |
73704427857 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from record import views
# URL namespace for this app; reverse() resolves names as 'record:<name>'.
app_name = 'record'
class CustomDefaultRouter(DefaultRouter):
    """DefaultRouter variant whose generated routes accept an optional trailing slash."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # '/?' makes the trailing slash optional in every generated URL
        # pattern (DefaultRouter normally uses either '/' or '').
        self.trailing_slash = '/?'
# Register the viewsets; the custom router makes the trailing '/' optional.
router = CustomDefaultRouter()
router.register('emotions', views.RecordViewSet)
router.register('insights', views.InsightViewSet)

# The name field generated for each route is what reverse() maps against.
# FIX: removed dataset-extraction residue that was fused onto the closing
# bracket and made this module a syntax error.
urlpatterns = [
    path('', include(router.urls))
]
8630176581 | from Training.util.binary import dc,assd
import os
import numpy as np
from Training.data_process.data_process_func import load_nifty_volume_as_array
def one_hot(img, nb_classes):
    """One-hot encode an integer label array.

    Returns a float array of shape (nb_classes, *img.shape) where channel
    c is 1.0 wherever ``img == c`` and 0.0 elsewhere.
    """
    channels = [np.where(img == c, 1.0, 0.0) for c in range(nb_classes)]
    return np.stack(channels, axis=0)
def evaluation(folder, evaluate_dice, evaluate_assd):
    """Evaluate segmentations for every patient directory under `folder`.

    For each patient, loads the predicted labels ('label.npy') and the
    reference segmentation ('InterSeg.nii.gz'), one-hot encodes both into
    5 classes, and computes per-class Dice and/or ASSD. Per-patient values
    and the mean/std across patients are printed and written as text files
    into `folder`.

    Args:
        folder: directory whose subdirectories are patient cases.
        evaluate_dice: if True, compute and save the Dice coefficient.
        evaluate_assd: if True, compute and save the average symmetric
            surface distance (ASSD).
    """
    patient_list = os.listdir(folder)
    dice_all_data = []
    assd_all_data = []
    for patient in patient_list:
        s_name = os.path.join(folder, patient + '/label.npy')
        g_name = os.path.join(folder, patient + '/InterSeg.nii.gz')
        s_volume = np.int64(np.load(s_name))
        g_volume = load_nifty_volume_as_array(g_name)
        # One-hot encode so each of the 5 classes can be scored separately.
        s_volume = one_hot(s_volume, nb_classes=5)
        g_volume = one_hot(g_volume, nb_classes=5)
        if evaluate_dice:
            dice = []
            for i in range(5):
                dice.append(dc(g_volume[i], s_volume[i]))
            dice_all_data.append(dice)
            print(patient, dice)
        if evaluate_assd:
            Assd = []
            for i in range(5):
                Assd.append(assd(g_volume[i], s_volume[i]))
            assd_all_data.append(Assd)
            print(patient, Assd)
    if evaluate_dice:
        dice_all_data = np.asarray(dice_all_data)
        dice_mean = [dice_all_data.mean(axis=0)]
        dice_std = [dice_all_data.std(axis=0)]
        np.savetxt(folder + '/dice_all.txt', dice_all_data)
        np.savetxt(folder + '/dice_mean.txt', dice_mean)
        np.savetxt(folder + '/dice_std.txt', dice_std)
        print('dice mean ', dice_mean)
        print('dice std ', dice_std)
    if evaluate_assd:
        assd_all_data = np.asarray(assd_all_data)
        assd_mean = [assd_all_data.mean(axis=0)]
        assd_std = [assd_all_data.std(axis=0)]
        # BUG FIX: these three previously wrote to dice_*.txt, silently
        # overwriting the Dice results whenever both metrics were computed.
        np.savetxt(folder + '/assd_all.txt', assd_all_data)
        np.savetxt(folder + '/assd_mean.txt', assd_mean)
        np.savetxt(folder + '/assd_std.txt', assd_std)
        print('assd mean ', assd_mean)
        print('assd std ', assd_std)
# Metric toggles used when this module is run as a script.
evaluate_dice=False
evaluate_assd=True
if __name__ =='__main__':
    # Evaluate every patient case under the validation folder.
    folder = '/lyc/Head-Neck-CT/3D_data/valid'
    evaluation(folder, evaluate_dice, evaluate_assd)
| HiLab-git/SepNet | util/visualization/evalution.py | evalution.py | py | 2,262 | python | en | code | 18 | github-code | 13 |
11642118382 | import json
import pytest
from unittest.mock import patch, mock_open, MagicMock
from checkout_and_payment import checkoutAndPayment, update_users_json, products
@pytest.fixture
def mock_open_users_file():
    """Fixture to mock opening of the users file with predefined user data."""
    user_records = [
        {"username": "user1", "wallet": 100},
        {"username": "user2", "wallet": 200},
    ]
    return mock_open(read_data=json.dumps(user_records))
@pytest.fixture
def mock_input(mocker):
    """Fixture to mock user input; tests drive it via side_effect."""
    return mocker.patch('builtins.input')
@pytest.fixture
def mock_check_cart(mocker):
    """Fixture to mock cart checking functionality (always reports success)."""
    return mocker.patch('checkout_and_payment.check_cart', return_value=True)
@pytest.fixture
def mock_logout(mocker):
    """Fixture to mock logout functionality (always succeeds)."""
    return mocker.patch('checkout_and_payment.logout', return_value=True)
@pytest.fixture
def mock_update_users_json(mocker):
    """Fixture to mock update_users_json so tests never touch the real users file."""
    return mocker.patch('checkout_and_payment.update_users_json')
def capture_write_calls(mock_file):
    """Instrument mock_file.write so every written chunk is recorded.

    Returns the (initially empty) list that accumulates the data passed to
    each subsequent ``mock_file.write(...)`` call; the original write mock
    is still invoked so its call bookkeeping stays intact.
    """
    captured = []
    delegate = mock_file.write

    def recording_write(chunk):
        captured.append(chunk)
        return delegate(chunk)

    mock_file.write = MagicMock(side_effect=recording_write)
    return captured
def test_update_users_json_existing_user(mock_open_users_file):
    """Test updating an existing user's wallet amount in the JSON."""
    with patch("builtins.open", mock_open_users_file) as mock_file:
        # mock_open returns the same handle for every call, so the writes
        # made inside update_users_json go through the handle we instrument.
        content = capture_write_calls(mock_file())
        update_users_json("user1", 150)
        # json.dump may emit many small chunks; join them before parsing.
        assert json.loads(''.join(content)) == [{"username": "user1", "wallet": 150}, {"username": "user2", "wallet": 200}]
def test_update_users_json_new_user(mock_open_users_file):
    """Test adding a new user to the JSON."""
    with patch("builtins.open", mock_open_users_file) as mock_file:
        content = capture_write_calls(mock_file())
        update_users_json("new_user", 300)
        # The new user is appended after the existing records.
        assert json.loads(''.join(content)) == [{"username": "user1", "wallet": 100}, {"username": "user2", "wallet": 200}, {"username": "new_user", "wallet": 300}]
def test_update_users_json_exceptions():
    """Test the behavior of update_users_json with invalid input or file."""
    # Corrupt JSON should surface as ValueError (JSONDecodeError subclass).
    with patch("builtins.open", mock_open(read_data="not valid json")), pytest.raises(ValueError):
        update_users_json("user1", 150)
    # NOTE(review): open() is not patched here, so this relies on
    # "nonexistent_file.json" genuinely not existing on disk — confirm
    # update_users_json accepts a filename as its third argument.
    with pytest.raises(FileNotFoundError):
        update_users_json("user1", 150, "nonexistent_file.json")
@pytest.mark.parametrize("invalid_login_info", [
    "invalid_string", 12345, 4.56,
    {"username": "testuser"}, {"wallet": 100},
    {"user": "testuser", "wallet": 100}
])
def test_checkout_and_payment_invalid_login_info(invalid_login_info, mock_input, mock_check_cart, mock_logout, mock_update_users_json):
    """Test checkoutAndPayment with various invalid login info formats.

    Covers non-dict values and dicts missing the required username/wallet
    keys; every case must raise TypeError.
    """
    with pytest.raises(TypeError):
        checkoutAndPayment(invalid_login_info)
@pytest.mark.parametrize("mock_input_value, expected_output", [
    (['l'], "You have been logged out."),
    (['c', 'l'], "You have been logged out."),
    (['1', 'c', 'l'], "Apple added to your cart."),
    ([str(len(products) + 1), 'l'], "\nInvalid input. Please try again."),
    (['apple', 'l'], "\nInvalid input. Please try again."),
    (['0.75', 'l'], "\nInvalid input. Please try again."),
    (['[]', 'l'], "\nInvalid input. Please try again.")
])
def test_checkout_and_payment_scenarios(mock_input_value, expected_output, mock_input, mock_check_cart, mock_logout, mock_update_users_json, capsys):
    """Test various scenarios in checkoutAndPayment based on different user inputs.

    Each case feeds a scripted sequence of menu inputs (ending in 'l' to
    log out) and checks that the expected message appears on stdout.
    """
    mock_input.side_effect = mock_input_value
    checkoutAndPayment({"username": "testuser", "wallet": 100})
    captured = capsys.readouterr()
    assert expected_output in captured.out
    mock_logout.assert_called_once()
def test_checkout_and_payment_print_products(mock_input, mock_check_cart, mock_logout, mock_update_users_json, capsys):
    """Test that all products are printed correctly in the checkout process."""
    # Log out immediately; the product listing is printed before the prompt.
    mock_input.side_effect = ['l']
    checkoutAndPayment({"username": "testuser", "wallet": 100})
    captured = capsys.readouterr()
    for product in products:
        assert f"{product.name} - ${product.price}" in captured.out
    mock_logout.assert_called_once()
def test_checkout_and_payment_session_management(mock_input, mock_check_cart, mock_logout, mock_update_users_json, capsys):
    """Test the management of user session in repeated calls of checkoutAndPayment."""
    mock_input.side_effect = ['1', 'l', '2', 'l']
    user_info = {"username": "testuser", "wallet": 100}
    # Test with two consecutive calls to simulate user session
    checkoutAndPayment(user_info)
    checkoutAndPayment(user_info)
    captured = capsys.readouterr()
    assert captured.out.count("You have been logged out.") == 2
    assert mock_logout.call_count == 2
| guritaalexandru/SoftwareTestingA1 | A1_unit_testing_students/test_checkout_and_payment.py | test_checkout_and_payment.py | py | 5,064 | python | en | code | 0 | github-code | 13 |
# BOJ 4263: minimum-damage path collecting items on a grid.
#
# Reads an n x m damage grid and d item coordinates from stdin. Starting at
# (0, 0) (whose damage is paid once up front), runs Dijkstra to each item in
# input order, accumulates the minimum extra damage, and prints the total.
# Movement rule: left/right anywhere; up/down only in the edge columns.
import sys
import heapq

right_left = [(0, 1), (0, -1)]  # horizontal moves, always allowed
up_down = [(-1, 0), (1, 0)]     # vertical moves, edge columns only
n, m = map(int, sys.stdin.readline().split())
matrix = list()
for _ in range(n):
    matrix.append(list(map(int, sys.stdin.readline().split())))
d = int(sys.stdin.readline())
item_list = list()
for _ in range(d):
    a, b = map(int, sys.stdin.readline().split())
    item_list.append((a-1, b-1))  # convert 1-based input to 0-based indices
start_x = 0
start_y = 0
answer = matrix[0][0]  # cost of the starting cell, paid once
for a, b in item_list:
    # Dijkstra from the current position to the next item (a, b).
    save_matrix = [[sys.maxsize for _ in range(m)] for _ in range(n)]
    queue = list()
    heapq.heappush(queue, (0, start_x, start_y))
    save_matrix[start_x][start_y] = 0
    while queue:
        damage, x, y = heapq.heappop(queue)
        if damage > save_matrix[x][y]:
            continue  # stale entry; a cheaper path to (x, y) already exists
        if x == a and y == b:
            answer += damage
            break
        else:
            for xi, yi in right_left:
                new_x = x + xi
                new_y = y + yi
                if 0 <= new_x < n and 0 <= new_y < m:
                    if damage + matrix[new_x][new_y] < save_matrix[new_x][new_y]:
                        save_matrix[new_x][new_y] = damage + matrix[new_x][new_y]
                        heapq.heappush(queue, (damage + matrix[new_x][new_y], new_x, new_y))
            if y == 0 or y == m-1:
                # Vertical movement is only permitted in the edge columns.
                for xi, yi in up_down:
                    new_x = x + xi
                    new_y = y + yi
                    if 0 <= new_x < n and 0 <= new_y < m:
                        if damage + matrix[new_x][new_y] < save_matrix[new_x][new_y]:
                            save_matrix[new_x][new_y] = damage + matrix[new_x][new_y]
                            heapq.heappush(queue, (damage + matrix[new_x][new_y], new_x, new_y))
    start_x = a
    start_y = b
# FIX: dataset-extraction residue that was fused after this call (making the
# file a syntax error) has been removed.
print(answer)
# Write a program that reads a single string with numbers separated by
# comma and space ", ". Print the indices of all even numbers, computed
# two ways (both printed lists are identical).
tokens = input().split(", ")
# Parse the tokens to ints: once with a list comprehension, once with map().
numbers_from_comprehension = [int(token) for token in tokens]
numbers_from_map = list(map(int, tokens))
# Approach 1: list comprehension over the index range.
even_indices_comprehension = [
    index
    for index in range(len(numbers_from_comprehension))
    if numbers_from_comprehension[index] % 2 == 0
]
# Approach 2: filter() + lambda over the indices themselves.
# BUG FIX: the original filtered the *values* and mapped them back with
# list.index(), which returns the first occurrence and therefore reported
# wrong indices for duplicate even numbers; it also shadowed the built-in
# filter() by reusing its name.
even_indices_filtered = list(
    filter(lambda index: numbers_from_map[index] % 2 == 0,
           range(len(numbers_from_map)))
)
print(even_indices_comprehension)
print(even_indices_filtered)
| Andon-ov/Python-Fundamentals | 13_lists_advanced_lab/05_even_numbers.py | 05_even_numbers.py | py | 592 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.