hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
282ccb40eb876af8551ca4938f1b672da9c20208 | 1,189 | py | Python | helpers.py | AHS-Open-Sorcery/HTNE-Project | c6caf57f1e89302c06ef0a84ddb83c645274d183 | [
"MIT"
] | null | null | null | helpers.py | AHS-Open-Sorcery/HTNE-Project | c6caf57f1e89302c06ef0a84ddb83c645274d183 | [
"MIT"
] | null | null | null | helpers.py | AHS-Open-Sorcery/HTNE-Project | c6caf57f1e89302c06ef0a84ddb83c645274d183 | [
"MIT"
] | null | null | null | from data_retrieval import *
import emailer
from login.config import Config
from sentiment_analysis import *
import tweepy
import json
def send_email(user_id, post_id):  # Note: can determine user_id from post_id
    """Email the content of post `post_id` to the owner identified by `user_id`.

    Failures are reported and swallowed so that one bad address does not
    abort a notification sweep over all users (see notify_users).
    """
    try:
        emailer.send_email(Config.EMAIL_SENDER, get_email(user_id), get_post_content(post_id), Config.EMAIL_PASSWORD)
    except Exception as exc:  # was a bare `except:` — never mask SystemExit/KeyboardInterrupt
        print("Unable to send email to user {}: {}".format(user_id, exc))
def notify_users():
    """Send an expiry e-mail for every expired post of every user."""
    for user in get_all_users():
        user_id = user[0]  # first column of the user row is the id
        for post in get_expired_posts(user_id):
            send_email(user_id, post[0])
def postTweet(user_id, post_id):
    """Publish the stored post on the user's Twitter account, then mark it resolved."""
    # The OAuth token blob for this user is stored as a JSON string in the
    # flask-dance oauth table.
    tokens = query(access_accounts(), "SELECT token FROM flask_dance_oauth WHERE id=?;", (user_id, ))
    x = json.loads(tokens[0][0])
    auth = tweepy.OAuthHandler(Config.TWITTER_OAUTH_CLIENT_KEY, Config.TWITTER_OAUTH_CLIENT_SECRET)
    auth.set_access_token(x["oauth_token"], x["oauth_token_secret"])
    api = tweepy.API(auth)
    api.update_status(get_post_content(post_id))
    resolve_post(post_id)  # flag the post as handled so it is not posted again
def time_to_ms(time):
    """Convert a datetime to milliseconds since the Unix epoch (float)."""
    seconds = time.timestamp()
    return seconds * 1000
def ms_to_time(ms):
return datetime.fromtimestamp(ms / 1000)
| 27.022727 | 117 | 0.710681 |
282cfc4e7936eaf93017c54214756f8505acd57a | 3,291 | py | Python | attack_metrics/mr.py | asplos2020/DRTest | c3de497142d9b226e518a1a0f95f7350d2f7acd6 | [
"MIT"
] | 1 | 2021-04-01T07:31:17.000Z | 2021-04-01T07:31:17.000Z | attack_metrics/mr.py | Justobe/DRTest | 85c3c9b2a46cafa7184130f2596c5f9eb3b20bff | [
"MIT"
] | null | null | null | attack_metrics/mr.py | Justobe/DRTest | 85c3c9b2a46cafa7184130f2596c5f9eb3b20bff | [
"MIT"
] | 1 | 2020-12-24T12:12:54.000Z | 2020-12-24T12:12:54.000Z | """
This tutorial shows how to generate adversarial examples using FGSM
and train a model using adversarial training with TensorFlow.
It is very similar to mnist_tutorial_keras_tf.py, which does the same
thing but with a dependence on keras.
The original paper can be found at:
https://arxiv.org/abs/1412.6572
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import numpy as np
import tensorflow as tf
from scipy.misc import imsave, imread
from tensorflow.python.platform import flags
sys.path.append("../")
from nmutant_data.mnist import data_mnist
from nmutant_data.cifar10 import data_cifar10
from nmutant_data.svhn import data_svhn
from nmutant_util.utils_tf import model_argmax, model_prediction
from nmutant_model.model_operation import model_load
from nmutant_attack.attacks import FastGradientMethod
from nmutant_util.utils_imgproc import deprocess_image_1, preprocess_image_1, deprocess_image_1
from nmutant_data.data import get_data, get_shape
from nmutant_util.utils import batch_indices
from nmutant_util.utils_file import get_data_file
import time
import math
FLAGS = flags.FLAGS
def mr(datasets, model_name, attack, va, epoch=49):
    """Misclassification ratio: number of generated adversarial samples divided
    by the number of test samples the model classifies correctly.

    :param datasets: dataset name, e.g. 'mnist' or 'cifar10'
    :param model_name: name of the trained model checkpoint to load
    :param attack: attack identifier ('fgsm' is untargeted; others are targeted)
    :param va: attack-strength parameter encoded in the adv_result path names
    :param epoch: checkpoint epoch to load
    :return: ratio of adversarial samples found to correctly-classified samples
    """
    tf.reset_default_graph()
    X_train, Y_train, X_test, Y_test = get_data(datasets)
    input_shape, nb_classes = get_shape(datasets)
    sample = X_test
    sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch)
    probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict, datasets=datasets)
    if sample.shape[0] == 1:
        current_class = np.argmax(probabilities)
    else:
        current_class = np.argmax(probabilities, axis=1)
    # only for correct: keep indices of samples the model already gets right
    acc_pre_index = []
    for i in range(0, sample.shape[0]):
        if current_class[i] == np.argmax(Y_test[i]):
            acc_pre_index.append(i)
    print(len(acc_pre_index))
    sess.close()
    total = 0
    if attack == 'fgsm':
        # Untargeted attack: a single result directory per configuration.
        samples_path = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/' + str(va)
        [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)
        num = len(image_list)
        return num / len(acc_pre_index)
    else:
        # Targeted attacks: one directory per target class; sum over all targets.
        total = 0
        for tar in range(0, nb_classes):
            samples_path = '../adv_result/' + datasets + '/' + attack + '/' + model_name + '/' + str(va) + '_' + str(tar)
            [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path)
            total += len(image_list)
        return total / len(acc_pre_index)
def main(argv=None):
    """Entry point for tf.app.run(): compute one misclassification ratio and print it."""
    ratio = mr(datasets='cifar10',
               model_name='vgg11',
               attack='cw',
               va=0.1)
    print(ratio)
if __name__ == '__main__':
    # CLI flags. NOTE(review): main() above uses hard-coded arguments, so these
    # flags appear informational only — confirm before relying on them.
    flags.DEFINE_string('datasets', 'mnist', 'The target datasets.')
    #flags.DEFINE_string('sample', '../datasets/integration/mnist/0.png', 'The path to load sample.')
    flags.DEFINE_string('model', 'lenet4', 'The name of model.')
    flags.DEFINE_string('attack', 'fgsm', 'step size of fgsm')
    tf.app.run()  # dispatches to main()
| 33.242424 | 101 | 0.717107 |
282d70a51f36fc140a29300e30c60da47fb2411b | 7,434 | py | Python | detect_deeplabv3plus_ascend.py | jackhanyuan/deeplabv3plus-ascend | 817006a4514257aa8cd07d752b70bbff9709ba9f | [
"Apache-2.0"
] | null | null | null | detect_deeplabv3plus_ascend.py | jackhanyuan/deeplabv3plus-ascend | 817006a4514257aa8cd07d752b70bbff9709ba9f | [
"Apache-2.0"
] | null | null | null | detect_deeplabv3plus_ascend.py | jackhanyuan/deeplabv3plus-ascend | 817006a4514257aa8cd07d752b70bbff9709ba9f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# by [jackhanyuan](https://github.com/jackhanyuan) 07/03/2022
import argparse
import copy
import glob
import os
import re
import sys
import time
from pathlib import Path
import cv2
import acl
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
from acl_net import check_ret, Net
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # Root directory of this script
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH so local modules import
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative path for nicer logs
# Image file extensions accepted by the detection loop in __main__ below.
IMG_EXT = ('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')
def cvtColor(image):
    """Return *image* as 3-channel RGB.

    Inputs that already have shape (H, W, 3) pass through untouched; anything
    else (e.g. a grayscale or palette PIL image) is converted via .convert('RGB').
    """
    shape = np.shape(image)
    if len(shape) == 3 and shape[2] == 3:
        return image
    return image.convert('RGB')
def resize_image(image, size):
    """Letterbox *image* into *size* = (w, h) on a grey canvas, keeping aspect ratio.

    Returns (padded_image, new_w, new_h) where new_w/new_h are the dimensions of
    the resized content inside the canvas.
    """
    src_w, src_h = image.size
    dst_w, dst_h = size
    scale = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * scale)
    new_h = int(src_h * scale)
    resized = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (128, 128, 128))
    canvas.paste(resized, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))
    return canvas, new_w, new_h
def preprocess_input(image):
    """Normalise pixel values from [0, 255] to [0, 1]; mutates and returns *image*."""
    np.divide(image, 255.0, out=image)
    return image
def detect_image(image, num_classes, input_shape=(512, 512), fp16=False):
    """Run the OM segmentation model (module-level `net`) on one PIL image.

    Returns (original_image, pred_mask) where pred_mask is a 2-D array of
    per-pixel class indices at the original image resolution.
    """
    image = cvtColor(image)
    # NOTE(review): PIL .size is (width, height), so image_h actually holds the
    # width and image_w the height; the later cv2.resize call compensates by
    # passing (image_h, image_w) as its (w, h) dsize — confirm before renaming.
    image_h, image_w = image.size
    org_img = copy.deepcopy(image)
    dtype = np.float16 if fp16 else np.float32
    # Letterbox to the model input size; nw/nh are the resized content dims.
    img_data, nw, nh = resize_image(image, (input_shape[1], input_shape[0]))
    img_data = np.expand_dims(np.transpose(preprocess_input(np.array(img_data, dtype)), (2, 0, 1)), 0)
    if fp16:
        img_data = img_data.astype("float16")
    img_bytes = np.frombuffer(img_data.tobytes(), dtype)
    result = net.run([img_bytes])[0]  # ACL inference; returns a raw output buffer
    pred = np.frombuffer(bytearray(result), dtype)
    pred = pred.reshape(num_classes, input_shape[0], input_shape[1])
    pred = torch.from_numpy(pred)
    # HWC softmax over the class dimension.
    pred = F.softmax(pred.float().permute(1, 2, 0), dim=-1).numpy()
    # Crop away the grey letterbox padding before upsampling back.
    pred = pred[int((input_shape[0] - nh) // 2): int((input_shape[0] - nh) // 2 + nh), \
                int((input_shape[1] - nw) // 2): int((input_shape[1] - nw) // 2 + nw)]
    pred = cv2.resize(pred, (image_h, image_w), interpolation=cv2.INTER_LINEAR)
    pred_img = pred.argmax(axis=-1)  # per-pixel class index
    return org_img, pred_img
def draw_image(org_img, pred, num_classes, blend=True):
    """Colourise a class-index mask; optionally alpha-blend it over the original image."""
    # Fixed VOC-style palette; list index == class id. Supports up to 22 classes.
    colors = [ (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128), (0, 128, 128),
               (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128), (192, 0, 128),
               (64, 128, 128), (192, 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128),
               (128, 64, 12)]
    seg_img = np.zeros((np.shape(pred)[0], np.shape(pred)[1], 3))
    # Accumulate each class's colour into the RGB channels via boolean masks.
    for c in range(num_classes):
        seg_img[:, :, 0] += ((pred[:, :] == c) * (colors[c][0])).astype('uint8')
        seg_img[:, :, 1] += ((pred[:, :] == c) * (colors[c][1])).astype('uint8')
        seg_img[:, :, 2] += ((pred[:, :] == c) * (colors[c][2])).astype('uint8')
    image = Image.fromarray(np.uint8(seg_img))
    if blend:
        image = Image.blend(org_img, image, 0.7)  # 70% overlay weight
    return image
def load_label(label_name):
    """Read one class name per line from *label_name*; return them as a numpy array."""
    with open(label_name, 'r') as fh:
        lines = fh.readlines()
    return np.array([line.strip() for line in lines])
def increment_path(path, exist_ok=False, sep='', mkdir=False):
    """Return *path* unchanged, or the next free variant runs/exp{sep}2, {sep}3, ...

    i.e. runs/exp --> runs/exp2 when runs/exp already exists (unless exist_ok).
    """
    path = Path(path)  # os-agnostic
    if path.exists() and not exist_ok:
        if path.is_file():
            path, suffix = path.with_suffix(''), path.suffix
        else:
            suffix = ''
        # Collect the numeric suffixes already taken by similar paths.
        taken = []
        for d in glob.glob(f"{path}{sep}*"):
            m = re.search(rf"%s{sep}(\d+)" % path.stem, d)
            if m:
                taken.append(int(m.groups()[0]))
        n = max(taken) + 1 if taken else 2  # next free increment number
        path = Path(f"{path}{sep}{n}{suffix}")
    if mkdir:
        path.mkdir(parents=True, exist_ok=True)  # make directory
    return path
def parse_opt():
    """Build and parse the command-line options for this script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'ascend/deeplab_mobilenetv2.om',
                        help='str: weights path.')
    parser.add_argument('--labels', nargs='+', type=str, default=ROOT / 'ascend/deeplabv3plus.label')
    parser.add_argument('--imgsz', nargs='+', type=int, default=(512, 512),
                        help='int tuple: the model inference size (w, h).')
    parser.add_argument('--images-dir', type=str, default=ROOT / 'img')
    parser.add_argument('--output-dir', type=str, default=ROOT / 'img_out')
    parser.add_argument('--device', type=int, default=0, help='int: npu device id, i.e. 0 or 1.')
    # NOTE(review): action='store_true' combined with default=True means these
    # two flags can never be switched off from the CLI — confirm whether
    # store_false (or a BooleanOptionalAction) was intended.
    parser.add_argument('--save-img', action='store_true', default=True,
                        help='bool: whether to save image, default=True.')
    parser.add_argument('--blend', action='store_true', default=True,
                        help='bool: whether to mix the original image and the predicted image.')
    opt = parser.parse_args()
    return opt
if __name__ == "__main__":
opt = parse_opt()
t0 = time.perf_counter()
print("ACL Init:")
ret = acl.init()
check_ret("acl.init", ret)
device_id = opt.device
# 1.Load model
print("Loading model %s." % opt.weights)
model_path = str(opt.weights)
net = Net(device_id, model_path)
input_size = opt.imgsz
output_dir = increment_path(Path(opt.output_dir) / 'exp', exist_ok=False) # increment path
output_dir.mkdir(parents=True, exist_ok=True) # make dir
# 2.Load label
label_path = opt.labels
labels = load_label(label_path)
num_classes = len(labels)
# 3.Start Detect
print()
print("Start Detect:")
images_dir = opt.images_dir
images = sorted(os.listdir(images_dir))
count = 0
total_count = len(images)
for image_name in images:
if image_name.lower().endswith(IMG_EXT):
t1 = time.perf_counter()
count += 1
image_path = os.path.join(images_dir, image_name)
image = Image.open(image_path)
# detect image
org_img, pred_img = detect_image(image, num_classes=num_classes, input_shape=input_size, fp16=False)
# count area for every labels
s = ""
for i in range(len(labels)):
count_area = int(np.sum(pred_img == i))
if count_area > 0:
s += f"{count_area} pixel{'s' * (count_area > 1)} {labels[i]}, " # add to string
# draw imgage
output_img = draw_image(org_img, pred_img, num_classes=num_classes, blend=opt.blend)
# save image
if opt.save_img:
output_path = os.path.join(output_dir, image_name)
output_img.save(output_path)
t2 = time.perf_counter()
t = t2 - t1
print('image {}/{} {}: {}Done. ({:.3f}s)'.format(count, total_count, image_path, s, t))
t3 = time.perf_counter()
t = t3 - t0
print('This detection cost {:.3f}s.'.format(t))
print("Results saved to {}.".format(output_dir))
print() | 34.738318 | 114 | 0.595776 |
282db7a977d5ee21b51eae4baa89a5c662e12b73 | 1,044 | py | Python | trim2.py | AmadeusChan/GeminiGraph | 893b05ee5c560ec51d41ab6a58a300baade8a9f5 | [
"Apache-2.0"
] | null | null | null | trim2.py | AmadeusChan/GeminiGraph | 893b05ee5c560ec51d41ab6a58a300baade8a9f5 | [
"Apache-2.0"
] | null | null | null | trim2.py | AmadeusChan/GeminiGraph | 893b05ee5c560ec51d41ab6a58a300baade8a9f5 | [
"Apache-2.0"
] | null | null | null | import os
with open("./simple_graph.txt", "r") as f:
N = int(f.readline())
edges = []
while True:
line = f.readline().strip()
if line == None or len(line) == 0:
break
line = line.split(" ")
edges.append([int(line[0]), int(line[1])])
con = []
for i in range(N):
con.append([])
for edge in edges:
con[edge[0]].append(edge[1])
"""
for i in range(N):
active.append(N)
"""
trimmed_num = 0
trimmed = []
for i in range(N):
trimmed.append(False)
it = 0
while True:
it += 1
degree = []
newly_trimmed = 0
for i in range(N):
degree.append(0)
for i in range(N):
if trimmed[i] == False:
for j in con[i]:
degree[j] += 1
for i in range(N):
if trimmed[i] == False and degree[i] < 2:
trimmed[i] = True
newly_trimmed += 1
if newly_trimmed == 0:
break
trimmed_num += newly_trimmed
print "iter =", it, " trimmed = ", trimmed_num
print N - trimmed_num
| 18.642857 | 50 | 0.51341 |
282de6dc694665ce1234b3c8d5c5765ed89dc979 | 3,516 | py | Python | Classes/Data.py | zerowsir/stock_study | ae2f3fab2b0cb3f4c980f0b229547867902415c4 | [
"MIT"
] | 5 | 2020-04-27T08:07:06.000Z | 2022-01-02T14:47:21.000Z | Classes/Data.py | zerowsir/stock_study | ae2f3fab2b0cb3f4c980f0b229547867902415c4 | [
"MIT"
] | null | null | null | Classes/Data.py | zerowsir/stock_study | ae2f3fab2b0cb3f4c980f0b229547867902415c4 | [
"MIT"
] | 3 | 2020-04-25T12:29:09.000Z | 2021-07-09T05:47:01.000Z | # coding=utf-8
"""
__title__ = ''
__file__ = ''
__author__ = 'tianmuchunxiao'
__mtime__ = '2019/7/4'
"""
import requests
import datetime
import pandas as pd
from io import StringIO
TODAY = datetime.date.strftime(datetime.date.today(), '%Y%m%d')
class Data(object):
    """Base downloader for NetEase (163.com) historical quote CSVs.

    Subclasses (Stock_data, Index_data) configure URL / PARAMS / HEADERS and
    the output directory file_path.
    """
    URL = ''        # service endpoint
    PARAMS = {}     # query-string template; get_data fills in code/end
    HEADERS = {}    # HTTP headers sent with the request
    file_path = ''  # directory where per-code CSVs are written

    def get_data(self, code, end=TODAY):
        """Download history for `code` up to `end` (YYYYMMDD) and save it as CSV."""
        # NOTE(review): PARAMS is a class-level dict, so these writes are shared
        # by all instances of the same subclass — confirm single-threaded use.
        self.PARAMS['code'] = code
        self.PARAMS['end'] = end
        print('正在获取{}数据……'.format(code))
        response = requests.get(url=self.URL,
                                params=self.PARAMS,
                                headers=self.HEADERS)
        print('正在处理{}数据...'.format(code))
        # The 163 service returns GBK-encoded CSV text.
        data_df = pd.read_csv(StringIO(response.content.decode('gbk')), skip_blank_lines=True)
        print(data_df)
        data_df = data_df.sort_values(by='日期')  # sort ascending by the date column
        if data_df.empty:
            print('空数据', code)
        else:
            # code[1:] drops the leading exchange digit used by the 163 API.
            data_df.to_csv(self.file_path + str(code[1:]) + '.csv', encoding='gbk', index=False)
        print('{}数据处理完成!!'.format(code))
class Stock_data(Data):
    """Per-stock daily history downloader; writes CSVs under stock_data/."""
    URL = 'http://quotes.money.163.com/service/chddata.html'
    # 'fields' selects the quote columns returned by the chddata endpoint.
    PARAMS = {
        'code': '',
        'start': '19900101',
        'end': '',
        'fields': 'TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;TURNOVER;VOTURNOVER;VATURNOVER;TCAP;MCAP'
    }
    # Browser-mimicking headers (incl. a captured session cookie) so the
    # service serves the CSV instead of rejecting the request.
    HEADERS = {
        'Cookie': 'Province=0; City=0; UM_distinctid=16c05496622f1-00e8d8cb7044e48-4c312272-15f900-16c054966245cc; _ntes_nnid=0213f9288c03916f18ed2634a6a3506d,1563456793050; vjuids=1be4f793f.16c054a41b6.0.6b5b7a77d19a78; vjlast=1563456848.1563930352.13; vinfo_n_f_l_n3=ad2a50d90e25c7dc.1.4.1563456848324.1563950911150.1563963465898; usertrack=ezq0ZV03rush6S+BCCg6Ag==; _ntes_nuid=0213f9288c03916f18ed2634a6a3506d; NNSSPID=bcf860b5427949c599552390d570c1d0; _ntes_stock_recent_plate_=%7Chy006000%3A%E6%89%B9%E5%8F%91%E9%9B%B6%E5%94%AE; _ntes_stock_recent_=0601857%7C0601326%7C0600682; _ntes_stock_recent_=0601857%7C0601326%7C0600682; _ntes_stock_recent_=0601857%7C0601326%7C0600682; ne_analysis_trace_id=1563963422398; s_n_f_l_n3=ad2a50d90e25c7dc1563963422401; _antanalysis_s_id=1563963428611; pgr_n_f_l_n3=ad2a50d90e25c7dc15639634493333113',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
                  'application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh,en-US;q=0.9,en;q=0.8,zh-TW;q=0.7,zh-CN;q=0.6',
        'Connection': 'keep-alive',
        'Host': 'quotes.money.163.com',
        'Referer': 'http://quotes.money.163.com / trade / lsjysj_601857.html',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/75.0.3770.100 Safari/537.36'
    }
    file_path = 'F:/Stock_Data/stock_data/'
class Index_data(Data):
    """Index (指数) daily history downloader; writes CSVs under index_data/."""
    URL = 'http://quotes.money.163.com/service/chddata.html'
    HEADERS = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,'
                  'application/signed-exchange;v=b3',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh,en-US;q=0.9,en;q=0.8,zh-TW;q=0.7,zh-CN;q=0.6',
        'Connection': 'keep-alive',
        'Host': 'quotes.money.163.com',
        'Referer': 'http://quotes.money.163.com/trade/lsjysj_zhishu_000003.html',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/75.0.3770.100 Safari/537.36'
    }
    # Indices expose fewer fields than stocks (no turnover/market-cap columns).
    PARAMS = {
        'start': '19900101',
        'fields': 'TCLOSE;HIGH;LOW;TOPEN;LCLOSE;CHG;PCHG;VOTURNOVER;VATURNOVER '
    }
    file_path = 'F:/Stock_Data/index_data/'
| 42.878049 | 834 | 0.700512 |
28306d7ad64f3293bff710f9c34f8a63246637c0 | 4,151 | py | Python | tledb/common/misc.py | rtubio/tledb | 2bfb13497d4ba7c155505fa7396abd7bf837b3a5 | [
"Apache-2.0"
] | null | null | null | tledb/common/misc.py | rtubio/tledb | 2bfb13497d4ba7c155505fa7396abd7bf837b3a5 | [
"Apache-2.0"
] | 5 | 2020-11-09T00:24:16.000Z | 2022-02-10T15:10:19.000Z | tledb/common/misc.py | rtubio/tledb | 2bfb13497d4ba7c155505fa7396abd7bf837b3a5 | [
"Apache-2.0"
] | 1 | 2020-11-08T10:35:23.000Z | 2020-11-08T10:35:23.000Z |
import datetime
import logging
import pytz
import socket
logger = logging.getLogger('common')
def get_fqdn(ip_address):
    """
    Resolves the given IP address via reverse DNS.

    :param ip_address: IP address of the remote host.
    :return: the (hostname, aliaslist, ipaddrlist) triple returned by
        socket.gethostbyaddr — note this is a 3-tuple, not a bare FQDN string.
    """
    return socket.gethostbyaddr(ip_address)
# noinspection PyBroadException
def get_fqdn_ip():
    """
    Best-effort lookup of this machine's fully-qualified name and its IP.

    :return: (hostname string, IP string); the hostname falls back to
        'localhost' when socket.getfqdn() fails.
    """
    hostname = 'localhost'
    try:
        hostname = socket.getfqdn()
    except Exception:
        pass
    return hostname, socket.gethostbyname(hostname)
def get_now_utc(no_microseconds=True):
    """
    Return the current instant as a UTC-localized datetime.

    :param no_microseconds: when True, the microsecond field is zeroed.
    :return: a pytz-localized datetime for "now".
    """
    now = pytz.utc.localize(datetime.datetime.utcnow())
    if no_microseconds:
        now = now.replace(microsecond=0)
    return now
def get_utc_window(center=None, duration=None, no_microseconds=True):
    """X minutes window.

    Return a (start, end) tuple centered on *center* extending *duration* on
    each side. Defaults: center = now (UTC), duration = 5 minutes, giving a
    10-minute window overall.

    Args:
        center: datetime at the middle of the window (default: now, UTC)
        duration: timedelta added on each side (default: 5 minutes)
        no_microseconds: forwarded when computing the default center

    Returns: (center - duration, center + duration)
    """
    # `or` mirrors the original falsy checks (a zero timedelta also defaults).
    midpoint = center or get_now_utc(no_microseconds=no_microseconds)
    half = duration or datetime.timedelta(minutes=5)
    return midpoint - half, midpoint + half
def get_now_hour_utc(no_microseconds=True):
    """
    Return the current UTC time-of-day as a datetime.time object.

    :param no_microseconds: when True, the microsecond field is zeroed.
    """
    now = datetime.datetime.utcnow()
    if no_microseconds:
        now = now.replace(microsecond=0)
    return now.time()
def get_today_utc():
    """Return today's date as a UTC-localized datetime at exactly midnight."""
    now = pytz.utc.localize(datetime.datetime.utcnow())
    return now.replace(hour=0, minute=0, second=0, microsecond=0)
def get_next_midnight():
    """Return tomorrow's 00:00 as a UTC-localized datetime."""
    today_start = pytz.utc.localize(datetime.datetime.today()).replace(
        hour=0, minute=0, second=0, microsecond=0
    )
    return today_start + datetime.timedelta(days=1)
def localize_date_utc(date):
    """
    Combine *date* with time 00:00:00 and localize the result in UTC.

    :param date: the date object to be localized.
    :return: a UTC-localized datetime at midnight of *date*.
    """
    midnight = datetime.time(hour=0, minute=0, second=0)
    return pytz.utc.localize(datetime.datetime.combine(date, midnight))
# Unix epoch (1970-01-01 00:00 UTC) as a localized datetime; reference below.
TIMESTAMP_0 = localize_date_utc(datetime.datetime(year=1970, month=1, day=1))


def get_utc_timestamp(utc_datetime=None):
    """
    Returns a timestamp with the number of microseconds elapsed since January
    1st of 1970 for the given datetime object, UTC localized.

    :param utc_datetime: The datetime whose timestamp is to be calculated;
        defaults to now (UTC).
    :return: The number of microseconds since 1.1.1970 (integer). Note: the
        original docstring said milliseconds, but the code multiplies the
        second count by 10**6, i.e. microseconds.
    """
    if utc_datetime is None:
        utc_datetime = get_now_utc()
    diff = utc_datetime - TIMESTAMP_0
    return int(diff.total_seconds() * 10**6)
| 30.748148 | 78 | 0.688991 |
2832742f4b503c46d1f0a267a28c5b2b06f21e83 | 2,455 | py | Python | CoC/CoC_Default_CmdSets.py | macorvalan/MyGame | 29a14bcb1ffb11b158d325112d5698107d8f1188 | [
"Unlicense"
] | null | null | null | CoC/CoC_Default_CmdSets.py | macorvalan/MyGame | 29a14bcb1ffb11b158d325112d5698107d8f1188 | [
"Unlicense"
] | null | null | null | CoC/CoC_Default_CmdSets.py | macorvalan/MyGame | 29a14bcb1ffb11b158d325112d5698107d8f1188 | [
"Unlicense"
] | null | null | null | """
"""
from evennia import default_cmds
from evennia import CmdSet
class CoCCharacterCmdSet(default_cmds.CharacterCmdSet):
    """
    The `CharacterCmdSet` contains general in-game commands like `look`,
    `get`, etc available on in-game Character objects. It is merged with
    the `AccountCmdSet` when an Account puppets a Character.
    """
    # Evennia identifies cmdsets by this key when merging/removing them.
    key = "DefaultCharacter"

    def at_cmdset_creation(self):
        """
        Populates the cmdset with the stock character commands; add
        game-specific overrides after the super() call.
        """
        super().at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
class CoCAccountCmdSet(default_cmds.AccountCmdSet):
    """
    This is the cmdset available to the Account at all times. It is
    combined with the `CharacterCmdSet` when the Account puppets a
    Character. It holds game-account-specific commands, channel
    commands, etc.
    """
    # Evennia identifies cmdsets by this key when merging/removing them.
    key = "DefaultAccount"

    def at_cmdset_creation(self):
        """
        Populates the cmdset with the stock account commands; add
        game-specific overrides after the super() call.
        """
        super().at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
class CoCUnloggedinCmdSet(default_cmds.UnloggedinCmdSet):
    """
    Command set available to the Session before being logged in. This
    holds commands like creating a new account, logging in, etc.
    """
    # Evennia identifies cmdsets by this key when merging/removing them.
    key = "DefaultUnloggedin"

    def at_cmdset_creation(self):
        """
        Populates the cmdset with the stock pre-login commands; add
        game-specific overrides after the super() call.
        """
        super().at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
class CoCSessionCmdSet(default_cmds.SessionCmdSet):
    """
    This cmdset is made available on Session level once logged in. It
    is empty by default.
    """
    # Evennia identifies cmdsets by this key when merging/removing them.
    key = "DefaultSession"

    def at_cmdset_creation(self):
        """
        This is the only method defined in a cmdset, called during
        its creation. It should populate the set with command instances.

        Currently it only inherits the (empty) default session cmdset.
        """
        super().at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
class CoCCharGenCmdSet(CmdSet):
    """
    Cmdset intended for the character-generation flow; currently empty
    (commands to be added below the super() call).
    """
    key = "CharGen"

    def at_cmdset_creation(self):
        """
        Populates the cmdset; empty for now.
        """
        super().at_cmdset_creation()
        #
        # any commands you add below will overload the default ones.
        #
2832f2cc2b9e737de1d148d89d82f9fe93379119 | 6,300 | py | Python | dit/inference/counts.py | leoalfonso/dit | e7d5f680b3f170091bb1e488303f4255eeb11ef4 | [
"BSD-3-Clause"
] | 1 | 2021-03-15T08:51:42.000Z | 2021-03-15T08:51:42.000Z | dit/inference/counts.py | leoalfonso/dit | e7d5f680b3f170091bb1e488303f4255eeb11ef4 | [
"BSD-3-Clause"
] | null | null | null | dit/inference/counts.py | leoalfonso/dit | e7d5f680b3f170091bb1e488303f4255eeb11ef4 | [
"BSD-3-Clause"
] | null | null | null | """
Non-cython methods for getting counts and distributions from data.
"""
import numpy as np
try: # cython
from .pycounts import counts_from_data, distribution_from_data
except ImportError: # no cython
from boltons.iterutils import windowed_iter
from collections import Counter, defaultdict
from itertools import product
from .. import modify_outcomes
from ..exceptions import ditException
def counts_from_data(data, hLength, fLength, marginals=True, alphabet=None, standardize=True):
    """
    Returns conditional counts from `data`.

    To obtain counts for the joint distribution only, use fLength=0.

    Parameters
    ----------
    data : iterable
        The data used to calculate morphs.
    hLength : int
        The maximum history word length used to calculate morphs.
    fLength : int
        The length of future words that defines the morph.
    marginals : bool
        Unused in this pure-Python fallback; kept for API compatibility with
        the cython implementation.
    alphabet : list
        Extra symbols to include in the alphabet besides those observed in
        `data`.
    standardize : bool
        Unused in this pure-Python fallback; kept for API compatibility.

    Returns
    -------
    histories : list
        Observed history words (length hLength), corresponding to the rows
        in `cCounts`.
    cCounts : NumPy array
        Conditional counts: one row per observed history, one column per
        possible future word (alphabet size ** fLength columns).
    hCounts : NumPy array
        A 1D array with the count of each history word (row sums of cCounts).
    alphabet : tuple
        The ordered alphabet of the data, merged with any supplied symbols.
    """
    try:
        data = list(map(tuple, data))  # normalize rows to hashable tuples
    except TypeError:
        pass

    # Count every sliding window of length hLength + fLength, then split each
    # window into (history, future) to build the conditional counts.
    counts = Counter(windowed_iter(data, hLength+fLength))
    cond_counts = defaultdict(lambda: defaultdict(int))
    for word, count in counts.items():
        cond_counts[word[:hLength]][word[hLength:]] += count

    # Bug fix: rows must be indexed by the hLength-prefix histories, not the
    # full (history+future) windows. With the old `sorted(counts.keys())`,
    # every `cond_counts[hist]` lookup below missed for fLength > 0 and all
    # conditional counts came out zero. For fLength == 0 the two key sets
    # coincide, so existing callers (distribution_from_data, get_counts)
    # behave identically.
    histories = sorted(cond_counts.keys())

    # Alphabet is taken from the full windows so no trailing symbol is lost.
    alphabet = set(alphabet) if alphabet is not None else set()
    alphabet = tuple(sorted(alphabet.union(*[set(word) for word in counts.keys()])))

    cCounts = np.empty((len(histories), len(alphabet)**fLength))
    for i, hist in enumerate(histories):
        for j, future in enumerate(product(alphabet, repeat=fLength)):
            cCounts[i, j] = cond_counts[hist][future]

    hCounts = cCounts.sum(axis=1)
    return histories, cCounts, hCounts, alphabet
def distribution_from_data(d, L, trim=True, base=None):
    """
    Returns a distribution over words of length `L` from `d`.

    The returned distribution is the naive estimate of the distribution,
    which assigns probabilities equal to the number of times a particular
    word appeared in the data divided by the total number of times a word
    could have appeared in the data.

    Roughly, it corresponds to the stationary distribution of a maximum
    likelihood estimate of the transition matrix of an (L-1)th order Markov
    chain.

    Parameters
    ----------
    d : list
        A list of symbols to be converted into a distribution.
    L : integer
        The length of the words for the distribution.
    trim : bool
        If true, then words with zero probability are trimmed from the
        distribution.
    base : int or string
        The desired base of the returned distribution. If `None`, then the
        value of `dit.ditParams['base']` is used.
    """
    # Imported locally to avoid a circular import with the dit package root.
    from dit import ditParams, Distribution
    try:
        d = list(map(tuple, d))  # normalize rows to hashable tuples
    except TypeError:
        pass
    if base is None:
        base = ditParams['base']
    # fLength=0 gives joint counts over words of length L.
    words, _, counts, _ = counts_from_data(d, L, 0)
    # We turn the counts to probabilities
    pmf = counts/counts.sum()
    dist = Distribution(words, pmf, trim=trim)
    dist.set_base(base)
    if L == 1:
        # Unpack length-1 word tuples into bare outcomes, when possible.
        try:
            dist = modify_outcomes(dist, lambda o: o[0])
        except ditException:
            pass
    return dist
def get_counts(data, length):
    """
    Count the occurrences of all words of `length` in `data`.

    Parameters
    ----------
    data : iterable
        The sequence of samples.
    length : int
        The length to group samples into.

    Returns
    -------
    counts : np.array
        Array with the count values, restricted to full-length words.
    """
    histories, _, histogram, _ = counts_from_data(data, length, 0)
    full_length = np.array([len(h) == length for h in histories])
    return histogram[full_length]
| 35 | 98 | 0.619524 |
28336284c8b0c58eec05e4f7f5c39c75af17be88 | 1,845 | py | Python | tests/test_get_meetings.py | GeorgianBadita/Dronem-gym-envirnoment | f3b488f6a4b55722c4b129051555a68d7775278c | [
"MIT"
] | 5 | 2020-06-13T10:43:42.000Z | 2022-01-25T10:37:32.000Z | tests/test_get_meetings.py | GeorgianBadita/Dronem-gym-envirnoment | f3b488f6a4b55722c4b129051555a68d7775278c | [
"MIT"
] | null | null | null | tests/test_get_meetings.py | GeorgianBadita/Dronem-gym-envirnoment | f3b488f6a4b55722c4b129051555a68d7775278c | [
"MIT"
] | null | null | null | """
@author: Badita Marin-Georgian
@email: geo.badita@gmail.com
@date: 21.03.2020 00:58
"""
from env_interpretation import Meeting
from env_interpretation.meeting import get_valid_meetings
def test_get_meetings_4_robots(env4_robots):
    """Check get_valid_meetings over the 4-robot fixture at several time steps."""
    env_data = env4_robots.get_env_metadata()
    cycles_length = [len(x) for x in env_data['cycles']]
    # No pair of robots meets at t=0.
    meetings_at_0 = get_valid_meetings(0, env_data['meetings'], cycles_length)
    assert meetings_at_0 == []
    meetings_at_2 = get_valid_meetings(2, env_data['meetings'], cycles_length)
    assert meetings_at_2 == [
        Meeting(r1=2, r2=3, first_time=2),
        Meeting(r1=0, r2=1, first_time=2)
    ]
    assert get_valid_meetings(5, env_data['meetings'], cycles_length) == []
    # Order is not guaranteed at t=8, so compare as a set.
    meetings_at_8 = get_valid_meetings(8, env_data['meetings'], cycles_length)
    assert set(meetings_at_8) == {Meeting(r1=1, r2=2, first_time=4),
                                  Meeting(r1=0, r2=1, first_time=2),
                                  Meeting(r1=0, r2=2, first_time=4)
                                  }
    assert get_valid_meetings(9, env_data['meetings'], cycles_length) == []
    # t=14 repeats the t=2 meetings (meetings recur with the robots' cycles).
    assert set(get_valid_meetings(14, env_data['meetings'], cycles_length)) == {
        Meeting(r1=2, r2=3, first_time=2),
        Meeting(r1=0, r2=1, first_time=2)
    }
def test_get_meetings_3_robots(env3_robots):
    """Check valid meetings at selected timestamps for the 3-robot fixture."""
    env_data = env3_robots.get_env_metadata()
    # Cycle lengths are the robots' patrol periods.
    cycles_length = [len(x) for x in env_data['cycles']]
    assert get_valid_meetings(0, env_data['meetings'], cycles_length) == []
    assert get_valid_meetings(1, env_data['meetings'], cycles_length) == [
        Meeting(r1=0, r2=1, first_time=1)
    ]
    # Chained equality: t=23 and t=5 must yield the same single meeting
    # (the schedule is periodic, so the two timestamps are equivalent).
    assert get_valid_meetings(23, env_data['meetings'], cycles_length) == [
        Meeting(r1=1, r2=2, first_time=5)] == get_valid_meetings(5, env_data['meetings'], cycles_length)
2834130633642f13cf991cd575cca24813206c77 | 1,670 | py | Python | python/test/environment/test_reset.py | stacyvjong/PandemicSimulator | eca906f5dc8135d7c90a1582b96621235f745c17 | [
"Apache-2.0"
] | null | null | null | python/test/environment/test_reset.py | stacyvjong/PandemicSimulator | eca906f5dc8135d7c90a1582b96621235f745c17 | [
"Apache-2.0"
] | null | null | null | python/test/environment/test_reset.py | stacyvjong/PandemicSimulator | eca906f5dc8135d7c90a1582b96621235f745c17 | [
"Apache-2.0"
] | null | null | null | # Confidential, Copyright 2020, Sony Corporation of America, All rights reserved.
import copy
import numpy as np
from pandemic_simulator.environment import CityRegistry, Home, GroceryStore, Office, School, Hospital, PopulationParams, \
LocationParams
from pandemic_simulator.script_helpers import make_standard_locations, make_us_age_population
# Minimal 10-person town used by the reset test below: three homes plus one
# instance of each public location type, with small worker/visitor capacities.
tiny_population_params = PopulationParams(
    num_persons=10,
    location_type_to_params={
        Home: LocationParams(num=3),
        GroceryStore: LocationParams(num=1, worker_capacity=5, visitor_capacity=30),
        Office: LocationParams(num=1, worker_capacity=200, visitor_capacity=0),
        School: LocationParams(num=1, worker_capacity=40, visitor_capacity=300),
        Hospital: LocationParams(num=1, worker_capacity=30, visitor_capacity=2),
    })
def test_location_and_person_reset():
    """reset() must restore every location's and person's state to its freshly-created value."""
    population_params = tiny_population_params
    # Fixed seed so the generated population is deterministic.
    numpy_rng = np.random.RandomState(0)
    cr = CityRegistry()
    locations = make_standard_locations(population_params, registry=cr, numpy_rng=numpy_rng)
    persons = make_us_age_population(population_params, registry=cr, numpy_rng=numpy_rng)
    # Deep-copy the initial states: reset() mutates objects in place.
    loc_states = [copy.deepcopy(loc.state) for loc in locations]
    per_states = [copy.deepcopy(per.state) for per in persons]
    for loc in locations:
        loc.reset()
    for per in persons:
        per.reset()
    new_loc_states = [copy.deepcopy(loc.state) for loc in locations]
    new_per_states = [copy.deepcopy(per.state) for per in persons]
    # State after reset must equal the state right after construction.
    for st1, st2 in zip(loc_states, new_loc_states):
        assert st1 == st2
    for st1, st2 in zip(per_states, new_per_states):
        assert st1 == st2
28343120a82f0ad353610fd53956f8cb3bf271dc | 1,008 | py | Python | Groups/Group_ID_6/SIFT_and_RESIFT/Code_files/sift.py | sonaldangi12/DataScience | 3d7cd529a96f37c2ef179ee408e2c6d8744d746a | [
"MIT"
] | 5 | 2020-12-13T07:53:22.000Z | 2020-12-20T18:49:27.000Z | Groups/Group_ID_6/SIFT_and_RESIFT/Code_files/sift.py | Gulnaz-Tabassum/DataScience | 1fd771f873a9bc0800458fd7c05e228bb6c4e8a0 | [
"MIT"
] | null | null | null | Groups/Group_ID_6/SIFT_and_RESIFT/Code_files/sift.py | Gulnaz-Tabassum/DataScience | 1fd771f873a9bc0800458fd7c05e228bb6c4e8a0 | [
"MIT"
] | 24 | 2020-12-12T11:23:28.000Z | 2021-10-04T13:09:38.000Z | from libs import *
def SIFT_algo(training_image, training_gray, test_image, test_gray):
    """Detect SIFT features in both images and visualize the training keypoints.

    Features are computed on the grayscale inputs; the keypoint overlays are
    drawn onto copies of the color training image.

    Returns a 6-tuple, in this order:
        (training image with rich keypoints, training image with plain keypoints,
         train descriptors, train keypoints, test descriptors, test keypoints)
    """
    detector = cv2.SIFT_create()
    # Detect keypoints and compute descriptors for both grayscale images.
    kp_train, desc_train = detector.detectAndCompute(training_gray, None)
    kp_test, desc_test = detector.detectAndCompute(test_gray, None)
    # Copies of the color training image to draw on (originals stay untouched).
    plain_overlay = np.copy(training_image)
    rich_overlay = np.copy(training_image)
    cv2.drawKeypoints(training_image, kp_train, plain_overlay, color=(0, 255, 0))
    # Rich flag draws keypoint size and orientation as well.
    cv2.drawKeypoints(training_image, kp_train, rich_overlay,
                      flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    return rich_overlay, plain_overlay, desc_train, kp_train, desc_test, kp_test
283446254a407be87bc62c4c4206eacf19fcc853 | 651 | py | Python | Q14__/23_Maximum_Points_You_Can_Obtain_from_Cards/Solution.py | hsclinical/leetcode | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | [
"Apache-2.0"
] | null | null | null | Q14__/23_Maximum_Points_You_Can_Obtain_from_Cards/Solution.py | hsclinical/leetcode | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | [
"Apache-2.0"
] | null | null | null | Q14__/23_Maximum_Points_You_Can_Obtain_from_Cards/Solution.py | hsclinical/leetcode | 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
    def maxScore(self, cardPoints: List[int], k: int) -> int:
        """Return the maximum sum obtainable by taking k cards from either end.

        When k exceeds the number of cards, whole passes over the deck are
        counted at full value and only the remaining picks are optimized.
        """
        if k <= 0:
            return 0
        n = len(cardPoints)
        # Complete sweeps of the whole deck (only relevant when k >= n).
        full_passes = sum(cardPoints) * (k // n)
        remainder = k % n
        # Slide a split point: take `remainder - t` cards from the front
        # and `t` cards from the back, tracking the best combination.
        window = sum(cardPoints[:remainder])
        best = window
        for t in range(1, remainder + 1):
            window += cardPoints[-t] - cardPoints[remainder - t]
            best = max(best, window)
        return full_passes + best
28349545dacdb38c6ebe53a67d01ff333f29fa0c | 1,192 | py | Python | utils/editResult.py | JasonHippo/Scene_text_detection_and_recognition | c0da141d71b7b888d560296b201aecbbd735b565 | [
"MIT"
] | 4 | 2021-12-27T14:37:33.000Z | 2022-03-30T10:56:57.000Z | utils/editResult.py | JasonHippo/Scene_text_detection_and_recognition | c0da141d71b7b888d560296b201aecbbd735b565 | [
"MIT"
] | null | null | null | utils/editResult.py | JasonHippo/Scene_text_detection_and_recognition | c0da141d71b7b888d560296b201aecbbd735b565 | [
"MIT"
] | null | null | null | import pandas as pd
import argparse
# Characters stripped from the 'pred' column; rows reduced to nothing are
# marked with the sentinel "の" and dropped at the end.
# NOTE(review): "/" appears twice in this list (harmless, but redundant).
badword = ["!", "$","%","&","'","(",")","*","+",",","-",".","/",":",";","<","=",">","?","@","[","/","]","^","_","`","{","|","}","~","の","®"]
if __name__ == "__main__":
    # Post-process a text-recognition result CSV given via --path:
    # strip punctuation from predictions, drop empty/unreadable rows,
    # and write the result next to the input as <name>_post.csv.
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', help='the path of file')
    opt = parser.parse_args()
    print(len(badword))
    df = pd.read_csv(opt.path)
    print(len(df))
    nan_list = list()  # row indices whose 'pred' could not be processed
    for i in range(len(df)):
        row = df.loc[i]
        print(i)
        print(row)
        try:
            if any(bad_word in row['pred'] for bad_word in badword):
                ss = row['pred']
                for j in range(len(badword)):
                    ss = ss.replace(badword[j],"")
                if ss =="":
                    # Nothing left after stripping: mark the row for removal.
                    df.loc[i,'pred']= "の"
                    print(df.loc[i])
                    continue
                df.loc[i,'pred']= ss
        except:  # NOTE(review): bare except — presumably guards NaN preds (TypeError); narrow if confirmed
            nan_list.append(i)
    df = df.drop(nan_list)
    # Remove rows that were reduced to the sentinel value.
    df = df.drop(df.loc[df['pred']=='の'].index)
    print(df.head)  # NOTE(review): prints the bound method, not data — likely meant df.head()
    df.to_csv("{}_post.csv".format(opt.path.split('.csv')[0]),index=False,encoding="utf-8")
28360d264e9f210900fd3d3f89893b647f81343f | 133 | py | Python | Service/Ipv4_stun/startup.py | zlf7735268/Tenet | 54005ad5d17b5d1f5ef4cc04aa6eb7939e58c2c5 | [
"Apache-2.0"
] | 2 | 2021-12-17T01:21:19.000Z | 2021-12-17T14:49:42.000Z | Service/Ipv4_stun/startup.py | zlf7735268/Tenet | 54005ad5d17b5d1f5ef4cc04aa6eb7939e58c2c5 | [
"Apache-2.0"
] | null | null | null | Service/Ipv4_stun/startup.py | zlf7735268/Tenet | 54005ad5d17b5d1f5ef4cc04aa6eb7939e58c2c5 | [
"Apache-2.0"
] | null | null | null | from Ipv4_stun.transfer import Transfer
#m=Transfer(address=('172.16.0.156',9080))
m=Transfer(address=('127.0.0.1', 82))
m.run() | 26.6 | 43 | 0.691729 |
2837b7ad00fad751653116c498c45a40929e5a19 | 4,030 | py | Python | generator/season.py | fraziermatthew/njba | acedec351543b8ecf339beb2d27c635f3377e929 | [
"MIT"
] | null | null | null | generator/season.py | fraziermatthew/njba | acedec351543b8ecf339beb2d27c635f3377e929 | [
"MIT"
] | null | null | null | generator/season.py | fraziermatthew/njba | acedec351543b8ecf339beb2d27c635f3377e929 | [
"MIT"
] | null | null | null | """season.py: Generates random NJBA season data."""
__author__ = "Matthew Frazier"
__copyright__ = "Copyright 2019, University of Delaware, CISC 637 Database Systems"
__email__ = "matthew@udel.edu"
from datetime import timedelta
import calendar
import csv
'''
Steps to run this project:
1. Create a virtual env and activate source
virtualenv -p python3 .
./bin/activate
2. Install names PyPi Module - https://pypi.org/project/names/
pip install names
3. Run the project
python3 generate-seasons.py
'''
numOfSeasons = 50                        # number of league years to generate
seasonType = ["Pre", "Regular", "Post"]  # one CSV row is written per type per year
id = 1                                   # running season id (NOTE: shadows the builtin `id`)
cal = calendar.Calendar(firstweekday = calendar.SUNDAY)
year = 2019 # Start Year
# month = 10 # October
# month2 = 4 # April
# month3 = 6 # June
with open('data/seasons2.csv', mode = 'w') as season_file:
    season_writer = csv.writer(season_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL)
    for j in range(numOfSeasons):
        for index in range(len(seasonType)):
            # Row layout: id, start-date, end-date, seasonType
            # Create the season list
            season = []
            # monthcal = cal.monthdatescalendar(year,month)
            # Pick the calendar month that contains the season's start date.
            if (seasonType[index] == "Pre"):
                monthcal = cal.monthdatescalendar(year, 9)
            elif (seasonType[index] == "Regular"):
                monthcal = cal.monthdatescalendar(year, 10)
            else:
                monthcal = cal.monthdatescalendar(year + 1, 4)
            # ID
            season.append(id)
            if (seasonType[index] == "Pre"):
                # Pre Season
                # Start date is 4th Saturday of every September
                start_date = [day for week in monthcal for day in week if \
                    day.weekday() == calendar.SATURDAY][3]
                # Start date
                season.append(start_date)
                # End date: the Monday before the 3rd Tuesday of October.
                # NOTE(review): this is not always the "3rd Monday" — confirm intent.
                monthcal = cal.monthdatescalendar(year, 10)
                end_date = [day for week in monthcal for day in week if \
                    day.weekday() == calendar.TUESDAY][2]
                end_date = end_date - timedelta(days = 1)
                # End date
                season.append(end_date)
            if (seasonType[index] == "Regular"):
                # Regular Season
                # Start date is 3rd Tuesday of every October
                start_date = [day for week in monthcal for day in week if \
                    day.weekday() == calendar.TUESDAY][2]
                # Start date
                season.append(start_date)
                # End date is 2nd Wednesday of every April
                monthcal2 = cal.monthdatescalendar(year + 1, 4)
                end_date = [day for week in monthcal2 for day in week if \
                    day.weekday() == calendar.WEDNESDAY][1]
                # End date
                season.append(end_date)
            if (seasonType[index] == "Post"):
                # Post Season
                # Start date: day after the 2nd Wednesday of April (= 2nd Thursday
                # only when both fall in the same week).
                # NOTE(review): `monthcal2` is carried over from the "Regular"
                # iteration of the inner loop — reordering seasonType would break this.
                start_date = [day for week in monthcal2 for day in week if \
                    day.weekday() == calendar.WEDNESDAY][1]
                start_date = start_date + timedelta(days = 1)
                # Start date
                season.append(start_date)
                # End date is 3rd Thursday of every June
                monthcal = cal.monthdatescalendar(year + 1, 6)
                end_date = [day for week in monthcal for day in week if \
                    day.weekday() == calendar.THURSDAY][2]
                # End date
                season.append(end_date)
            # # Year Abbreviation
            # abbr = str(year + 1)
            # season.append(str(year) + "-" + str(year + 1))
            # seasonType
            season.append(seasonType[index])
            id += 1
            season_writer.writerow(season)
        year += 1
| 33.032787 | 106 | 0.536476 |
2837e96df35c3511f85fcf1e1d1d5915d5541eac | 2,746 | py | Python | src/pace/allele_similarity.py | tmadden/pace | 0be5d92579efc4e6219f5c58bb4e4ac6754e865e | [
"MIT"
] | null | null | null | src/pace/allele_similarity.py | tmadden/pace | 0be5d92579efc4e6219f5c58bb4e4ac6754e865e | [
"MIT"
] | 9 | 2019-01-16T15:13:37.000Z | 2019-07-29T18:31:58.000Z | src/pace/allele_similarity.py | tmadden/pace | 0be5d92579efc4e6219f5c58bb4e4ac6754e865e | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import pace
from pace.definitions import amino_acids, builtin_aa_encodings, builtin_allele_similarities
from pace.sklearn import create_one_hot_encoder
from pkg_resources import resource_stream
def load_allele_similarity(allele_similarity_name):
    """Load the packaged allele-similarity matrix named *allele_similarity_name*.

    Reads ``data/allele_similarity_mat_<name>.txt`` bundled inside the ``pace``
    package into a DataFrame; the first column (presumably the allele names)
    becomes the index.
    """
    allele_similarity = pd.read_csv(
        resource_stream("pace", "data/allele_similarity_mat_{}.txt".format(allele_similarity_name)),
        sep=' ', index_col=0)
    return allele_similarity
def get_allele_similarity_mat(allele_similarity_name):
    """
    Get a matrix of pre-computed allele similarities.

    Parameters
    ----------
    allele_similarity_name : str
        Pre-computed allele similarity matrices are available based on
        observed peptide binding motifs ('motifs') or HLA protein binding
        pocket residues ('pockets').

    Returns
    -------
    pandas.core.frame.DataFrame
        allele similarity matrix
    """
    return load_allele_similarity(allele_similarity_name)
def get_similar_alleles(allele_similarity_name, allele, similarity_threshold):
    """
    Get the most similar alleles to a given allele, based on a specified
    allele similarity matrix and similarity threshold.

    Parameters
    ----------
    allele_similarity_name : str
        Pre-computed allele similarity matrices are available based on
        observed peptide binding motifs ('motifs') or HLA protein binding
        pocket residues ('pockets').
    allele : str
        The allele for which to determine similar alleles
    similarity_threshold
        Numerical threshold value that determines the cutoff for considering
        an allele similar to the given allele.

    Returns
    -------
    pandas.core.frame.DataFrame
        The similar alleles satisfying the specified threshold along
        with the numerical similarity values. Note that the given allele
        is also returned.
    """
    # NOTE(review): assert is stripped under `python -O`; also, if the name is
    # in builtin_allele_similarities but is neither 'motifs' nor 'pockets',
    # similar_alleles_thr is never bound and the return raises NameError.
    assert(allele_similarity_name in builtin_allele_similarities)
    allele_similarity = get_allele_similarity_mat(allele_similarity_name)
    # Column of similarity values between `allele` and every other allele.
    similar_alleles = allele_similarity[allele]
    if allele_similarity_name == 'motifs': # higher values => more similar alleles
        similar_alleles_thr = similar_alleles[similar_alleles > similarity_threshold]
        # Negate before argsort to order from most to least similar.
        similar_alleles_thr = similar_alleles_thr[(similar_alleles_thr*-1).argsort()]
    if allele_similarity_name == 'pockets': # higher values => less similar alleles
        similar_alleles_thr = similar_alleles[similar_alleles < similarity_threshold]
        similar_alleles_thr = similar_alleles_thr[similar_alleles_thr.argsort()]
    return similar_alleles_thr.to_frame()
| 36.131579 | 101 | 0.717407 |
2838bf018ab619624c31ab34ebae1fd0c469063d | 13,884 | py | Python | client/forms/reader_form.py | zhmzlzn/Network-Pj-BookReader | 891d395f2db464f4d4e7b84dd03c3cddebbafd30 | [
"MIT"
] | 6 | 2019-11-28T10:47:46.000Z | 2021-11-04T08:22:56.000Z | client/forms/reader_form.py | zhmzlzn/python-BookReader | 891d395f2db464f4d4e7b84dd03c3cddebbafd30 | [
"MIT"
] | null | null | null | client/forms/reader_form.py | zhmzlzn/python-BookReader | 891d395f2db464f4d4e7b84dd03c3cddebbafd30 | [
"MIT"
] | 4 | 2019-12-17T15:29:22.000Z | 2021-05-28T16:39:51.000Z | import tkinter as tk
from tkinter import *
from tkinter import messagebox
from tkinter.simpledialog import askinteger
from protocol.secure_transmission.secure_channel import establish_secure_channel_to_server
from protocol.message_type import MessageType
from protocol.data_conversion.from_byte import deserialize_message
from client.memory import current_user
import client.memory
class ReaderForm(tk.Frame):
    """Reading window: fetches book pages over the secure channel and keeps
    a server-side bookmark that is saved when the window is closed."""

    def __init__(self, bkname, master=None):
        super().__init__(master)
        self.master = master
        self.bkname = bkname
        self.user = client.memory.current_user
        self.sc = client.memory.sc  # secure channel shared with the rest of the client
        self.page_num = 0  # current page number (0-based)
        self.total_page = 0  # total number of pages in the book
        self.chapter = []  # chapter list: entries of (chapter name, first page)
        self.chap_num = 0  # current chapter index (derived from the chapter list)
        self.total_chapter = 0  # total number of chapters (derived from the chapter list)
        self.createForm()
        # Persist the bookmark on the server when the window is closed.
        master.protocol("WM_DELETE_WINDOW", self.update_bookmark)
    def createForm(self):
        """Build the widgets: chapter button on top, text body, nav buttons below."""
        self.master.title("Jack的阅读器")
        # Chapter button: shows the current chapter name, opens the chapter picker.
        self.chapbtn = Button(self, command=self.jump_chapter)
        self.chapbtn.pack(side=TOP, fill=X, expand=YES)
        self.text = Text(self, height=35)
        self.text.pack(side=TOP, fill=BOTH)
        # Load the bookmarked page (and page/chapter metadata) before the buttons,
        # since their labels depend on it.
        self.start_read()
        self.buttonframe = Frame(self)
        self.buttonframe.pack(side=BOTTOM, fill=BOTH, expand=YES)
        self.prechap = Button(self.buttonframe, text="上一章", command=self.previous_chapter)
        self.prechap.pack(side=LEFT, fill=X, expand=YES)
        self.prepg = Button(self.buttonframe, text="上一页", command=self.previous_page)
        self.prepg.pack(side=LEFT, fill=X, expand=YES)
        # Middle button shows "current/total" (1-based) and opens the jump dialog.
        self.pagebtn = Button(self.buttonframe, text=str(self.page_num+1) + '/' + str(self.total_page+1), command=self.jump_page)
        self.pagebtn.pack(side=LEFT, fill=X, expand=YES)
        self.nxtpg = Button(self.buttonframe, text="下一页", command=self.next_page)
        self.nxtpg.pack(side=LEFT, fill=X, expand=YES)
        self.nxtchap = Button(self.buttonframe, text="下一章", command=self.next_chapter)
        self.nxtchap.pack(side=LEFT, fill=X, expand=YES)
        self.pack()
    def get_chapter(self):
        """Return the index of the chapter that contains the current page.

        Relies on self.chapter entries being (name, first_page) pairs sorted
        by first_page. Returns None if no chapter matches (empty list).
        """
        for i in range(self.total_chapter):
            if self.page_num >= self.chapter[i][1]:
                # Last chapter, or current page precedes the next chapter's start.
                if i == self.total_chapter - 1 or self.page_num < self.chapter[i+1][1]:
                    return i
    def start_read(self):
        """Ask the server for the bookmarked page plus page/chapter metadata.

        Expects four messages in order: bookmark page number, total page
        count, chapter list, then the bookmarked page's text.
        """
        self.sc.send_message(MessageType.start_read, self.user + '*' + self.bkname)
        # Receive the bookmarked page number.
        message = self.sc.recv_message()
        if message['type'] == MessageType.page_num:
            self.page_num = message['parameters']
            print('《{}》书签位于第{}页'.format(self.bkname, message['parameters']))
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
            return
        else:
            print('未能成功接收到书签页数!错误:{}'.format(message['type']))
            messagebox.showerror('请求失败', '未能成功接收到书签页数!错误:{}'.format(message['type']))
            return
        # Receive the total page count.
        message = self.sc.recv_message()
        if message['type'] == MessageType.total_page:
            self.total_page = message['parameters']
            print('《{}》共{}页'.format(self.bkname, message['parameters']))
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
            return
        else:
            print('未能成功接收到总页数!错误:{}'.format(message['type']))
            return
        # Receive the chapter list.
        message = self.sc.recv_message()
        if message['type'] == MessageType.send_chapter:
            self.chapter = message['parameters']
            self.total_chapter = len(self.chapter)
            self.chap_num = self.get_chapter()
            self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
            print('《{}》共{}章'.format(self.bkname, self.total_chapter))
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
            return
        else:
            print('未能成功接收到章节列表!错误:{}'.format(message['type']))
            return
        # Receive the bookmarked page's text.
        message = self.sc.recv_message()
        if not message:
            messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
            return
        elif message['type'] == MessageType.send_page:
            print('成功接收书签页')
            # Pages may arrive with a leading '#' marker — strip it before display
            # (presumably a chapter-heading flag; confirm against server code).
            if message['parameters'][0] == '#':
                message['parameters'] = message['parameters'][1:]
            self.text.insert(1.0, message['parameters'])
        else:
            messagebox.showerror('请求失败','请求失败,服务器未返回书签页!')
            return
    def jump_page(self):
        """Jump to a page chosen via an integer dialog (shown 1-based)."""
        # NOTE(review): askinteger returns None when the dialog is cancelled,
        # making `None - 1` raise TypeError — needs a guard.
        self.page_num = askinteger('页面跳转', '要跳转的页数', initialvalue=self.page_num+1, maxvalue=self.total_page + 1, minvalue=1) - 1
        self.sc.send_message(MessageType.require_page, self.bkname + '*' + str(self.page_num))
        message = self.sc.recv_message()
        if not message:
            messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
        elif message['type'] == MessageType.send_page:
            print('成功接收第{}页'.format(self.page_num))
            self.chap_num = self.get_chapter()
            self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
            self.pagebtn['text'] = str(self.page_num+1) + '/' + str(self.total_page+1)  # update the page indicator
            self.text.delete('1.0', 'end')  # clear the text widget
            # Strip the optional leading '#' marker before display.
            if message['parameters'][0] == '#':
                message['parameters'] = message['parameters'][1:]
            self.text.insert(1.0, message['parameters'])
        else:
            messagebox.showerror('请求失败','请求失败,服务器未返回该页!')
            return
    def previous_page(self):
        """Go back one page (no-op with a warning on the first page)."""
        if self.page_num == 0:
            messagebox.showwarning('警告!','已经是第一页!')
            return
        self.page_num = self.page_num - 1
        # Both the book name and the page number must be sent, but send_message
        # carries a single value — so everything is stringified and joined with '*'.
        self.sc.send_message(MessageType.require_page, self.bkname + '*' + str(self.page_num))
        message = self.sc.recv_message()
        if not message:
            messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
        elif message['type'] == MessageType.send_page:
            print('成功接收第{}页'.format(self.page_num))
            self.chap_num = self.get_chapter()
            self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
            self.pagebtn['text'] = str(self.page_num+1) + '/' + str(self.total_page+1)  # update the page indicator
            self.text.delete('1.0', 'end')  # clear the text widget
            # Strip the optional leading '#' marker before display.
            if message['parameters'][0] == '#':
                message['parameters'] = message['parameters'][1:]
            self.text.insert(1.0, message['parameters'])
        else:
            messagebox.showerror('请求失败','请求失败,服务器未返回上一页!')
            return
    def next_page(self):
        """Advance one page (no-op with a warning on the last page)."""
        if self.page_num == self.total_page:  # pages are 0-based
            messagebox.showwarning('警告!','已经是最后一页!')
            return
        self.page_num = self.page_num + 1
        # Both the book name and the page number must be sent, but send_message
        # carries a single value — so everything is stringified and joined with '*'.
        self.sc.send_message(MessageType.require_page, self.bkname + '*' + str(self.page_num))
        message = self.sc.recv_message()
        if not message:
            messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
        elif message['type'] == MessageType.send_page:
            print('成功接收第{}页'.format(self.page_num))
            self.chap_num = self.get_chapter()
            self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
            self.pagebtn['text'] = str(self.page_num+1) + '/' + str(self.total_page+1)  # update the page indicator
            self.text.delete('1.0', 'end')  # clear the text widget
            # Strip the optional leading '#' marker before display.
            if message['parameters'][0] == '#':
                message['parameters'] = message['parameters'][1:]
            self.text.insert(1.0, message['parameters'])
        else:
            messagebox.showerror('请求失败','请求失败,服务器未返回下一页!')
            return
    def jump_chapter(self):
        """Jump to the first page of a chapter chosen from the picker dialog."""
        chap_name = self.ask_chap()
        if chap_name is None: return
        # Find the chosen chapter by name and load its first page.
        for i in range(self.total_chapter):
            if chap_name == self.chapter[i][0]:
                self.chap_num = i
                self.page_num = self.chapter[self.chap_num][1]
                self.pagebtn['text'] = str(self.page_num+1) + '/' + str(self.total_page+1)  # update the page indicator
                self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
                self.sc.send_message(MessageType.require_page, self.bkname + '*' + str(self.page_num))
                # Receive the requested page.
                message = self.sc.recv_message()
                if not message:
                    messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
                elif message['type'] == MessageType.no_book:
                    messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
                elif message['type'] == MessageType.send_page:
                    print('成功接收第{}章'.format(self.chap_num))
                    self.text.delete('1.0', 'end')  # clear the text widget
                    # Strip the optional leading '#' marker before display.
                    if message['parameters'][0] == '#':
                        message['parameters'] = message['parameters'][1:]
                    self.text.insert(1.0, message['parameters'])
                else:
                    messagebox.showerror('请求失败','请求失败,服务器未返回下一章!')
                    return
    def ask_chap(self):
        """Open the modal chapter-list dialog and return the chosen chapter name."""
        dialog = ChapterList(self.chapter)
        # Block until the dialog window is destroyed.
        self.wait_window(dialog)
        return dialog.chap_name
    def previous_chapter(self):
        """Go to the first page of the previous chapter (warn on the first one)."""
        if self.chap_num == 0:
            messagebox.showwarning('警告!','已经是第一章!')
            return
        self.chap_num = self.chap_num - 1
        # First page of the new chapter.
        self.page_num = self.chapter[self.chap_num][1]
        self.pagebtn['text'] = str(self.page_num+1) + '/' + str(self.total_page+1)  # update the page indicator
        self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
        self.sc.send_message(MessageType.require_page, self.bkname + '*' + str(self.page_num))
        # Receive the requested page.
        message = self.sc.recv_message()
        if not message:
            messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
        elif message['type'] == MessageType.send_page:
            print('成功接收第{}章'.format(self.chap_num))
            self.text.delete('1.0', 'end')  # clear the text widget
            # Strip the optional leading '#' marker before display.
            if message['parameters'][0] == '#':
                message['parameters'] = message['parameters'][1:]
            self.text.insert(1.0, message['parameters'])
        else:
            messagebox.showerror('请求失败','请求失败,服务器未返回上一章!')
            return
    def next_chapter(self):
        """Go to the first page of the next chapter (warn on the last one)."""
        if self.chap_num >= self.total_chapter-1:
            messagebox.showwarning('警告!','已经是最后一章!')
            return
        self.chap_num = self.chap_num + 1
        # First page of the new chapter.
        self.page_num = self.chapter[self.chap_num][1]
        self.pagebtn['text'] = str(self.page_num+1) + '/' + str(self.total_page+1)  # update the page indicator
        self.chapbtn['text'] = self.chapter[self.chap_num][0]  # update the displayed chapter name
        self.sc.send_message(MessageType.require_page, self.bkname + '*' + str(self.page_num))
        # Receive the requested page.
        message = self.sc.recv_message()
        if not message:
            messagebox.showerror('连接失败', 'QAQ 网络出现了问题,请稍后再试~')
        elif message['type'] == MessageType.no_book:
            messagebox.showerror('请求失败', '查无此书,请返回刷新书籍列表!')
        elif message['type'] == MessageType.send_page:
            print('成功接收第{}章'.format(self.chap_num))
            self.text.delete('1.0', 'end')  # clear the text widget
            # Strip the optional leading '#' marker before display.
            if message['parameters'][0] == '#':
                message['parameters'] = message['parameters'][1:]
            self.text.insert(1.0, message['parameters'])
        else:
            messagebox.showerror('请求失败','请求失败,服务器未返回下一章!')
            return
    def update_bookmark(self):
        """Window-close handler: persist the current page as the bookmark."""
        # Send username + book name + page number, '*'-separated.
        self.sc.send_message(MessageType.update_bookmark, self.user + '*' + self.bkname + '*' + str(self.page_num))
        self.master.destroy()
        return
class ChapterList(tk.Toplevel):
    """Modal chapter-picker dialog.

    After the caller's ``wait_window`` returns, ``chap_name`` holds the chosen
    chapter name, or '' if the dialog was closed without choosing.
    """

    def __init__(self, chapter):
        super().__init__()
        self.chapter = chapter   # entries of (chapter name, first page)
        self.chap_name = ''      # result slot read by the caller
        self.createForm()

    def createForm(self):
        """Build the scrollable chapter list and the jump/cancel buttons."""
        self.title("请选择章节")
        self.sb = Scrollbar(self)
        self.sb.pack(side=RIGHT, fill=Y)
        self.chaplist = Listbox(self, height=15, width=40, yscrollcommand=self.sb.set)
        for chap in self.chapter:
            self.chaplist.insert(END, chap[0])
        self.chaplist.pack(side=TOP, fill=BOTH)
        self.sb.config(command=self.chaplist.yview)
        self.buttonframe = Frame(self)
        self.buttonframe.pack(side=BOTTOM, fill=BOTH, expand=YES)
        self.jmpbtn = Button(self.buttonframe, text="跳转", command=self.jump)
        self.jmpbtn.pack(side=LEFT, fill=X, expand=YES)
        self.cncbtn = Button(self.buttonframe, text="取消", command=self.cancel)
        self.cncbtn.pack(side=LEFT, fill=X, expand=YES)

    def jump(self):
        """Record the selected chapter name and close the dialog."""
        selection = self.chaplist.curselection()
        if not selection:
            # Fix: with nothing selected, the old code passed an empty tuple
            # to Listbox.get and could raise; now it is simply a no-op.
            return
        self.chap_name = self.chaplist.get(selection[0])
        self.destroy()

    def cancel(self):
        """Close the dialog without choosing a chapter."""
        self.destroy()
2838cb9fb3068b931f43ec405489f94b2abb45c7 | 3,285 | py | Python | akispy/__init__.py | ryanleland/Akispy | dbbb85a1d1b027051e11179289cc9067cb90baf6 | [
"MIT"
] | null | null | null | akispy/__init__.py | ryanleland/Akispy | dbbb85a1d1b027051e11179289cc9067cb90baf6 | [
"MIT"
] | 2 | 2017-05-19T21:59:04.000Z | 2021-06-25T15:28:07.000Z | akispy/__init__.py | ryanleland/Akispy | dbbb85a1d1b027051e11179289cc9067cb90baf6 | [
"MIT"
] | 1 | 2017-05-18T05:23:47.000Z | 2017-05-18T05:23:47.000Z | #!/usr/bin/env python
"""Light weight python client for Akismet API."""
__title__ = 'akispy'
__version__ = '0.2'
__author__ = 'Ryan Leland'
__copyright__ = 'Copyright 2012 Ryan Leland'
import http.client, urllib.request, urllib.parse, urllib.error
class Connection(object):
    """Connection to the Akismet REST API.

    Wraps a persistent HTTP connection to ``<api-key>.rest.akismet.com``
    and exposes the four Akismet endpoints.
    """

    _key = None
    _version = None
    _conn = None

    def __init__(self, api_key, version="1.1", service_url="rest.akismet.com"):
        """Akispy class constructor.

        API key can be acquired from http://akismet.com/.
        """
        self._key = str(api_key)
        self._version = version
        # Akismet addresses authenticated calls to a key-specific subdomain.
        self._conn = http.client.HTTPConnection('%s.%s' % (self._key, service_url))

    def verify_key(self, url):
        """For verifying your API key.

        Provide the URL of your site or blog you will be checking spam from.
        Returns True when Akismet reports the key as valid.
        """
        response = self._request('verify-key', {
            'blog': url,
            'key': self._key
        })
        if response.status == 200:  # fix: was `is 200` (int identity compare)
            # fix: HTTPResponse.read() returns bytes — decode before comparing,
            # otherwise the comparison against "valid" was always False.
            return response.read().decode().strip() == "valid"
        return False

    def comment_check(self, params):
        """For checking comments; True means Akismet classified it as spam."""
        self._ensure_required(params)
        response = self._request('comment-check', params)
        if response.status == 200:
            return response.read().decode().strip() == "true"
        return False

    def submit_spam(self, params):
        """For submitting a spam comment that Akismet missed."""
        self._ensure_required(params)
        response = self._request('submit-spam', params)
        if response.status == 200:
            return response.read().decode().strip() == "true"
        return False

    def submit_ham(self, params):
        """For submitting a legitimate (ham) comment wrongly flagged as spam."""
        self._ensure_required(params)
        response = self._request('submit-ham', params)
        if response.status == 200:
            return response.read().decode().strip() == "true"
        return False

    @staticmethod
    def _ensure_required(params):
        """Raise MissingParam unless every mandatory comment param is present.

        Fix: the original raised the undefined name ``MissingParams`` —
        the exception defined by this module is ``MissingParam``.
        """
        for required in ('blog', 'user_ip', 'user_agent'):
            if required not in params:
                raise MissingParam(required)

    def _request(self, function, params, method='POST', headers=None):
        """Send a request and return the http.client.HTTPResponse."""
        headers = {} if headers is None else headers  # avoid shared mutable default
        if method == 'POST':  # fix: was `is 'POST'` (string identity compare)
            params = urllib.parse.urlencode(params)
            headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
        path = '/%s/%s' % (self._version, function)
        self._conn.request(method, path, params, headers)
        return self._conn.getresponse()
# Raised with the name of the missing required parameter as its only argument.
class MissingParam(Exception):
    """Thrown if a required Akismet API method param is missing."""
| 30.990566 | 101 | 0.557991 |
283a11f41a96444be50edc2390d31671ac936bd1 | 1,599 | py | Python | scripts/extract_mfcc.py | xavierfav/coala | b791ad6bb5c4f7b8f8f8fa8e0c5bd5b89b0ecbc3 | [
"MIT"
] | 34 | 2020-06-12T15:54:22.000Z | 2021-12-16T08:16:45.000Z | scripts/extract_mfcc.py | xavierfav/ae-w2v-attention | 8039c056ad365769bdf8d77d6292d4f3cfb957a4 | [
"MIT"
] | 3 | 2020-06-22T09:06:27.000Z | 2021-07-10T09:58:30.000Z | scripts/extract_mfcc.py | xavierfav/coala | b791ad6bb5c4f7b8f8f8fa8e0c5bd5b89b0ecbc3 | [
"MIT"
] | 4 | 2020-10-23T03:29:35.000Z | 2021-08-19T09:31:57.000Z | """
This script is used to compute mffc features for target task datasets.
Warning: Need manual editing for switching datasets
"""
import os
import librosa
import soundfile as sf
import numpy as np
from tqdm import tqdm
from pathlib import Path
FILES_LOCATION = '../data/UrbanSound8K/audio'
FILES_LOCATION = '../data/GTZAN/genres'
SAVE_LOCATION = '../data/embeddings/gtzan/mfcc'
SAVE_LOCATION = '../data/embeddings/nsynth/test/mfcc'
def compute_mfcc(filename, sr=22000):
# zero pad and compute log mel spec
try:
audio, sr = librosa.load(filename, sr=sr, res_type='kaiser_fast')
except:
audio, o_sr = sf.read(filename)
audio = librosa.core.resample(audio, o_sr, sr)
mfcc = librosa.feature.mfcc(y=audio, sr=sr)
mfcc_delta = librosa.feature.delta(mfcc, width=5, mode='nearest')
mfcc_delta2 = librosa.feature.delta(mfcc, order=2, width=5, mode='nearest')
feature = np.concatenate((np.mean(mfcc, axis=1), np.var(mfcc, axis=1),
np.mean(mfcc_delta, axis=1), np.var(mfcc_delta, axis=1),
np.mean(mfcc_delta2, axis=1), np.var(mfcc_delta2, axis=1)))
return feature
if __name__ == "__main__":
# p = Path(FILES_LOCATION)
# filenames = p.glob('**/*.wav')
# # filenames = p.glob('*')
p = Path('../data/nsynth/nsynth-test/audio')
filenames = p.glob('*.wav')
for f in tqdm(filenames):
try:
y = compute_mfcc(str(f))
np.save(Path(SAVE_LOCATION, str(f.stem)+'.npy'), y)
except RuntimeError as e:
print(e, f)
| 30.75 | 89 | 0.632896 |
283a66142e84ec8f0ef344d826e481e7692288fc | 391 | py | Python | sqli/__main__.py | everilae/sqli | 8a63076dc8316b38ce521b63e67bea8d2ccf2a80 | [
"MIT"
] | null | null | null | sqli/__main__.py | everilae/sqli | 8a63076dc8316b38ce521b63e67bea8d2ccf2a80 | [
"MIT"
] | 1 | 2017-10-22T11:13:58.000Z | 2020-06-01T09:20:20.000Z | sqli/__main__.py | everilae/sqli | 8a63076dc8316b38ce521b63e67bea8d2ccf2a80 | [
"MIT"
] | null | null | null | import argparse
import astunparse
import sys
from . import check
# Read Python source from the given file (or stdin when no argument is
# given) and report every node the checker flags as a possible SQL injection.
parser = argparse.ArgumentParser()
parser.add_argument("file", nargs="?", type=argparse.FileType("r"),
                    default=sys.stdin)
args = parser.parse_args()
poisoned = check(args.file.read())
print("Possible SQL injections:")
for p in poisoned:
    print("line {}: {}".format(p.get_lineno(), p.get_source()))
| 23 | 67 | 0.682864 |
283aae358baff3c73b726efc887ef653ab678494 | 318 | py | Python | rammstein.py | wildekek/rammstein-generator | fc7ef34260c4dddaba01ff4c964349e13bd4bf1a | [
"MIT"
] | 3 | 2015-10-11T15:39:30.000Z | 2019-06-18T19:20:00.000Z | rammstein.py | wildekek/rammstein-generator | fc7ef34260c4dddaba01ff4c964349e13bd4bf1a | [
"MIT"
] | null | null | null | rammstein.py | wildekek/rammstein-generator | fc7ef34260c4dddaba01ff4c964349e13bd4bf1a | [
"MIT"
] | 1 | 2019-06-16T21:49:16.000Z | 2019-06-16T21:49:16.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Print the lyrics of Rammstein's "Du hast" by assembling the repeated
# phrases instead of writing them out in full.
a = 'Du hast mich'           # the core repeated line
b = 'Du, du hast' + '\n' + a  # two-line verse fragment
c = a + ' gefragt'            # "Du hast mich gefragt"


def j(i):
    """Join the given lines with a newline between each."""
    return '\n'.join(i)


d = j([b, b, ''])  # repeated verse fragment with a trailing blank line
e = j(['Willst du bis der Tod euch scheidet', 'Treurig sein für alle Tage?', 'Nein, nein!', ''])
f = j([d, b, a, '', j([c, c, c, 'Und ich hab nichts gesagt', '']), e, e])
# Fix: the original used the Python 2 `print` statement, a syntax error
# under Python 3; the call form below works on both interpreters.
print(j([d, f, f, e]))
283bc06264c86314c6694bec3801d78f4ce49fef | 799 | py | Python | tests/test_06_reporting_classes.py | themperek/pyuvm | 12abf6b0a631321c0fcce6ebbc04b8cc9900c6a8 | [
"Apache-2.0"
] | null | null | null | tests/test_06_reporting_classes.py | themperek/pyuvm | 12abf6b0a631321c0fcce6ebbc04b8cc9900c6a8 | [
"Apache-2.0"
] | null | null | null | tests/test_06_reporting_classes.py | themperek/pyuvm | 12abf6b0a631321c0fcce6ebbc04b8cc9900c6a8 | [
"Apache-2.0"
] | null | null | null | import pyuvm_unittest
from pyuvm import *
class s06_reporting_classes_TestCase(pyuvm_unittest.pyuvm_TestCase):
"""Basic test cases."""
def test_object_creation(self):
"""
Test that we actually get a logger in our object.
"""
ro = uvm_report_object('ro')
self.assertTrue(hasattr(ro, "logger"))
with self.assertLogs(ro.logger, level='DEBUG') as cm:
ro.logger.debug('debug')
ro.logger.info('info')
ro.logger.error('error')
ro.logger.critical('critical')
self.assertEqual(cm.output, ['DEBUG:ro:debug',
'INFO:ro:info',
'ERROR:ro:error',
'CRITICAL:ro:critical'])
| 34.73913 | 68 | 0.524406 |
283c7a1056bf5070026e3e0618dc2649d53dd2ec | 203 | py | Python | sucursal_crud/sucursal_crud_api/models/puntoDeRetiro.py | cassa10/challenge-api | 0bcc4f38b049f930faca45b80d869835650e2a23 | [
"MIT"
] | null | null | null | sucursal_crud/sucursal_crud_api/models/puntoDeRetiro.py | cassa10/challenge-api | 0bcc4f38b049f930faca45b80d869835650e2a23 | [
"MIT"
] | null | null | null | sucursal_crud/sucursal_crud_api/models/puntoDeRetiro.py | cassa10/challenge-api | 0bcc4f38b049f930faca45b80d869835650e2a23 | [
"MIT"
] | null | null | null | from django.db import models
from django.core.validators import MinValueValidator
from .nodo import Nodo
class PuntoDeRetiro(Nodo):
capacidad = models.IntegerField(validators=[MinValueValidator(1)]) | 33.833333 | 70 | 0.817734 |
283cfb68139e552afe4cbfabaafdcde926934b65 | 8,429 | py | Python | trinity/sync/header/chain.py | g-r-a-n-t/trinity | f108b6cd34ed9aabfcf9e235badd91597650ecd5 | [
"MIT"
] | 1 | 2021-04-07T07:33:28.000Z | 2021-04-07T07:33:28.000Z | trinity/sync/header/chain.py | g-r-a-n-t/trinity | f108b6cd34ed9aabfcf9e235badd91597650ecd5 | [
"MIT"
] | null | null | null | trinity/sync/header/chain.py | g-r-a-n-t/trinity | f108b6cd34ed9aabfcf9e235badd91597650ecd5 | [
"MIT"
] | null | null | null | import asyncio
from typing import Sequence
from async_service import Service, background_asyncio_service
from eth.abc import BlockHeaderAPI
from eth.exceptions import CheckpointsMustBeCanonical
from eth_typing import BlockNumber
from trinity._utils.pauser import Pauser
from trinity.chains.base import AsyncChainAPI
from trinity.db.eth1.chain import BaseAsyncChainDB
from trinity.protocol.eth.peer import ETHPeerPool
from trinity.protocol.eth.sync import ETHHeaderChainSyncer
from trinity._utils.logging import get_logger
from trinity.sync.common.checkpoint import Checkpoint
from trinity.sync.common.constants import (
MAX_BACKFILL_HEADERS_AT_ONCE,
MAX_SKELETON_REORG_DEPTH,
)
from trinity.sync.common.headers import persist_headers
from trinity.sync.common.strategies import (
FromCheckpointLaunchStrategy,
FromGenesisLaunchStrategy,
FromBlockNumberLaunchStrategy,
SyncLaunchStrategyAPI,
)
class HeaderChainSyncer(Service):
def __init__(self,
chain: AsyncChainAPI,
db: BaseAsyncChainDB,
peer_pool: ETHPeerPool,
enable_backfill: bool = True,
checkpoint: Checkpoint = None) -> None:
self.logger = get_logger('trinity.sync.header.chain.HeaderChainSyncer')
self._db = db
self._checkpoint = checkpoint
self._enable_backfill = enable_backfill
self._chain = chain
self._peer_pool = peer_pool
if checkpoint is None:
self._launch_strategy: SyncLaunchStrategyAPI = FromGenesisLaunchStrategy(db)
else:
self._launch_strategy = FromCheckpointLaunchStrategy(
db,
chain,
checkpoint,
peer_pool,
)
self._header_syncer = ETHHeaderChainSyncer(chain, db, peer_pool, self._launch_strategy)
async def run(self) -> None:
head = await self._db.coro_get_canonical_head()
if self._checkpoint is not None:
self.logger.info(
"Initializing header-sync; current head: %s, using checkpoint: %s",
head,
self._checkpoint,
)
else:
self.logger.info("Initializing header-sync; current head: %s", head)
try:
await self._launch_strategy.fulfill_prerequisites()
except asyncio.TimeoutError as exc:
self.logger.exception(
"Timed out while trying to fulfill prerequisites of "
f"sync launch strategy: {exc} from {self._launch_strategy}"
)
self.manager.cancel()
return
# Because checkpoints are only set at startup (for now): once all gaps are filled, no new
# ones will be created. So we can simply run this service till completion and then exit.
if self._enable_backfill:
backfiller = SequentialHeaderChainGapSyncer(self._chain, self._db, self._peer_pool)
self.manager.run_child_service(backfiller)
self.manager.run_daemon_child_service(self._header_syncer)
self.manager.run_daemon_task(self._persist_headers)
# run sync until cancelled
await self.manager.wait_finished()
async def _persist_headers(self) -> None:
async for persist_info in persist_headers(self.logger, self._db, self._header_syncer):
if len(persist_info.new_canon_headers):
head = persist_info.new_canon_headers[-1]
else:
head = await self._db.coro_get_canonical_head()
self.logger.info(
"Imported %d headers in %0.2f seconds, new head: %s",
len(persist_info.imported_headers),
persist_info.elapsed_time,
head,
)
class HeaderChainGapSyncer(Service):
def __init__(self,
chain: AsyncChainAPI,
db: BaseAsyncChainDB,
peer_pool: ETHPeerPool,
max_headers: int = None) -> None:
self.logger = get_logger('trinity.sync.header.chain.HeaderChainGapSyncer')
self._chain = chain
self._db = db
self._peer_pool = peer_pool
self._max_headers = max_headers
async def run(self) -> None:
available_gaps, _ = self._db.get_header_chain_gaps()
if len(available_gaps):
gap = available_gaps[0]
else:
self.logger.debug("No gaps to fill. Stopping")
return
launch_block_number = BlockNumber(max(0, gap[0] - MAX_SKELETON_REORG_DEPTH))
self.logger.info(f"Launching from %s", launch_block_number)
launch_strategy = FromBlockNumberLaunchStrategy(self._db, launch_block_number)
gap_length = gap[1] - gap[0]
if self._max_headers and gap_length > self._max_headers:
final_block_number = BlockNumber(gap[0] + self._max_headers)
else:
final_block_number = gap[1]
self._header_syncer = ETHHeaderChainSyncer(
self._chain, self._db, self._peer_pool, launch_strategy)
await launch_strategy.fulfill_prerequisites()
self.logger.info(
"Initializing gap-fill header-sync; filling gap: %s", (gap[0], final_block_number)
)
self.manager.run_child_service(self._header_syncer)
self.manager.run_task(self._persist_headers, final_block_number)
# run sync until cancelled
await self.manager.wait_finished()
async def _persist_headers(self, gap_end: BlockNumber) -> None:
async def _is_at_end_of_gap(headers: Sequence[BlockHeaderAPI]) -> bool:
all_headers_too_advanced = headers[0].block_number > gap_end
if all_headers_too_advanced:
self.manager.cancel()
return True
else:
return False
try:
async for persist_info in persist_headers(
self.logger, self._db, self._header_syncer, _is_at_end_of_gap):
self.logger.info(
"Imported %d gap headers from %s to %s in %0.2f seconds,",
len(persist_info.imported_headers),
persist_info.imported_headers[0],
persist_info.imported_headers[-1],
persist_info.elapsed_time,
)
except CheckpointsMustBeCanonical as err:
self.logger.warning("Attempted to fill gap with invalid header: %s", err)
self.manager.cancel()
class SequentialHeaderChainGapSyncer(Service):
def __init__(self,
chain: AsyncChainAPI,
db: BaseAsyncChainDB,
peer_pool: ETHPeerPool) -> None:
self.logger = get_logger('trinity.sync.header.chain.SequentialHeaderChainGapSyncer')
self._chain = chain
self._db = db
self._peer_pool = peer_pool
self._pauser = Pauser()
self._max_backfill_header_at_once = MAX_BACKFILL_HEADERS_AT_ONCE
def pause(self) -> None:
"""
Pause the sync after the current operation has finished.
"""
# We just switch the toggle but let the sync finish the current segment. It will wait for
# the resume call before it starts a new segment.
self._pauser.pause()
self.logger.debug2(
"Pausing SequentialHeaderChainGapSyncer after current operation finishs"
)
def resume(self) -> None:
"""
Resume the sync.
"""
self._pauser.resume()
self.logger.debug2("SequentialHeaderChainGapSyncer resumed")
async def run(self) -> None:
while self.manager.is_running:
if self._pauser.is_paused:
await self._pauser.await_resume()
gaps, _ = self._db.get_header_chain_gaps()
if len(gaps) < 1:
self.logger.info("No more gaps to fill. Exiting")
self.manager.cancel()
return
else:
self.logger.debug(f"Starting gap sync at {gaps[0]}")
syncer = HeaderChainGapSyncer(
self._chain,
self._db,
self._peer_pool,
max_headers=self._max_backfill_header_at_once,
)
async with background_asyncio_service(syncer) as manager:
await manager.wait_finished()
| 37.629464 | 97 | 0.626409 |
283d50169f9d4063fc968899a7356c0ef91c4024 | 2,436 | py | Python | plato/clients/scaffold.py | iQua/plato | 76fdac06af8b4d85922cd12749b4a687e3161745 | [
"Apache-2.0"
] | null | null | null | plato/clients/scaffold.py | iQua/plato | 76fdac06af8b4d85922cd12749b4a687e3161745 | [
"Apache-2.0"
] | null | null | null | plato/clients/scaffold.py | iQua/plato | 76fdac06af8b4d85922cd12749b4a687e3161745 | [
"Apache-2.0"
] | 1 | 2021-05-18T15:03:32.000Z | 2021-05-18T15:03:32.000Z | """
A federated learning client using SCAFFOLD.
Reference:
Karimireddy et al., "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning"
(https://arxiv.org/pdf/1910.06378.pdf)
"""
import os
from dataclasses import dataclass
import torch
from plato.clients import simple
@dataclass
class Report(simple.Report):
"""Client report sent to the SCAFFOLD federated learning server."""
payload_length: int
class Client(simple.Client):
"""A SCAFFOLD federated learning client who sends weight updates
and client control variate."""
def __init__(self):
super().__init__()
self.client_update_direction = None
self.server_update_direction = None
self.new_client_update_direction = None
async def train(self):
# Initialize the server update direction and client update direction for trainer
if self.server_update_direction is not None:
self.trainer.client_update_direction = self.client_update_direction
self.trainer.server_update_direction = self.server_update_direction
report, weights = await super().train()
# Get new client update direction from the trainer
self.new_client_update_direction = self.trainer.new_client_update_direction
# Compute deltas for update directions
deltas = []
if self.client_update_direction is None:
self.client_update_direction = [0] * len(
self.new_client_update_direction)
for client_update_direction_, new_client_update_direction_ in zip(
self.client_update_direction,
self.new_client_update_direction):
delta = torch.sub(new_client_update_direction_,
client_update_direction_)
deltas.append(delta)
# Update client update direction
self.client_update_direction = self.new_client_update_direction
fn = f"new_client_update_direction_{self.client_id}.pth"
os.remove(fn)
return Report(report.num_samples, report.accuracy,
report.training_time, report.data_loading_time,
2), [weights, deltas]
def load_payload(self, server_payload):
"Load model weights and server update direction from server payload onto this client"
self.algorithm.load_weights(server_payload[0])
self.server_update_direction = server_payload[1]
| 35.304348 | 93 | 0.694992 |
283e3728c74f274d987dc75b56ca98081fe4485b | 2,307 | py | Python | test/test.py | SK-415/bilireq | ce4dfa2ae05a88291162907b86caf29ab868bedc | [
"MIT"
] | 2 | 2021-10-20T06:32:35.000Z | 2022-03-26T11:40:07.000Z | test/test.py | SK-415/bilireq | ce4dfa2ae05a88291162907b86caf29ab868bedc | [
"MIT"
] | 1 | 2021-12-06T01:37:08.000Z | 2021-12-06T01:37:08.000Z | test/test.py | SK-415/bilireq | ce4dfa2ae05a88291162907b86caf29ab868bedc | [
"MIT"
] | null | null | null | import asyncio
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent))
from bilireq.auth import Auth
from bilireq.dynamic import get_user_dynamics, get_followed_dynamics_update_info, get_followed_new_dynamics, get_followed_history_dynamics
from bilireq.live import get_rooms_info_by_ids
from bilireq.login import Login, get_token_info, refresh_token
from bilireq.user import get_user_info
from test_data import AUTH, PASSWORD, PHONE, UID, USERNAME
async def main():
pass
# 登录相关测试
# auth = Auth()
# print(auth.get_cookies(), auth.get_tokens(), auth["access_token"])
# auth = Auth(AUTH)
# print(await auth.get_info())
print(await test_qrcode_login())
# print(await test_sms_login())
# print(await test_pwd_login())
# await test_pwd_login_duration()
# print(await refresh_token(access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN))
# print(await get_token_info(auth))
# print(await get_user_dynamics(UID))
# print(await get_followed_new_dynamics(auth=auth, reqtype="app"))
# print(await get_followed_dynamics_update_info(auth=auth))
# print(await get_followed_history_dynamics(578888250640167815, auth=auth))
# print(await get_rooms_info_by_ids(room_ids=[1,2], auth=auth, reqtype="app"))
# print(await get_room_info(UID))
# print(await get_user_info(str(UID)))
# print(await get_user_info(20709866, auth=auth, reqtype="app"))
# print(await get_user_info(20709866, auth=auth, reqtype="web"))
async def test_qrcode_login():
login = Login()
await login.get_qrcode(print_=True)
return await login.qrcode_login(retry=-1)
async def test_sms_login():
login = Login()
await login.send_sms(tel=PHONE)
while True:
try:
return await login.sms_login(input())
except Exception as e:
print(e)
async def test_pwd_login():
login = Login()
return await login.pwd_login(USERNAME, PASSWORD)
async def test_pwd_login_duration():
from datetime import datetime
print(datetime.now())
while True:
try:
print(await test_pwd_login())
print(datetime.now())
break
except Exception as e:
print(e)
await asyncio.sleep(10)
asyncio.run(main())
| 30.355263 | 138 | 0.704378 |
283f0444ed2c9cb2e8181317df155e9ffdbf38c6 | 1,105 | py | Python | scripts/pgmviz.py | anindex/auto_localization | e8acc6fb4a4221115e2d4f9ba87fd077ad741b70 | [
"MIT"
] | 1 | 2020-09-03T14:29:27.000Z | 2020-09-03T14:29:27.000Z | scripts/pgmviz.py | anindex/auto_localization | e8acc6fb4a4221115e2d4f9ba87fd077ad741b70 | [
"MIT"
] | null | null | null | scripts/pgmviz.py | anindex/auto_localization | e8acc6fb4a4221115e2d4f9ba87fd077ad741b70 | [
"MIT"
] | 2 | 2019-09-26T15:20:37.000Z | 2021-07-14T11:00:49.000Z | import re
import numpy
def read_pgm(filename, byteorder='>'):
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return numpy.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
if __name__ == "__main__":
from matplotlib import pyplot
image = read_pgm("/home/anindex/robotics_ws/src/krp_localization/maps/test.pgm", byteorder='<')
pyplot.imshow(image, pyplot.cm.gray)
pyplot.show()
| 34.53125 | 99 | 0.529412 |
283fe692e8590b67f92e91991108dc8259bb2861 | 406 | py | Python | sgnlp/models/span_extraction/__init__.py | vincenttzc/sgnlp | 44ae12a5ae98c9a1945d346e9373854c7d472a4b | [
"MIT"
] | null | null | null | sgnlp/models/span_extraction/__init__.py | vincenttzc/sgnlp | 44ae12a5ae98c9a1945d346e9373854c7d472a4b | [
"MIT"
] | null | null | null | sgnlp/models/span_extraction/__init__.py | vincenttzc/sgnlp | 44ae12a5ae98c9a1945d346e9373854c7d472a4b | [
"MIT"
] | null | null | null | from .config import RecconSpanExtractionConfig
from .tokenization import RecconSpanExtractionTokenizer
from .modeling import RecconSpanExtractionModel
from .preprocess import RecconSpanExtractionPreprocessor
from .postprocess import RecconSpanExtractionPostprocessor
from .train import train
from .eval import evaluate
from .utils import load_examples
from .data_class import RecconSpanExtractionArguments
| 40.6 | 58 | 0.889163 |
2841fd174e918cba71310ebfefa31472caa5fe2f | 331 | py | Python | pandoc-starter/MarkTex/marktex/config.py | riciche/SimpleCVReproduction | 4075de39f9c61f1359668a413f6a5d98903fcf97 | [
"Apache-2.0"
] | 923 | 2020-01-11T06:36:53.000Z | 2022-03-31T00:26:57.000Z | pandoc-starter/MarkTex/marktex/config.py | riciche/SimpleCVReproduction | 4075de39f9c61f1359668a413f6a5d98903fcf97 | [
"Apache-2.0"
] | 25 | 2020-02-27T08:35:46.000Z | 2022-01-25T08:54:19.000Z | pandoc-starter/MarkTex/marktex/config.py | riciche/SimpleCVReproduction | 4075de39f9c61f1359668a413f6a5d98903fcf97 | [
"Apache-2.0"
] | 262 | 2020-01-02T02:19:40.000Z | 2022-03-23T04:56:16.000Z | '''Don't change the basic param'''
import os
'''prog path'''
config_path = os.path.split(__file__)[0]
marktemp_path = os.path.join(config_path,"markenv.tex")
'''tools setting'''
image_download_retry_time = 10
# 在尝试重试次数达到上限后,是否等待手动下载该文件放到目录
# wait_manully_if_all_failed = False
# 在tex文件里添加图片的时候,使用相对路径还是绝对路径
give_rele_path = True
| 23.642857 | 55 | 0.770393 |
28420d9ddb5dd0a224753623044f62aac5eba76f | 669 | py | Python | ProgramsToRead/ExercisesLists/Lista05/Exer12Lista05.py | ItanuRomero/PythonStudyPrograms | 2b784b2af068b34e65ddf817ca8d99c1ca3a710e | [
"MIT"
] | null | null | null | ProgramsToRead/ExercisesLists/Lista05/Exer12Lista05.py | ItanuRomero/PythonStudyPrograms | 2b784b2af068b34e65ddf817ca8d99c1ca3a710e | [
"MIT"
] | null | null | null | ProgramsToRead/ExercisesLists/Lista05/Exer12Lista05.py | ItanuRomero/PythonStudyPrograms | 2b784b2af068b34e65ddf817ca8d99c1ca3a710e | [
"MIT"
] | null | null | null | # Questão 12. Construa uma função que receba uma string como parâmetro
# e devolva outra string com os carateres emba- ralhados. Por exemplo:
# se função receber a palavra python, pode retornar npthyo, ophtyn ou
# qualquer outra combinação possível, de forma aleatória. Padronize em
# sua função que todos os caracteres serão devolvidos em caixa alta ou
# caixa baixa, independentemente de como foram digitados.
import random
def embaralha(palavra):
shuffled = list(palavra.lower())
random.shuffle(shuffled)
shuffled = ''.join(shuffled)
print(f'A palavra embaralhada é {shuffled}')
palavra = input('Digite uma palavra: ')
embaralha(palavra)
| 37.166667 | 71 | 0.751868 |
28426aef923f9eca775a9b7c35cef0f1597b7c28 | 54,872 | py | Python | DataScience/Numpy.py | AlPus108/Python_lessons | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | [
"MIT"
] | null | null | null | DataScience/Numpy.py | AlPus108/Python_lessons | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | [
"MIT"
] | null | null | null | DataScience/Numpy.py | AlPus108/Python_lessons | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | [
"MIT"
] | null | null | null | # Numpy массивы
# Документация по NumPy: https://numpy.org/doc/stable/
# Список универсальных функций NumPy https://numpy.org/devdocs/reference/ufuncs.html
# NumPy - векторные/научные вычисления. Пакет содержит функциональные средства для работы с многомерными массивами
# и высокоуровневыми математическими функциями
# Это дополнительная библиотека в Питоне. Хотя это самостоятельный модуль, написанный на С(Си), но настолько популярная,
# что ее стали ассоциировать с Питоном. В ней есть специальные массивы. Похожи на list,
# но numpy-массивы, это более крутыые массивы. И поэтому они используются в научных целях.
# В основном numpy нужне для математических вещей, обучающих алгоритмов. и тд.
# Нейронный сети, это в основном numpy-массивы. 60% - numpy и 40% - обычные листы. Мы будем использоват и то и другое.
# Важно научиться в них не путаться.
# Важно знать, что в Питоне нет массивов, они есть только в Нампай.
# Чем отличается Массив от Листа? Лист может содержать элементы разных типов,
# тогда как Массив всегда содержит элементы только одного типа.
# Импоритруем библиотеку
import numpy as np
# ------------------- СПОСОБЫ СОЗДАНИЯ МАССИВА ------------------------
# ------------------------------- array() ------------------------------
# Создание массива из списка
# Массив, это почти то же самое, что и Лист, только немного другая структура, но она близкая.
# Numpy-массив более функционален.
# У нас есть список
a = [2, 4, 5]
# Есть такое понятие, как ndarray (numpy_data_array) - это название типа объектов numpy
# Мы можем взять этот обычный лист и превратить в numpy-массив
a_numpy = np.array(a) # это подобно преобразованию типов. Здесь мы преобразовывыаем list в numpy-list
# При этом значение массива 'a' присваиваем новой переменной.
# Или без создания листа, напрямую
# a = np.array([2, 4, 5]) - это приведение типа лист к массиву
# Выводим тип нового массива
print(type(a_numpy))
# <class 'numpy.ndarray'> - но здесь мы тип не получаем, а получаем класс, так как обычные ф-и здесь уже не работают.
# В бибилотеке numpy, чтобы получить тип, используют ф-ю dtype()
print(a_numpy.dtype) # int32
# Этот тип содержит целочисленные числа. В данном случае int32. 32 - сколько бит он выделяет на каждую ячейку.
# Бывает и 16 и 64. Это не принцимпиально. Для нас главное, что это int - целочисленное число.
# И теперь a_numpy - это уже numpy-массив.
# Выводим его на экран
print(a_numpy) # [2 4 5]
# Разница с выводм Листа в том, что numpy выводится без запятых через пробелы.
# Хотя на вид содержание одно и то же, но структуры разные.
# Создаем новый лист с типом одного значения float
b = [2, 3.14]
# Превращаем его в numpy и выведем тип
b_numpy = np.array(b)
print(b_numpy.dtype) # float64 - тип с плавающей точкой с 64-мя битами в ячейке.
print(b_numpy) # [2. 3.14]
# При преобарзовани типов, первый элемент int был приведен к типу float
# Теперь все элементы массива принадлежат к типу float
# Выводим новый лист с разными типами данных
c = [2, 3.14, 'kotiki']
# Приводим к типу numpy
c_numpy = np.array(c)
print(c_numpy) # ['2' '3.14' 'kotiki'] # получаем тип str
# numpy всегда одного типа и он приводит все элементы к самому старшему типу.
# Так как float нельзя привести к int без потери данных, а str тем более нельзя привести к int и float.
# Поэтому, самым старшим типом здесь является строка.
print(c_numpy.dtype) # <U32 - тип строки
# Дальше, мы помещаем в наш массив еще один лист
my_list = [2, 3.14, 'kotiki', [2, 3, 4]] # третьим элементом листа будет лист.
# Приводим его к типу numpy-массив
my_list_numpy = np.array(my_list)
print(my_list_numpy) # [2 3.14 'kotiki' list([2, 3, 4])] - в массиве уже целый лист и это уже тип - объект.
print(my_list_numpy.dtype) # object
# Теперь в numpy-массив присутствуют только объекты.
# Для numpy-массивов это самый высокий уровень - тип Object - это уже тип numpy.
# Попробуем привести следующую последовательность к типу int
# np.array([2, 3.14, 'kotiki', [2,3,4]], dtype='int64')
# Будет выдана ошибка: ValueError: invalid literal for int() with base 10: 'kotiki'
# Он не может привести строковое значение 'kotiki' к числу.
# То же касается и list, и не важно, что там цифры.
# ----------------------------- Типы данных ---------------------------
# В NumPy реализованы свои типы данных, которые отличаются от встоенных в Пвйтон.
# Например, нативный int в Пайтоне ничем не ограничен (кроме вашей памяти). Но, целые числа в numpy имеют
# фиксированный размер, np.int8, np.int16...
# Другое различие кроется в встроенных методах.
a_python = 123
# Числовое значение Пвйтон мы можем привести к типу numpy
a_numpy = np.int32(123) # int - целочисленное, 32 - сколько памяти мы закладываем под это число - 4 байта.
# Что это меняет? Это меняет максимальное число. Самое максимальное число в этом случае будет - 4 миллиарда.
# Если сделаем 64 - будет в два раза больше. То есть, для значения 132 хватит с большим запасом, хотя под него хыватит и 8
# Смотрим их типы
print(type(a_python)) # <class 'int'> - здесь питоновский тип int
print(type(a_numpy)) # <class 'numpy.int32'> - здесь тип int класса numpy
# Есть тип данных uint - только для положительных числовых значений:
# Это диапазон
# uint8: 0 - 2**8 (для диапазона значений от 0 до 2 в 8-й степени (до 255))
# uint16: 0 - 2**16 (от 0 до 2 в 16-й степени (до 65535))
# uint32: 0 - 2**32 (от 0 до 2 в 32-й степени (до 4294967295))
# uint64: 0 - 2**64 (от 0 до 2 в 32-й степени (до 18446744073709551615))
# Если указем просто int:
# int8: -128 -- +127 (те же 256 значений, но уже включая и отрицательные)
# int16: -32768 -- +32767
# int32: -2147483648 -- +2147483647
# int64: -9223372036854775808 -- +9223372036854775807
# ----------------------------- Одномерные массивы -----------------------
# Массив их целых чисел
# Здесь мы создаем лист, но не выносим его в отдельную переменную, а сразу передаем в numpy
a = np.array([1, 2, 3, 4, 5])
# Создаем массив из строк
b = np.array(['cat', 'mouse', 'dog', 'rat'])
# Рассмотрим их типы
print(a.dtype) # dtype('int64')
print(b.dtype) # dtype('<U5') # это тип "строка" с максимальной длиной 5 - в нашем списке это слово 'mouse'
# Строго говоря, сейчас создали массив не верно. При создании массива надо всегда указывать тип его данных.
# Потому что массивы NumPy имеют строгую типизацию. Опустить указание типа в NumPy возможно лишь в одном случае -
# если мы создаем копию уже существующего массива. Один только int имеет в NumPy четыре типа:
# int8, int16, int32, int64. Важная справка: пользоваться числами больше размерености вашей машины не имеет смысла.
# Если наша опер.система имеет разрядность 32, то использовать int64 не целесообразно. Но, если надо, то можно.
# Вещественные числа бывают трех видов: float16, float32, float64. С ними то же правило - если надо, то можно.
# В NumPy float16 соответствует С(ишному): float; F(ортрановский): REAL; float32 - C:double, F: double precision;
# P(итоновский): float. То есть, Питоновский float, это float32. Float64 (двойная точность) в NumPy ничему не соответствует.
# В классических языках программирования ему нет аналогов.
# В NumPy комплексные числа состоят из двух половинок: complex64 - две части из float32 и complex128 - две части из float64.
# Это и есть "коробочные" родные типы библиотеки NumPy.
# С фиксированной точкой NumPy работать не умеет.
# Итак, если мы хотим создать массив из списка, правильно это сделать будет так:
# Создаем массив типа float (а в большинстве случаем так оно и будет)
# Это будет список, состоящий из четырех списков
x = np.array([[1,2], [3,4], [5,6], [7,8]], dtype=np.float32) # в конце указываем тип
# Выводим на экран
print('Массив со строгой типизацией ', x)
# [[1. 2.]
# [3. 4.]
# [5. 6.]
# [7. 8.]]
# В массиве все числа типа float. То есть, при указании типа, бибилиотека NumPy принудительно приобразовывает
# все элементы массива к этому типу.
# Чтобы узнать тип элементов:
print(x.dtype) # float32
print(type(x)) # <class 'numpy.ndarray'> - класс нампи
# --------------------------------- shape ---------------------------
# Выдает размерность массива - в данном случае (одномерного массива) - одномерный вектор (в одну строку)
print(a.shape) # (5,) - получена размерность массива 'a' - 5 - количество элементов в массиве
print(b.shape) # (4,) - одномерный массив из 4-х элементов
# -------------------------------- size ----------------------------
# Выдает количество элементов в массиве
print(a.size) # 5 - в данном случае совпадает с shape. Это одномерный массив длины - 5 и его размер - 5
# Но size и shape могут различаться. Если бы, например, массив был бы многомерным 10х10, то shape был бы 10х10,
# а size был бы 100
# Сделаем длину 6
a = np.array([1, 2, 3, 4, 5, 6])
print(a)
print(a.shape) # (6,)
# Мы можем изменить этот размер с помощью ф-и reshape()
# ---------------------------- Двухмерные массивы (матрицы) -------------------------------
# Принято Матрицами называть именно Двухмерные массивы,
# а структуры больше двухмерных называют Тензоры.
# Матрица и двухмерный массив, это синонимы.
# Создаем двухмерный массив из нескольких списков
my_array = np.array([[1, 2, 3],[4, 5, 6]])
print(my_array.shape)
print('Массив из двух списков\n', my_array)
# [[1 2 3]
# [4 5 6]]
# ---------------------------------- reshape() -----------------------
# Меняет форму матрицы. Выдает многомерный вектор (многомерный массив)
# Но, при этом сам массив не меняется. reshape() лишь создает копию массива,
# поэтому все изменения, которые она вносит, не влияют на исходный массив
# Создаем одномерный массив с одновременным созданием из него двухмерного
# При этом новая форма должна содержать столько же элементов, сколько и старая форма.
two_dim_arr = np.arange(12).reshape(3, 4)
print('Создание двухмерного массива\n ', two_dim_arr)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
a_new = a.reshape(3, 2)
print(a_new)
# [[1 2]
# [3 4]
# [5 6]]
print(a_new.shape) # (3, 2)
print(a_new.size) # 6
# Поменяем shape
a_new = a.reshape(2, 3)
print(a_new)
# [[1 2 3]
# [4 5 6]]
print(a_new.shape) # (2, 3)
print(a_new.size) # 6
a_new = a.reshape(1, 6)
print(a_new)
# [[1 2 3 4 5 6]] # это двумерная матрица (видно по двум парам кв.скобок), состоящая из одной строки с шестью элементами
print(a_new.shape) # (1, 6)
print(a_new.size) # 6
# Если количество элементов в массиве нечетное, например - 5, то мы можем сделть размерность только 1х5 или 5х1
# Если мы попытаемся сделать из массива 5 матрицу 2х3 - будет выдана ошибка.
# Хотя в листе это можно сделать
list = [[1, 2, 3], [4, 5]]
# Но, если мы переведем этот list в numpy
np_list = np.array(list)
print(np_list)
# [list([1, 2, 3]) list([4, 5])]
# получим numpy-массив из двух списков. Но, полноценный numpy мы не получим.
# Но, если мы подадим ему лист с четным количеством элементов,
list_2 = [[1, 2, 3], [4, 5, 6]]
np_list_2 = np.array(list_2)
print(np_list_2)
# [[1 2 3]
# [4 5 6]]
# Получаем полноценную numpy-матрицу (двухмерный массив)
# По сути одномерный и многомерный вектор, это одно и тоже, но в numpy это имеет разницу.
# Можно создавать массивы с бесконечным количеством элементов.
# Многомерный массив можно создавать из листа листов
two_dim_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
two_dim_np = np.array(two_dim_list)
print('Двухмерный массив из Листа: \n', two_dim_np)
# [[1 2 3]
# [4 5 6]
# [7 8 9]]
# ----------------------- ravel() -------------------------------------
# Из 2D в 1D. Метод ravel() преобразует многомерные массивы в одномерные.
# Тот же параметр order определяет, «схлопнется» ли массив построчно или столбец за столбцом:
# print(a1_2d)
# > [[ 1 2 3 4]
# [ 5 6 7 8]
# [ 9 10 11 12]]
#
# print(a1_2d.ravel()) # строка за строкой
# > [ 1 2 3 4 5 6 7 8 9 10 11 12]
#
# print(a1_2d.ravel(order='F')) # столбец за столбцом
# > [ 1 5 9 2 6 10 3 7 11 4 8 12]
# Эта ф-я вытягивает все элементы массива в строку. То есть, отменяет все операции reshape()
print('Вытягиваем массив в строку ', two_dim_np.ravel())
# [1 2 3 4 5 6 7 8 9]
# Здесь элементы массива расположились в одну строку. Но, на самом деле, в памяти они распологаются по столбцам.
# Чтобы ravel() этого не делал, в аргументы можно вставить 'F'
print('Вытягиваем массив в строку с F ', two_dim_np.ravel('F'))
# Тогда увидим, как они на самом деле располагаются в памяти
# [1 4 7 2 5 8 3 6 9]
# То же касается и ф-и reshape()
print('reshape() с F ', two_dim_np.reshape((3,3), order='F'))
# [[1 2 3]
# [4 5 6]
# [7 8 9]]
# -------------------------- Индексация ------------------------
# Имеем массив
print(a)
# [1 2 3 4 5 6]
# К его элементам мы можем обращаться по индексам
print(a[0]) # 1
print(a[1]) # 2
print(a[-1]) # 6 - последний элемент
# ----------------------- Обращение к элементам многомерного масива ----------------------------
# Можно получать элементы из многомерного массива двумя сособами: запись индексов с двумя парами скобок или с одной
# Если нужно получить конкретынй элемент из массива, который например, находится в строке с индексом 1 (2-я строка),
# в первых скобках указываем индекс строка, во вторых - индекс самого элемента в этой строке (столбец)
print('Элемент из 2-мерного массива ', two_dim_arr[1][1]) # 5
# Второй способ с одной парой скобок, через запятую
print('Элемент из 2-мерного массива ', two_dim_arr[1, 1]) # 5
print(two_dim_arr[:,1]) # [1 5 9] - выводим первый столбец
print(two_dim_arr[1:,1]) # [5 9] - первый столбец и все элементы этого столбца, начиная с первого
# Как выглядит массив two_dim_arr
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
# Обрщаться по одному индексу к многомерному массиву неправильно [1].
# Даже если мы указываем один индекс, рядом с ним должно стоять двоеточие [1,:]
# Если это многомерный массив, то двоеточие используется обязательно.
print('Вывели строку с индексом 1 ', two_dim_arr[1,:]) # [4 5 6 7]
# Запись [1,:] означает, что мы работаем только с 1-й строкой и в этой строке выбираем все столбцы - ':'
# Для разнообразия посмотрим на столбец с индексом 1
print('Вывели столбец с индексом 1 ', two_dim_arr[:,1]) # [1 5 9]
# Чтобы обратиться к столбцу, в начале надо обозначить строку в виде ':' (проходим все строки)
# в них выбираем значение только в первом столбце 1.
# Получили столбец с индексом 1 в виде одномерного массива.
# Очень важный момент - это не равнозначно операции - вырезка из списка.
# В Питоне операция "вырезки из списка" производится через ':', то есть "слайсинг". Но это не то же самое.
# При вырезки из списка возвращается копия фрагмента. В данном случае это не так.
# Присвоим переменной 'a' строку с индексом 2 из массива
a = two_dim_arr[2,:]
print('Вторая строка из массива ', a) # [ 8 9 10 11]
# Меняем в ней первый элемент
a[0] = 25
# Снова выводим
print(a) # [25 9 10 11]
# Затем выводим весь изначальный массив
print(two_dim_arr)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [25 9 10 11]] - изменение в первом индексе!
# То есть, в этом случае мы не копируем данные из массива, а воздействуем непосредственно на сам массив.
# Тогда, логичный вопрос: как сделать копию, чтобы оставить изначальный массив без изменения?
# В большинстве случаем копию массива делать не надо, так как это пустая трата времени.
# Но, если все же надо, то это делается вот так:
two_dim_arr_cop = np.array(two_dim_arr)
print('Копия ', two_dim_arr_cop)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [25 9 10 11]]
# Теперь при любом изменении в копии, изначальный массив будет нетронут.
two_dim_arr_cop[0,0] = 1000
print(two_dim_arr_cop)
# [[1000 1 2 3]
# [ 4 5 6 7]
# [ 25 9 10 11]]
print(two_dim_arr) # изначальный массив остался без изменений
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [25 9 10 11]]
# ------------------------ Слайсинг -----------------------------
print(a[:2]) # [1 2] - от 0 до 2-го индекса (исключительно)
print(a[2:]) # [3 4 5 6] - от 2-го индекса и до конца.
# Можно делать отрицательную индексацию
print(a[:-2]) # [1 2 3 4] - от 0 до второго индекса с конца (исключительно)
print(a[1:4]) # [2 3 4] - от 1-го индекса до 4-го (исключительно)
print(a[::]) # выводит весь массив. То есть эта запись аналогична a[].
# Слайсинг можно использовать для заполнения массива элементами (broadcasting)
# Создадим массив
mas = np.arange(0, 10)
print(mas) # [0 1 2 3 4 5 6 7 8 9]
# Заменим элементы в массиве от 5 до последнего индекса числом 10
mas[5:] = 10
print(mas) # [ 0 1 2 3 4 10 10 10 10 10]
# Присваиваем новому массиву часть предыдущего массива
mas_new = mas[3:8]
print(mas_new) # [ 3 4 10 10 10] - присвоили указанный диапазон из старого массива новому
# Сделаем бродкаст для новго массива
mas_new[2:] = 7
print(mas_new) # [3 4 7 7 7] - получили замену трех последних значений
# Но, если сейчас выведим изначальный массив
print(mas)
# [ 0 1 2 3 4 7 7 7 10 10]
# Здесь тоже получаем замену значений, которые передали в новый массив! Не смотря, на то что была создана новая переменная.
# На самом деле при присвоении значения из старой переменной в новую, мы лишь создаем ссылку на старый массив.
# Но, что делать, если хотим сделать копию массива без изменения первоначального массива?
# Это делается с помощью метода copy()
# ------------------------------------ copy() ---------------------------------
# Создаем копию из массива mas
mas_c = mas.copy()
print('Копия массива ', mas_c) # [ 0 1 2 3 4 7 7 7 10 10]
# Меняем все элементы в новом массиве на нули
mas_c[:] = 0
print(mas_c) # [0 0 0 0 0 0 0 0 0 0]
# Проверяем содержание старого массива
print(mas) # [ 0 1 2 3 4 7 7 7 10 10]
# -------------------------- Условия в аргументах, Маскирование --------------------------------------
# Сравнения в массивах
# Но, что будет, если мы зададим условие?
print(a > 5) # [False False False False False True]
# Получаем инфомацию, какой из элементов массива удовлетворяет условию, а какой нет.
# Или по-другому. Выведем четные числа
print(a % 2 == 0) # [False True False True False True]
# Эти результаты выводов мы можем сохранять
mask_1 = a > 10 # это называется "Маска"
print(mask_1) # [ True False False True]
# И теперь мы можем использовать этот bool_a, чтобы сделать выборку (подвыборка).
# Для этого помещаем его в изначальный массив
print('Вывод маски ', a[mask_1]) # [25 11]
# Или же это выражение можно напрямую применить в качестве индекса массива (обратите внимание: здесь условие a < 5 другое, чем в маске выше)
print(a[a < 5]) # получим тот же результат
# Если использовать кв.скобки, можно получать значения
# Или можем ему сказать: выдай пожалуйста все значения, остаток которых от деления на 2 равен 0 (то есть четные)
print(a[a % 2 == 0]) # [2 4 6]
# Выдай все позиции, значения которых больше двух и меньше или равно 5
print(a[(a > 2) & (a <= 5)]) # [3 4 5]
# Выдай все значения больше 4
print(a[a > 4]) # [5 6]
# Это то, чем удобен numpy и чего нет в list
# Двумерный numpy
# Создаем список с помощью генератора
lst_num = [i for i in range(30)]
print(lst_num)
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]
# - получаем список из 30 чисел.
# Создаем из этого списка numpy-массив
np_lst_num = np.array(lst_num)
print(np_lst_num) # [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29]
# Делаем решейп
np_lst_num = np_lst_num.reshape(6, 5)
print(np_lst_num)
# [[ 0 1 2 3 4]
# [ 5 6 7 8 9]
# [10 11 12 13 14]
# [15 16 17 18 19]
# [20 21 22 23 24]
# [25 26 27 28 29]]
# Отсюда мы можем вывести избранные строки. Например, первые две
print(np_lst_num[:2, :]) # задаем количество по двум осям через запятую:
# по вертик - 2 строки (тут указываем индекс, по какой выводить; сам он не включается в диапазон), по горизонтальной - все.
# [[0 1 2 3 4]
# [5 6 7 8 9]]
# Или, по первой оси первые 3 строки (выводятся строки с индексами 0, 1, 2,
# а по второй все элементы через 1 (с шагом 2) (начало : конец : шаг - не обязательный)
print(np_lst_num[:3, ::2]) #
# [[ 0 2 4]
# [ 5 7 9]
# [10 12 14]]
# Или по первой оси выдай первые 4 строки, а по второй 0-ю и 1-ю
print(np_lst_num[:4, [0, 1]]) #
# [[ 0 1]
# [ 5 6]
# [10 11]
# [15 16]]
# Или же без слайсинга, только скобками
print('Изначал', np_lst_num)
# [[ 0 1 2 3 4]
# [ 5 6 7 8 9]
# [10 11 12 13 14]
# [15 16 17 18 19]
# [20 21 22 23 24]
# [25 26 27 28 29]]
print(np_lst_num[[0, 1], [4, 2]]) # обращаемся к 1-й и 2-й строкам, из них берем 4-й и 2-й элементы: [4 7]
# Или вообще хитро сделать (сейчас забегаем немного вперед)
np_lst_sum = np_lst_num.sum(axis=1) # берем сумму по строкам (колонки - 0) - в каждой строке суммируем значения.
print(np_lst_sum) # [ 10 35 60 85 110 135] - это суммы чисел в каждой строке.
# Здесь мы вызвали ф-ю sum() просуммировали значения по горизонтальной оси
# А теперь выведем те строки, у которых сумма от 50 до 100, а по второй оси выводим все значения (:)
# print(np_lst_num[50 <= np_lst_sum <= 100, :])
# Здесь мы говорим: выведи np_lst_num, но строки возьми те, у которых np_lst_sum больше 50 и меньше 100,
# а колонки выведи все.
# Но, на это выражение он ругается
# Зададим маску, которая нам выберет значения в указанных позициях.
# mask = (50 <= np_lst_sum <= 100) # на это он также выдает ошибку. Здесь проблема в двойном выражении
# Тогда попробуем разделить
mask = (np_lst_sum >= 50) & (np_lst_sum <= 100)
print(mask) # [False False True True False False] - из шести строк условию соответствуют две средние.
# Выводим значения по маске со всеми колонками
print(np_lst_num[mask, :]) # здесь подставляем маску для получения строки, и выводим все колонки ':'
# [[10 11 12 13 14]
# [15 16 17 18 19]]
# получили те строки, в которых сумма элементов больше 50 и меньше 100
# К матрицам (2-мерным массивам) можно применять теже операции, что и к одномерным.
print('Сумма двухмерного массива ',np_lst_num.sum()) # 435
# Можем обратиться отдельно к каждому столбцу или к строке
print('Минимальный элемент в каждом столбце ',np_lst_num.min(axis=0)) # [0 1 2 3 4]
print('Минимальный элемент в каждой строке ',np_lst_num.min(axis=1)) # [ 0 5 10 15 20 25]
# ------------------------ Циклы в numpy -------------------------------
# По numpy-массивам также можно запускать циклы
# Имеем массив
print(a)
# [25 9 10 11]
for i in a:
print(i)
# 25
# 9
# 10
# 11
# Создаем нампи-массив из списка с помощью генератора (цикла)
a_arr_1 = np.array([i for i in a])
print('Массив с помощью генератора ', a_arr_1) # [25 9 10 11]
# Создаем второй нампи-массив с помощью генератора
a_arr_2 = np.array([i for i in range(15)])
print('Массив с помощью генератора ', a_arr_2) # [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
# Вообще конструкция с генераторами редко испльзуется, но вы должны знать об этом способе.
# Этот же подход можно использовать для создания масок
# Создадим маску для массива a_arr_1 по наличию его элементов в массиве a_arr_2
mask = np.array([(i in a_arr_2) for i in a_arr_1])
# Здесь мы пробегаем по каждому элементу массива a_arr_1, и для каждого элемента проверяем, входит ли он в массив a_arr_2
print(mask) # [False True True True] # элемент 25 массива a_arr_1 не входим в массив a_arr_2, остальные входят.
# Еще один способ
# Смотрим шейп (форму) массива
print(a.shape)
# (6,) - одна строка с шестью позициями. При реализации шейпа, получаем последовательность типа Тапл с 6 в индексе 0
for i in range(a.shape[0]): # a.shape[0] - это 6
print(a[i])
# 1
# 2
# 3
# 4
# 5
# 6
# Получаем то же самое
# Выведем на экран шейп двухмерного листа
print(np_lst_num.shape)
# (6, 5) - количество строк - 6, элементов в каждой строке - 5
# Запускаем цикл на основе этого шейпа
for i in range(np_lst_num.shape[0]): # идем по первой оси [0] - то есть здесь просто делаем выбор строк
print('Строка ', i, sep='') # выводим номер строки начиная с 0. Если хотим с 1, то пишем: i+1
for j in range(np_lst_num.shape[1]): # идем по второй оси [1] - строки в пределах одной колонки, те по одной позиции
print(np_lst_num[i, j]) # выводим последовательно каждую строку и колонки по каждой строке.
print()
# Строка 0
# 0
# 1
# 2
# 3
# 4
#
# Строка 1
# 5
# 6
# 7
# 8
# 9
#
# Строка 2
# 10
# 11
# 12
# 13
# 14
#
# Строка 3
# 15
# 16
# 17
# 18
# 19
#
# Строка 4
# 20
# 21
# 22
# 23
# 24
#
# Строка 5
# 25
# 26
# 27
# 28
# 29
# Можно это же вывести красиво по строчкам
# Команда print() всегда переводит на новую строку, но ею это делать неудобно.
# Мы просто будем накапливать строку
curr_str = '' # создаем переменную с пустой буферной строкой
for i in range(np_lst_num.shape[0]): # проходимся по оси [0] - это строки, которые перебираем поочередно.
curr_str = '' # каждый раз, когда новая строка, сбрасываем текущую строку в ноль, то есть очищаем
# print('Строка ', i, sep='')
for j in range(np_lst_num.shape[1]): # проходим по всем позициям (колонкам - [1]) выбранной строки в цикле i
curr_str += str(np_lst_num[i, j]) + ' ' # в очищенную строку добавляем считанные элементы
# здесь построчно добавляем выводимые элементы приведенные к типу str и разделяем их пробелами
print(curr_str) # собранную строку выводим на экран и затем циклы повторяются
# 0 1 2 3 4
# 5 6 7 8 9
# 10 11 12 13 14
# 15 16 17 18 19
# 20 21 22 23 24
# 25 26 27 28 29
# Вот таким образом выводится двумерная numpy
# Сам numpy конечно по-умнее выводится, но это вариант самописного.
# ------------------------ ВСТРОЕННЫЕ МЕТОДЫ NumPy --------------------------
# ----------------------------- ДЛЯ ЦИФР ------------------------------------
# Имеем одномерный массив 'a'
print(a)
# [1 2 3 4 5 6]
# и двухмерный
print(np_lst_num)
# [[ 0 1 2 3 4]
# [ 5 6 7 8 9]
# [10 11 12 13 14]
# [15 16 17 18 19]
# [20 21 22 23 24]
# [25 26 27 28 29]]
# На их примерах разберем методы
# -------------------------------- sum() --------------------------------
# Суммирует значения
print(a.sum()) # 21
print(np_lst_num.sum()) # 435
# Здесь можно указать, по какой оси суммировать
print(np_lst_num.sum(axis=0)) # здесь он просуммирует по вертикальной оси - все колонки.
# [75 81 87 93 99]
# Это все в сумме даст предыдущее значене - 435
# А если еще добавим по первой оси, то он просуммирует построчно
print(np_lst_num.sum(axis=1)) # здесь он просуммирует по горизонтальной оси - все строки.
# [ 10 35 60 85 110 135]
# -------------------------------- mean() ---------------------------------
# Выдает среднее значение всех элементов
print(a.mean()) # 3.5
# -------------------------------- max() ----------------------------------
# Выдает максимальное значение из всех значений
print(a.max()) # 6
# или идентичная запись
np.max(a)
print('Максимум всего массива ', np_lst_num.max()) # максимальное значение из всего массива
print(np_lst_num.max(axis=0)) # [25 26 27 28 29] - максимальные значения по колонкам
print(np_lst_num.max(axis=1)) # [ 4 9 14 19 24 29] - максимальные значения по строкам
# -------------------------------- min() ---------------------------------
# Выдает минимальное значение из всех значений
print(a.min()) # 1
# или
np.min(a)
# --------------------------------- argmax() argmin() -----------------------
# Получение индекса максимального/минимального элемента массива
print('Макс.число ', a.argmax()) # Макс.число 5
print('Мин.число ', a.argmin()) # Мин.число 0
# -------------------------------- prod() ---------------------------------
# Выдает произведение всех значений (перемножает)
print(a.prod()) # 720
# --------------------------------------- ДЛЯ СТРОК --------------------------
# Имеем массив строк
print(b)
# ['cat' 'mouse' 'dog' 'rat']
# --------------------------------- sort() ---------------------------
b.sort()
print(b) # ['cat' 'dog' 'mouse' 'rat'] - сортирует по алфовиту (по первым буквам)
# Для цифр он также работает
d = [24, 65, 1, 23, 235, 4578, 12]
d_numpy = np.array(d) # приводим лист к numpy
d_numpy.sort()
print(d_numpy)
# [ 1 12 23 24 65 235 4578]
# Обратная сортировка
d_numpy = d_numpy[::-1] # -1 - шаг в обратном порядке
print(d_numpy)
# [4578 235 65 24 23 12 1]
d_numpy = d_numpy[::-2] # -2 - в обратном порядке с шагом 2
print(d_numpy)
# [ 1 23 65 4578]
# ----------------------- ОПЕРАЦИИ С МАССИВАМИ --------------------------
# -------------------------- arange() -------------------------
# Это функция-генератор создания массивов
# В numpy создание массива может быть еще более простым
# Ф-я arange() создает массив из чисел, идущих по порядку.
print(np.arange(5))
# [0 1 2 3 4]
# Это то же самое, что и обычный range()
# с диапазоном
print(np.arange(3, 6)) # правая граница не включается в диапазон
# [3 4 5]
# с шагом
print(np.arange(3, 16, 5))
# [ 3 8 13]
# Создание массива с присвоением переменной
a = np.arange(5)
print(a)
# [0 1 2 3 4]
b = np.arange(3, 8)
print(b)
# [3 4 5 6 7]
# Создание многомерного массива одной командой
a_m = np.arange(12).reshape(3, 4)
print('Массив одной командой ', a_m)
# [[ 0 1 2 3]
# [ 4 5 6 7]
# [ 8 9 10 11]]
# ---------------------------------- ВЕКТОРНЫЕ ОПЕРАЦИИ --------------------------------
print(a * 2) # [0 2 4 6 8] - multiply each element of the array by 2 (a = np.arange(5) = [0 1 2 3 4])
print(a ** 2) # [ 0  1  4  9 16] - raise each element of the array to the power 2 (the old comment [3 5 7 9 11] was incorrect)
# Операции с двумя массивами
# Важное условие - при выполнении этих операций, длина (размерность) массивов должна быть одинаковая.
# Имеем два массива 'a' и 'b'
# Суммирование массивов
print('Сложение двух массивов ', a + b)
# [ 3 5 7 9 11]
# Суммирование происходит поэлементно, если массивы совпадают размерами
# Это также полезная особенность NumPy
# List в такой ситуации не суммирует элементы, а соединяет их (конкатенирует)
# Чтобы суммировать их поэлементно в List, надо использоваь цикл for, что менее удобно и менее быстро.
# NumPy также удобен при математических сложениях, потому что он не объединяет элементы, а именно суммирует.
# Можно массив сложить с самим собой
print('Сложение массива с самим собой ', a + a)
# [0 2 4 6 8]
# То же самое для остальных арифметических операций
print(a - b) # [-3 -3 -3 -3 -3]
print(a / b) # [0. 0.25 0.4 0.5 0.57142857]
print(a * b) # [ 0 4 10 18 28]
print(a ** b) # [ 0 1 32 729 16384]
# print(b // a) # [0 4 2 2 1]
# - здесь он ругается на деление на 0: RuntimeWarning: divide by zero encountered in floor_divide
# То же можно делать и со скалярами (с числами)
print(a + 1) # Каждый элемент массива увеличиваем на 1
# [1 2 3 4 5]
# При делении чисел на ноль получаем предупреждение
print('Делим на ноль ', a / 0) # [nan inf inf inf inf]
# Первый элемент 0. Деление 0 на 0 выдает результат not a number (nan)
# В случае деления числа на 0 получаем бесконечность infinity (inf)
# При делении этого массива самого на себя получаем то же предупреждение
print(a / a) # [nan 1. 1. 1. 1.]
# Такой вывод позволяет продолжить выполнение кода без его остановки из-за этой ошибки. Будет просто предупреждение.
a ** 2 # все элементы массива возводим во второу степерь
# Квадратный корень всех элементов массива
a_sqrt = np.sqrt(a) #
print('Квадратный корень всех элементов', a_sqrt)
# [0. 1. 1.41421356 1.73205081 2. ]
# Экспонента все элеменртов массива
print('Экспонента всех элементов ', np.exp(a))
# [ 1. 2.71828183 7.3890561 20.08553692 54.59815003]
# Можно делать сложные комбинации
print((a * b - a) ** 2) # [ 0 9 64 225 576]
# К операциям можно добавлять ф-и
n = ((a * b - a) ** 2).sum()
print(n) # 874
n = ((a * b - a) ** 2).mean()
print(n) # 174.8
# Можно делать операции по индексам
v = a * 5 + b[0] * 17
# a * 5 - каждый элемент массива 'a' умножаем на скаляр(одно число) - это будте вектор
# затем ко всем ячейкам массива 'a' прибавляем произведение одного элемента по индексу 0 массива b на скаляр 17
print(v) # [51 56 61 66 71]
# C NumPy это все очень удобно выполнять. Этого нет в List
# Хотя в List также есть плюсы. Например, в List можно присоединять в конец массива.
# Чтобы сделать это в NumPy, двумерный numpy-массив обязательно должен быть прямоугольным (все строки одной длины).
# Тогда как листы могут быть любыми.
# Поэтому, мы будем использовать и List и NumPy
# -------------------------- МНОГОМЕРНЫЕ МАССИВЫ --------------------------------
# Мы уже рассматривали двумерные.
# ------------------------- Индексация -----------------------------
# Имеем лист
a_list = [[3, 6, 2, 7],
[9, 2, 4, 8],
[8, 2, 3, 6]]
# Переводим его в numpy
np_a_list = np.array(a_list)
# Это можно было сделать и сразу
np_list = np.array([[1,2], [3,4], [5,6], [7,8]]) # четыре строки по два столбца в каждой
print(np_list)
# [[1 2]
# [3 4]
# [5 6]
# [7 8]]
# Вывод построчно
print(a_list) # [[3, 6, 2, 7], [9, 2, 4, 8], [8, 2, 3, 6]]
# Нулевая строка
print(np_a_list[0]) # [3 6 2 7]
# Первая строка
print(np_a_list[1]) # [9 2 4 8]
# Последняя строка
print(np_a_list[-1]) # [8 2 3 6]
# Обращение поэлементно
print(np_a_list[1, 2]) # 4
# Вывод по столбцам
# Нулевой столбец
print(np_a_list[1:, 0]) # [9 8] - нулевой столбец с первого индекса до последнего
# Все строки - первый столбец
print(np_a_list[:, 1]) # [6 2 2]
# Для матриц также доступны все математические операции.
# Они совершаются поэлеметно. Условие то же самое - оба массива должны быть одинаковой размерности.
# -------------- Ф-и быстрого создания numpy-массивов ---------------------
# ------------------------------------ empty() ------------------------
# Это первый и самый тупой способ создания массива - пустой массив
A = np.empty((3,4), dtype=np.float32) # в скобках указываем форму массива и его тип. Когда мы, при создании массива,
# не указываем его тип - это исключение из правил.
# Если сейчас его выведем на экран, то увидим, что его элементы имеют случайные значения
print('Пустой массив\n ', A)
# [[-1.4012985e-45 2.8025969e-44 0.0000000e+00 0.0000000e+00]
# [ 0.0000000e+00 1.7950633e-42 6.0185311e-36 2.9427268e-44]
# [-4.9230647e-03 -1.0303581e-03 -1.8485262e-27 1.4026998e-42]]
# Это полезно в том случае, если нам не важно, чем массив заполнен и мы потом его заполним тем, чем нам надо.
# Но, как правило, пустой массив, это потенциально опасная ситуация - случайные значения, это не очень хорошо.
# Поэтому предусмотрены способы создания уже заполненных массивов.
# -------------------- ones_like() --------------------------------------
# Чаще всего numpy-массивы не делаются из листов, как мы делали до этого.
# Обычно они копируются из уже готовых numpy-массивов
# Это делает ф-я ones_like()
b_ = np.ones_like(np_a_list) # она сделает массив из единиц таким же размером, как и массив np_a_list
# У него нет никаких параметров, он просто копирует размер переданного ему массива в качестве аргумента
print(b_)
# [[1 1 1 1]
# [1 1 1 1]
# [1 1 1 1]]
# ------------------------ ones() ------------------------------
# Массив из единиц с помощью ф-и np.ones()
c_ = np.ones((12, 8), dtype=np.complex64) # задаем размер матирцы 12х8 как Тапл в скобках с указанием его типа
# Для разнообразия возьмем комплексный тип (по умолчанию - float)
print(c_)
# [[1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]
# [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]]
# Получаем массив, заполненный одними единицами.
# ---------------------------------- zeros() --------------------------------
# Или, можно сделать массив из нулей с помощью ф-и np.zeros(). Оба эти методы похожи.
a1 = np.zeros(10) # одномерный массив из 10 нулей
print(a1)
# [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
a_ = np.zeros((5, 3), dtype=np.float64) # указываем размерность 5х3 как Тапл (в скобках) и его тип
print('Массив из нулей\n ', a_)
# [[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]
# ----------------------------------- full() -------------------------------
# Можно задать массив, заполненый произвольными значниями
D = np.full((5,3), 46, dtype=np.int64) # в скобках задаем форму, значение, которым надо заполнить и тип.
print('Произольные значения\n', D)
# [[46 46 46]
# [46 46 46]
# [46 46 46]
# [46 46 46]
# [46 46 46]]
# Сеточный массив. Это создание массива с шагом 0,1
X1 = np.arange(0, 100, 0.1, dtype=np.float64)
print(X1)
# [ 0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1. 1.1 1.2 1.3
# 1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7
# 2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. 4.1
# 4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 5. 5.1 5.2 5.3 5.4 5.5
# 5.6 5.7 5.8 5.9 6. 6.1 6.2 6.3 6.4 6.5 6.6 6.7 6.8 6.9
# 7. 7.1 7.2 7.3 7.4 7.5 7.6 7.7 7.8 7.9 8. 8.1 8.2 8.3
# ...........................................................
# 92.4 92.5 92.6 92.7 92.8 92.9 93. 93.1 93.2 93.3 93.4 93.5 93.6 93.7
# 93.8 93.9 94. 94.1 94.2 94.3 94.4 94.5 94.6 94.7 94.8 94.9 95. 95.1
# 95.2 95.3 95.4 95.5 95.6 95.7 95.8 95.9 96. 96.1 96.2 96.3 96.4 96.5
# 96.6 96.7 96.8 96.9 97. 97.1 97.2 97.3 97.4 97.5 97.6 97.7 97.8 97.9
# 98. 98.1 98.2 98.3 98.4 98.5 98.6 98.7 98.8 98.9 99. 99.1 99.2 99.3
# 99.4 99.5 99.6 99.7 99.8 99.9]
# Тип float, это числа с плавающей точкой. А эти числа являются приближенными.
# Может случиться так, что в массив может быть вставлено значение 99.99999999, но, по сути это то же что и 100.
# Поэтому данной функцией arange() надо пользоваться аккуратно, так как мы не можем точно сказать,
# какое число будет последним, где она остановится.
# Поэтому, в этом плане более удобной ф-ей является ф-я linspace()
# -------------------------------- linspace() ---------------------------
# Эта ф-я возвращает равномерно распределенное множество. Это множество, в котором расстояние между элементами одинаковое
d_ = np.linspace(0, 5, 5, dtype=np.float64) # старт, стоп и количество равномерно распределенных элементов
print(d_) # [0. 1.25 2.5 3.75 5. ]
# Получаем пять элементов от 0 до 5, которые равномерно распределены
# Разница между этими элементами составляет одно и то же число - 1.25
d_1 = np.linspace(0, 10, 5) # меняя настройки, мы получим другую разницу между числами
print(d_1) # [ 0. 2.5 5. 7.5 10. ] - разницам - 2,5
# Это всегда одномерный массив (видно по одной паре квадратных скобок).
# Эта ф-я гарантированно включит в наш диапазон цифры, указанные в старте и в стоп.
# Трехмерный массив (Тензор)
d_ = np.linspace(15, 37, 24, dtype=np.float64).reshape(2, 3, 4)
print(d_)
# [[[15. 15.95652174 16.91304348 17.86956522]
# [18.82608696 19.7826087 20.73913043 21.69565217]
# [22.65217391 23.60869565 24.56521739 25.52173913]]
#
# [[26.47826087 27.43478261 28.39130435 29.34782609]
# [30.30434783 31.26086957 32.2173913 33.17391304]
# [34.13043478 35.08695652 36.04347826 37. ]]]
# -------------------------------- logspace() -------------------------
# Это менее распространенный способ создания массива.
# Это ф-я похожа на linspace(), только раскидывает точки равномерно по логарифмической шкале
X3 = np.logspace(0, 100, 101, dtype=np.float64)
# здесь базовые числа 0 и 100 используются как показатель степени
print('logspace\n', X3)
# [1.e+000 1.e+001 1.e+002 1.e+003 1.e+004 1.e+005 1.e+006 1.e+007 1.e+008
# 1.e+009 1.e+010 1.e+011 1.e+012 1.e+013 1.e+014 1.e+015 1.e+016 1.e+017
# 1.e+018 1.e+019 1.e+020 1.e+021 1.e+022 1.e+023 1.e+024 1.e+025 1.e+026
# 1.e+027 1.e+028 1.e+029 1.e+030 1.e+031 1.e+032 1.e+033 1.e+034 1.e+035
# 1.e+036 1.e+037 1.e+038 1.e+039 1.e+040 1.e+041 1.e+042 1.e+043 1.e+044
# 1.e+045 1.e+046 1.e+047 1.e+048 1.e+049 1.e+050 1.e+051 1.e+052 1.e+053
# 1.e+054 1.e+055 1.e+056 1.e+057 1.e+058 1.e+059 1.e+060 1.e+061 1.e+062
# 1.e+063 1.e+064 1.e+065 1.e+066 1.e+067 1.e+068 1.e+069 1.e+070 1.e+071
# 1.e+072 1.e+073 1.e+074 1.e+075 1.e+076 1.e+077 1.e+078 1.e+079 1.e+080
# 1.e+081 1.e+082 1.e+083 1.e+084 1.e+085 1.e+086 1.e+087 1.e+088 1.e+089
# 1.e+090 1.e+091 1.e+092 1.e+093 1.e+094 1.e+095 1.e+096 1.e+097 1.e+098
# 1.e+099 1.e+100]
# Показатель степени изменяется через 1.
# ------------------------------ geomspace() -------------------------------
# Раскидывает точки в геометрической прогрессии
X4 = np.geomspace(1, 100, 101, dtype=np.float64) # в качестве начального параметра не принимает 0.
print('geomspace\n', X4)
# [ 1. 1.04712855 1.0964782 1.14815362 1.20226443
# 1.25892541 1.31825674 1.38038426 1.44543977 1.51356125
# 1.58489319 1.65958691 1.73780083 1.81970086 1.90546072
# 1.99526231 2.08929613 2.18776162 2.29086765 2.39883292
# 2.51188643 2.63026799 2.7542287 2.8840315 3.01995172
# 3.16227766 3.31131121 3.4673685 3.63078055 3.80189396
# 3.98107171 4.16869383 4.36515832 4.5708819 4.78630092
# 5.01187234 5.2480746 5.49540874 5.75439937 6.02559586
# 6.30957344 6.60693448 6.91830971 7.2443596 7.58577575
# 7.94328235 8.31763771 8.7096359 9.12010839 9.54992586
# 10. 10.47128548 10.96478196 11.48153621 12.02264435
# 12.58925412 13.18256739 13.80384265 14.45439771 15.13561248
# 15.84893192 16.59586907 17.37800829 18.19700859 19.05460718
# 19.95262315 20.89296131 21.87761624 22.90867653 23.98832919
# 25.11886432 26.30267992 27.54228703 28.84031503 30.1995172
# 31.6227766 33.11311215 34.67368505 36.30780548 38.01893963
# 39.81071706 41.68693835 43.65158322 45.70881896 47.86300923
# 50.11872336 52.48074602 54.95408739 57.54399373 60.25595861
# 63.09573445 66.0693448 69.18309709 72.44359601 75.8577575
# 79.43282347 83.17637711 87.096359 91.20108394 95.4992586
# 100. ]
# Итак, есть три основных ф-и для формирования массивов:
# 1 - arange() - распределяет точки через равный интервал
# 2 - linspace() - делит диапазон на указанное число частей по линейной шкале
# 3 - logspace() - делает то же, что и linspace, но при делении диапазона использует показатель степени
# по логарифмической шкале.
# ------------------------------- identity() ------------------------
# Единичная матрица. Она всегда двухмерная и всегда квадратная
E = np.identity(10, dtype=np.float64) # задаем один параметр, так как матрица квадратная
print('identity\n', E)
# [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
# [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
# [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
# [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
# [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
# [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
# [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
# [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
# [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
# --------------------------------------- eye() -------------------------------
# Эта ф-я создает Identity matrix / Единичную матрицу
# Это матрица из нулей с единицами по диагонали, которая начинается с первого элемента матрицы и заканчивается последним.
m_ = np.eye(3) # матрица 3х3
print(m_)
# [[1. 0. 0.]
# [0. 1. 0.]
# [0. 0. 1.]]
# -------------------------------------
# Умножение всех элементов массива на число
# В матрицах мы также можем умножать на число
b_ = b_ * 2
print(b_)
# [[2 2 2 2]
# [2 2 2 2]
# [2 2 2 2]]
# Можно умножать матрицу на матрицу, если они совпадают размерами.
# Здесь умножение происходит поэлементно
print(a_list * b_)
# [[ 6 12 4 14]
# [18 4 8 16]
# [16 4 6 12]]
# Что делают матрицы?
# Матрица - это один из основных математических аппаратов. С помощью них решают уравнения, делают аппроксимации.
# Любые математические методы построены на матрицах.
# Пока для нас матирца, это двухмерный массив
# https://www.numpy.org - документация по numpy
# -------------------------- Семпелирование из распределений (Создание случайного массива) -------------------------
# Описание всех возможностей модуля np.random:
# https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html
# Позволяет делать семплирование из различных распределений
# --------------------------------------- rand() ----------------------------
# Ф-я rand() возвращает распределенные числа
r_a = np.random.rand(3) # в скобках передеаем количество элементов
print(r_a) # [0.26295925 0.86094219 0.10804199]
# Получаем три случайных числа со значениями от 0 до 1
# Это одномерный массив. Если хотим получсить двумерный - в скобках помещаем два числа
r_a_2 = np.random.rand(3, 4)
print(r_a_2)
# [[0.35670423 0.33045392 0.69668886 0.87599185]
# [0.45371986 0.52534176 0.20873434 0.0511607 ]
# [0.60906173 0.17519525 0.85137775 0.17951122]]
# Получаем двумерный массив (две пары кв.скобок) размерностью 3х4
# ---------------------------- randn() ---------------------------------
# Ф-я randn() возвращает нормально распределенные числа
r_n = np.random.randn(5)
print(r_n)
# [0.4619619 1.37952577 0.0024386 0.58737799 0.65258035]
# Двухмерный массив
r_n_2 = np.random.randn(3, 5)
print(r_n_2)
# [[ 0.11240541 0.88797712 -0.76090493 0.39046211 0.32887074]
# [ 1.64754416 -0.53392785 2.16685259 0.36912093 1.37752072]
# [-1.71455156 0.02808839 -1.50790139 -1.42062286 -1.62162641]]
# Трехмерный массив
r_n_3 = np.random.randn(2, 3, 5)
print('Трехмернй массив\n', r_n_3)
# ------------------------------- normal() -------------------------------
# Случайный массив нормального распределения
# Метод normal()
# Создаем массив размера 5 на 5 со случайными числами из стандартного нормального распределения
# Числа будут распределяться в соответствии с нормальным распределением
# Нормальное распределение означает, что среднее значение всех элементов будет в районе 0 (первое значение в аргументах)
# Среднеквадратичное отклонение - в районе второго значения
rr = np.random.normal(0, 1, (5, 5))
print('Массив случайных чисел с нормальным распределением')
print(rr)
# [[ 0.44609444 1.46648639 -0.71085575 -1.37248413 -1.50204124]
# [-0.91750705 0.65186486 -0.77443963 -0.41575527 -0.42991253]
# [ 0.57163415 0.307304 0.797994 -0.63930071 -0.91871729]
# [ 0.13777992 1.18317277 0.63241621 -1.70244244 -0.33194237]
# [-0.13911916 0.33009841 0.26635273 -0.20181408 0.69920153]]
# Почему случайная?
# При каждом запуске расчета будут выдаваться новые случайные величины
# [[ 1.24701787 -1.39613534 -0.26663356 0.689353 1.04496652]
# [ 1.38255089 -0.04465846 0.74089134 0.47437058 0.27041353]
# [-0.64641649 -0.4218203 0.75355706 0.57893304 -0.26714739]
# [ 1.11584443 0.75603918 0.3494514 0.45091684 0.1791541 ]
# [ 0.17316534 -1.37216487 0.26336408 0.83848343 -0.94691011]]
# Смотрим среднее значение
print(rr.mean()) # -0.10263729011138162
#
print(rr.std()) # 0.8203049258132238
# ------------------------------- randint() -------------------------------
# Возвращает случайные целые числа
# Одномерная матрица
r_i = np.random.randint(0, 5, 10)
# Создаем массив из 10 элементов, значения которого будут в диапазоне от 0 до 5
# параметры: старт диапазона (включен в диапазон), конец диапазона (исключительно), количество элементов
print(r_i) # [0 0 3 4 0 1 0 2 2 1]
# При каждом повтороном запуске, выводимый ряд будет меняться.
# Двухмерная матрица
r_i_1 = np.random.randint(0, 10, (5, 5)) # условия: числа в диапазоне от 0 до 10, размер матирцы 5х5
# Случайных распределений не бывает, но random - это псевдогенератор случайных чисел, который генерит что-то похожее
# на случайное распределение.
print(r_i_1)
# [[4 2 8 8 1]
# [3 8 1 7 0]
# [4 1 6 2 5]
# [1 8 0 6 7]
# [1 7 5 1 6]]
# Получаем матрицу размером 5х5 случайных чисел от 0 до 10.
# Это как создавать случайные велечины. Они используются постоянно в нейронках и data cience
# Three-dimensional array (tensor) of random integers:
# 24 values in [15, 37) reshaped into 2 blocks of 3 rows x 4 columns.
my_3d_array = np.random.randint(15, 37, 24).reshape(2, 3, 4)
print(my_3d_array.shape)
print('Трехмерный массив,\n', my_3d_array)
# Building a 3-D array explicitly from nested lists.
# Three pairs of brackets identify a rank-3 tensor.
my_3d_array_2 = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
print(my_3d_array_2.shape)  # (2, 2, 3): 2 blocks of 2 rows x 3 columns
print(my_3d_array_2)
# Changing the tensor's dimensionality.
# BUG FIX: reshape() returns a NEW array; the original code discarded the
# result (``my_3d_array_2.reshape(2,2,3)``), leaving the array unchanged.
# The result must be assigned back.
my_3d_array_2 = my_3d_array_2.reshape(2, 2, 3)
print('Размерность 2-2-3\n', my_3d_array_2.shape)  # (2, 2, 3)
print(my_3d_array_2)
# --------------------------- uniform() --------------------------------
# uniform(low, high, size) draws floating-point numbers from a uniform
# distribution over [low, high).
rr_v = np.random.uniform(0, 10, 5)
print(rr_v)
# e.g. [2.4753 4.5412 6.7717 9.7296 6.4080]
# ---------------------------------- Tensors ----------------------------------
# Create a four-dimensional array of random values with shape 2x3x4x5.
my_tensor = np.random.random((2, 3, 4, 5)) # random.random() fills the array with uniform floats in [0, 1)
# 2 blocks of 3 elements, each element a 4x5 matrix.
print('Тензор')
print(my_tensor)
# (output: a 2x3x4x5 array of uniform floats; values change per run)
print('Размерность Тензора')
print(my_tensor.shape)  # (2, 3, 4, 5)
# ------------------------ Accessing tensor elements -------------------------
# Element with index 0 on every axis.
print(my_tensor[0, 0, 0, 0])
# Select index 0 along the LAST axis (i.e. the first column of every
# 4x5 sub-matrix); the result has shape (2, 3, 4).
print(my_tensor[:, :, :, 0])
# Sum along the last axis (axis=3), collapsing the length-5 dimension;
# the result has shape (2, 3, 4).
print(my_tensor.sum(axis=3))
# (output: per-row sums of the random tensor above)
# ------------------------ The np.matrix structure ------------------------------------
# Reference on matrix:
# https://numpy.org/devdocs/reference/generated/numpy.matrix.html
# An additional facility for matrix computations.
# Start from a plain 2-D array:
arr1 = np.array([[1,2,3],[4,5,6]])
print(type(arr1)) # <class 'numpy.ndarray'>
# Wrap it into a matrix object.
my_matrix = np.matrix(arr1)
print(my_matrix)
print(type(my_matrix)) # <class 'numpy.matrix'>
# [[1 2 3]
#  [4 5 6]]
# It prints just like a two-dimensional array.
# Transpose: .T swaps rows and columns.
print(np.matrix(my_matrix).T)
# [[1 4]
#  [2 5]
#  [3 6]]
# The first row became the first column, the second row the second column.
# Matrix multiplication:
# for np.matrix, ``*`` performs MATRIX multiplication, not element-wise!
print(np.matrix(my_matrix) * np.matrix(my_matrix).T)
# [[14 32]
#  [32 77]]
# ------------------------------- Matrix operations -------------------------------
# Multiplying two arrays with the .dot method.
# Transposition works on plain arrays too.
# Two 3x2 arrays:
arr_1 = np.array([1, 2, 3, 4, 5, 6]).reshape(3,2)
print(arr_1)
# [[1 2]
#  [3 4]
#  [5 6]]
arr_2 = np.array([4, 3, 2, 6, 4, 3]).reshape(3,2)
print(arr_2)
# [[4 3]
#  [2 6]
#  [4 3]]
# ``*`` on ndarrays is ELEMENT-WISE multiplication; since both operands are
# 3x2, the result is the 3x2 array [[4 6] [6 24] [20 18]].
print(arr_1 * arr_2)
# ndarrays provide the .dot method for true matrix multiplication;
# (2x3).dot(3x2) yields a 2x2 result.
arrT = arr_1.T
arr_3 = arrT.dot(arr_2)
print('Матричное перемножение')
print(arr_3)
# [[30 36]
#  [40 48]]
# ------------------------------------- Работа с файлами --------------------------------
# -------------------------- Задачи ----------------
# Создайте массив, который будет содержать списки с именем студента(str), его возрастом(int) и средней оценкой(float).
dt = [('name', '<U10'), ('age', 'int32'), ('mark', 'float32')]
std_list = [('Alex', 20, 4.3), ('Kate', 19, 4.8), ('Maks', 21, 4.1), ('Marry', 22, 4.6), ('Denis', 18, 3.8), ('Ann', 21, 4.2)]
std_np = np.array(std_list, dtype=dt) # В случае однотиповых данных, в dtype подставляем тип элементов, напр int32
print((np.sort(std_np, order='name')).reshape(6,1)) # сорт по имени (алфавит).
# С помощью reshape выводим в столбик для визуального удобства
print()
print((np.sort(std_np, order='age')).reshape(6,1)) # сорт по возрасту
print()
print((np.sort(std_np, order='mark'))[::-1].reshape(6,1)) # сорт по успеваемости (вначале лучшие)
# # ------------------------------------------------------
# Заменяем возраст на одно число
std_np['age'] = 10
print((np.sort(std_np, order='name'))[::-1]) # сорт по алфавиту в обратном порядке по колонке 'name'
my_3d_array_2 = np.random.uniform(17, 23, (2, 3, 4))
| 37.842759 | 129 | 0.64563 |
2842d30820f635256f73ae79bc6c16f824dc89f6 | 704 | py | Python | setup.py | jlevy44/Submit-HPC | 83dfd60587fab2c75e02f1c14b688b4bc51aff8c | [
"MIT"
] | 1 | 2020-06-11T00:51:24.000Z | 2020-06-11T00:51:24.000Z | setup.py | jlevy44/Submit-HPC | 83dfd60587fab2c75e02f1c14b688b4bc51aff8c | [
"MIT"
] | null | null | null | setup.py | jlevy44/Submit-HPC | 83dfd60587fab2c75e02f1c14b688b4bc51aff8c | [
"MIT"
] | 1 | 2020-06-19T01:05:07.000Z | 2020-06-19T01:05:07.000Z | from setuptools import setup
# Read the PyPI long description from the README.
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()

# Package metadata for submit_hpc: exposes the ``submit-job`` console
# entry point backed by submit_hpc.job_runner:job.
setup(
    name='submit_hpc',
    version='0.1.2',
    description='Collection of growing job submission scripts, not to replace workflow specifications.',
    url='https://github.com/jlevy44/Submit-HPC',
    author='Joshua Levy',
    author_email='joshualevy44@berkeley.edu',
    license='MIT',
    scripts=[],
    entry_points={
        'console_scripts': ['submit-job=submit_hpc.job_runner:job'],
    },
    packages=['submit_hpc'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=['click', 'pandas'],
)
| 37.052632 | 106 | 0.666193 |
28461474953cc9c257de317f17581d4ef1a01795 | 18,209 | py | Python | DQN/network.py | Xin-Ye-1/HIEM | 6764f579eef6ec92dd85a005af27419f630df7da | [
"Apache-2.0"
] | 2 | 2021-04-12T02:41:00.000Z | 2021-05-15T02:18:15.000Z | DQN/network.py | Xin-Ye-1/HIEM | 6764f579eef6ec92dd85a005af27419f630df7da | [
"Apache-2.0"
] | null | null | null | DQN/network.py | Xin-Ye-1/HIEM | 6764f579eef6ec92dd85a005af27419f630df7da | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.slim as slim
seed = 0
def fc2d(inputs,
         num_outputs,
         activation_fn,
         scope):
    """Dense layer applied along the last axis of a rank-3 tensor.

    Creates (or reuses, via AUTO_REUSE) variables named 'weights' and
    'biases' under ``scope`` and contracts the last dimension of
    ``inputs`` with the weight matrix via einsum.

    Args:
        inputs: rank-3 tensor of shape [batch, n, k].
        num_outputs: size of the new last dimension.
        activation_fn: optional activation applied to the affine output.
        scope: variable scope name for the layer parameters.

    Returns:
        Tensor of shape [batch, n, num_outputs].
    """
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as var_scope:
        # Unpacking three values asserts the expected rank-3 input.
        _, _, in_dim = inputs.get_shape().as_list()
        kernel = tf.get_variable(name='weights',
                                 shape=[in_dim, num_outputs],
                                 initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                 trainable=True)
        # Contract the last axis: [b, n, k] x [k, l] -> [b, n, l].
        projected = tf.einsum('ijk,kl->ijl', inputs, kernel)
        offsets = tf.get_variable(name='biases',
                                  shape=[num_outputs],
                                  initializer=tf.zeros_initializer(),
                                  trainable=True)
        pre_activation = projected + offsets
        if activation_fn is None:
            return pre_activation
        return activation_fn(pre_activation, name=var_scope.name)
def conv3d(scope_name,
           input,
           filter_size):
    """Valid-padding 3-D convolution (stride 1) with bias and ReLU.

    Creates (or reuses, via AUTO_REUSE) variables named 'weights' and
    'biases' under ``scope_name``.

    Args:
        scope_name: variable scope for the layer parameters.
        input: rank-5 tensor [batch, d, h, w, in_channels].
        filter_size: 5-element kernel shape; the last entry is the number
            of output channels.

    Returns:
        ReLU-activated convolution output.
    """
    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE) as scope:
        kernel = tf.get_variable(name='weights',
                                 shape=filter_size,
                                 initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                 trainable=True)
        conv_out = tf.nn.conv3d(input=input,
                                filter=kernel,
                                strides=[1, 1, 1, 1, 1],
                                padding='VALID')
        offsets = tf.get_variable(name='biases',
                                  shape=[filter_size[-1]],
                                  initializer=tf.zeros_initializer(),
                                  trainable=True)
        return tf.nn.relu(tf.nn.bias_add(conv_out, offsets), name=scope.name)
class Highlevel_Network():
    """High-level (sub-goal selection) Q-network.

    Builds, under variable scope ``highlevel/<scope>``, a network mapping
    (vision, depth, target) observations to one Q-value and one termination
    probability per object label.  For non-``global`` scopes it also builds
    an RMSProp update that applies locally-computed, norm-clipped gradients
    to the shared ``highlevel/global/main`` parameters.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 # action_size,
                 history_steps,
                 scope
                 ):
        with tf.variable_scope('highlevel'):
            with tf.variable_scope(scope):
                # Observation placeholders: per-pixel label scores, depth
                # values (both flattened over history_steps frames of a
                # window_size x window_size patch) and a one-hot-style
                # target-label vector.
                self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                              dtype=tf.float32)
                self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
                self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
                # Learned 1-channel projection over the label dimension.
                related_visions = fc2d(inputs=self.visions,
                                       num_outputs=1,
                                       activation_fn=None,
                                       scope='vision_preprocess')
                related_visions = slim.flatten(related_visions)
                depths = slim.flatten(self.depths)
                # Independent 256-unit embeddings of each modality.
                hidden_visions = slim.fully_connected(inputs=related_visions,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='vision_hidden')
                hidden_depths = slim.fully_connected(inputs=depths,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='depth_hidden')
                hidden_targets = slim.fully_connected(inputs=self.targets,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='target_hidden')
                # Fuse the three embeddings into one joint feature.
                vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
                embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='embed')
                # One Q-value per object label.
                qvalue = slim.fully_connected(inputs=embed_feature,
                                              num_outputs=num_labels,
                                              activation_fn=None,
                                              weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                              biases_initializer=tf.zeros_initializer(),
                                              scope='qvalue')
                self.qvalue = qvalue
                # Per-label termination probabilities in (0, 1).
                terminations = slim.fully_connected(inputs=embed_feature,
                                                    num_outputs=num_labels,
                                                    activation_fn=None,
                                                    weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                    biases_initializer=tf.zeros_initializer(),
                                                    scope='termination')
                self.terminations = tf.sigmoid(terminations)
                # highlevel training (only built for worker scopes; the
                # 'global' network serves purely as the shared parameter store)
                if not scope.startswith('global'):
                    self.chosen_objects = tf.placeholder(shape=[None], dtype=tf.int32)
                    self.target_qvalue = tf.placeholder(shape=[None], dtype=tf.float32)
                    self.highlevel_lr = tf.placeholder(dtype=tf.float32)
                    # Standard TD loss on the Q-value of the chosen object.
                    object_onehot = tf.one_hot(self.chosen_objects, num_labels, dtype=tf.float32)
                    qvalue_for_chosen_object = tf.reduce_sum(self.qvalue*object_onehot, axis=1)
                    td_error = tf.square(self.target_qvalue - qvalue_for_chosen_object)
                    self.qvalue_loss = 0.5*tf.reduce_mean(td_error)
                    highlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.highlevel_lr)
                    highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'highlevel/%s' % scope)
                    gradients = tf.gradients(self.qvalue_loss, highlevel_params)
                    norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                    # Local gradients are applied to the GLOBAL parameter copy;
                    # zip() pairs by creation order, so worker and global
                    # networks must be built identically.
                    global_highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'highlevel/global/main')
                    self.highlevel_update = highlevel_trainer.apply_gradients(zip(norm_gradients, global_highlevel_params))
class Lowlevel_Network():
    """Low-level (action selection) Q-network.

    Builds, under variable scope ``lowlevel/<scope>``, a network mapping
    (vision, depth, subtarget) observations to one Q-value per primitive
    action.  For non-``global`` scopes it also builds an RMSProp update
    that applies locally-computed, norm-clipped gradients to the shared
    ``lowlevel/global/in/main`` parameters.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 action_size,
                 history_steps,
                 scope='global'
                 ):
        with tf.variable_scope('lowlevel'):
            with tf.variable_scope(scope):
                # Per-pixel label scores and depths (flattened over
                # history_steps frames), plus the current sub-goal label.
                self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                              dtype=tf.float32)
                self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
                self.subtargets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
                # Keep only the vision channel that matches the sub-goal:
                # broadcast the subtarget vector over all pixels and reduce
                # over the label dimension.
                subtargets_expanded = tf.tile(tf.expand_dims(self.subtargets, 1),
                                              [1, history_steps * window_size * window_size, 1])
                masked_visions = tf.reduce_sum(self.visions * subtargets_expanded, axis=-1)
                masked_visions = slim.flatten(masked_visions)
                depths = slim.flatten(self.depths)
                # 256-unit embeddings of the masked vision and the depth map.
                hidden_visions = slim.fully_connected(inputs=masked_visions,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='vision_hidden')
                hidden_depths = slim.fully_connected(inputs=depths,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='depth_hidden')
                vision_depth_feature = tf.concat([hidden_visions, hidden_depths], 1)
                embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='embed')
                # value estimation: small hidden layer then one Q-value
                # per primitive action.
                hidden_value = slim.fully_connected(inputs=embed_feature,
                                                    num_outputs=20,
                                                    activation_fn=tf.nn.relu,
                                                    weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                    biases_initializer=tf.zeros_initializer(),
                                                    scope='value_hidden')
                self.qvalues = slim.fully_connected(inputs=hidden_value,
                                                    num_outputs=action_size,
                                                    activation_fn=None,
                                                    weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
                                                    biases_initializer=tf.zeros_initializer(),
                                                    scope='qvalue')
                # Lowlevel training (worker scopes only; 'global' scopes hold
                # the shared parameters)
                if not scope.startswith('global'):
                    self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
                    self.target_qvalues = tf.placeholder(shape=[None], dtype=tf.float32)
                    self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
                    # TD loss on the Q-value of the executed action.
                    actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
                    qvalues_for_chosen_actions = tf.reduce_sum(self.qvalues * actions_onehot, axis=-1)
                    self.qvalue_loss = 0.5 * tf.reduce_mean(tf.square(self.target_qvalues - qvalues_for_chosen_actions))
                    local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/%s'%scope)
                    gradients = tf.gradients(self.qvalue_loss, local_lowlevel_params)
                    norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                    lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
                    # Gradients are applied to the shared 'global/in/main'
                    # copy; zip() pairs variables by creation order.
                    global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/global/in/main')
                    self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
class Lowlevel_Network_ex():
    """Extrinsic low-level Q-network.

    Same observation interface as Highlevel_Network (vision, depth, full
    target vector) but outputs one Q-value per primitive action instead of
    per label.  Lives under variable scope ``lowlevel/<scope>``; for
    non-``global`` scopes an RMSProp update applies locally-computed,
    norm-clipped gradients to the shared ``lowlevel/global/ex/main``
    parameters.
    """
    def __init__(self,
                 window_size,
                 num_labels,
                 action_size,
                 history_steps,
                 scope
                 ):
        with tf.variable_scope('lowlevel'):
            with tf.variable_scope(scope):
                # Observation placeholders (see Highlevel_Network).
                self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
                                              dtype=tf.float32)
                self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1],
                                             dtype=tf.float32)
                self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
                # Learned 1-channel projection over the label dimension.
                related_visions = fc2d(inputs=self.visions,
                                       num_outputs=1,
                                       activation_fn=None,
                                       scope='vision_preprocess')
                related_visions = slim.flatten(related_visions)
                depths = slim.flatten(self.depths)
                # Independent 256-unit embeddings of each modality.
                hidden_visions = slim.fully_connected(inputs=related_visions,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(
                                                          seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='vision_hidden')
                hidden_depths = slim.fully_connected(inputs=depths,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(
                                                         seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='depth_hidden')
                hidden_targets = slim.fully_connected(inputs=self.targets,
                                                      num_outputs=256,
                                                      activation_fn=tf.nn.relu,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(
                                                          seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='target_hidden')
                # Fuse and embed the joint feature.
                vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
                embed_feature = slim.fully_connected(inputs=vision_depth_feature,
                                                     num_outputs=256,
                                                     activation_fn=tf.nn.relu,
                                                     weights_initializer=tf.contrib.layers.xavier_initializer(
                                                         seed=seed),
                                                     biases_initializer=tf.zeros_initializer(),
                                                     scope='embed')
                # One Q-value per primitive action.
                action_qvalues = slim.fully_connected(inputs=embed_feature,
                                                      num_outputs=action_size,
                                                      activation_fn=None,
                                                      weights_initializer=tf.contrib.layers.xavier_initializer(
                                                          seed=seed),
                                                      biases_initializer=tf.zeros_initializer(),
                                                      scope='action_qvalue')
                self.action_qvalues = action_qvalues
                # highlevel training (worker scopes only)
                if not scope.startswith('global'):
                    self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
                    self.target_action_qvalues = tf.placeholder(shape=[None], dtype=tf.float32)
                    self.highlevel_lr = tf.placeholder(dtype=tf.float32)
                    # TD loss on the Q-value of the executed action.
                    action_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
                    qvalue_for_chosen_action = tf.reduce_sum(self.action_qvalues * action_onehot, axis=1)
                    td_error = tf.square(self.target_action_qvalues - qvalue_for_chosen_action)
                    self.action_qvalue_loss = 0.5 * tf.reduce_mean(td_error)
                    highlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.highlevel_lr)
                    highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'lowlevel/%s' % scope)
                    gradients = tf.gradients(self.action_qvalue_loss, highlevel_params)
                    norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
                    # Apply to the shared 'global/ex/main' parameter copy;
                    # zip() pairs variables by creation order.
                    global_highlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                                'lowlevel/global/ex/main')
                    self.highlevel_update = highlevel_trainer.apply_gradients(
                        zip(norm_gradients, global_highlevel_params))
| 55.012085 | 123 | 0.473228 |
2847498b54c2f788df1761ffd02163a689964021 | 5,924 | py | Python | 128/utility.py | Jeffrey-Ede/Adaptive-Partial-STEM | dc13e64ba3fb8266d39a260780af615b170a3c88 | [
"MIT"
] | 3 | 2020-04-29T21:45:21.000Z | 2021-08-13T16:01:14.000Z | 128/utility.py | Jeffrey-Ede/intelligent-partial-STEM | dc13e64ba3fb8266d39a260780af615b170a3c88 | [
"MIT"
] | null | null | null | 128/utility.py | Jeffrey-Ede/intelligent-partial-STEM | dc13e64ba3fb8266d39a260780af615b170a3c88 | [
"MIT"
] | null | null | null | import tensorflow as tf
import itertools
import numpy as np
FLAGS = tf.flags.FLAGS
def stepped_spiral_actions(theta_incr=np.pi/180):
    """Precompute step directions and positions along an Archimedean spiral.

    Starting from the image centre, walks the spiral r = alpha*(theta -
    theta0) in increments of ``theta_incr`` until each chord is at least
    sqrt(2)*FLAGS.step_size long, emitting one (direction, position) pair
    per scan segment.

    Args:
        theta_incr: angular increment [rad] used to search for the next
            spiral point.

    Returns:
        actions: float32 array [batch_size, num_steps+1, 2] of step
            directions (L1-normalised, scaled by sqrt(2)); the final entry
            is a placeholder of ones that callers discard.
        positions: float32 array [batch_size, num_steps+1, 2] of segment
            start positions, normalised to [0, 1].
    """
    start_theta = np.pi/4
    start_r = np.sqrt(2)*FLAGS.step_size
    position = np.ones([2])/2  # start at the image centre (normalised coords)
    alpha = 3.4
    theta0 = -start_r/alpha
    actions = []
    positions = [position.copy()]
    for _ in range(0, FLAGS.num_steps):
        for i in itertools.count(start=1):
            theta = start_theta + i*theta_incr
            r = alpha*(theta - theta0)
            # Accept the first spiral point whose chord from the previous
            # point is at least one diagonal step long.
            if np.sqrt( (r*np.cos(theta) - start_r*np.cos(start_theta))**2 +
                        (r*np.sin(theta) - start_r*np.sin(start_theta))**2 ) >= np.sqrt(2)*FLAGS.step_size:
                vect = np.array([r*np.cos(theta) - start_r*np.cos(start_theta),
                                 r*np.sin(theta) - start_r*np.sin(start_theta)])
                # Normalise by the L1 norm, then scale so the largest
                # component magnitude is ~1 (diagonal length sqrt(2)).
                vect /= np.sum(np.sqrt(vect**2))
                vect *= np.sqrt(2)
                # BUG FIX: the original did ``start_position += ...`` and
                # appended the SAME ndarray object every iteration, so every
                # row of ``positions`` ended up equal to the final position.
                # Rebinding creates a fresh array per step.
                position = position + FLAGS.step_size*vect/FLAGS.img_side
                actions.append( vect )
                positions.append( position )
                start_theta = theta
                start_r = r
                break
    actions.append( np.ones([2]) ) #Discarded placeholder for the final step
    actions = np.stack(actions)
    actions = np.stack([actions]*FLAGS.batch_size).astype(np.float32)
    positions = np.stack(positions)
    positions = np.stack([positions]*FLAGS.batch_size).astype(np.float32)
    return actions, positions
def make_observations(actions, starts, full_scans):
    """Sample pixel values of ``full_scans`` along straight scan segments.

    Args:
        actions: [batch, 2] per-example step direction (pixels).
        starts: [batch, 2] per-example segment start (pixels).
        full_scans: [batch, img_side, img_side, 1] images to sample.

    Returns:
        Array of shape [batch*?, FLAGS.step_size] with one sampled value
        per step along each segment, clipped to the image bounds.
    """
    num_steps = FLAGS.step_size
    # Positions after 0..num_steps-1 increments, clipped into the image.
    path = np.stack([starts + k * actions for k in range(num_steps)])
    path = np.clip(path, 0, FLAGS.img_side - 1)
    # One (batch, row, col, channel) index row per sampled point.
    rows = []
    for batch_idx in range(FLAGS.batch_size):
        for step_idx in range(num_steps):
            point = path[step_idx][batch_idx]
            rows.append([batch_idx, int(point[0]), int(point[1]), 0])
    rows = np.array(rows)
    gather_idx = (rows[:, 0], rows[:, 1], rows[:, 2], rows[:, 3])
    return full_scans[gather_idx].reshape([-1, num_steps])
def spiral_generator(scans):
    """Generate spiral-path observations for a batch of full scans.

    Args:
        scans: tensor of full scan images, indexed by
            make_observations as [batch, row, col, channel].

    Returns:
        observations: float32 tensor
            [batch_size, num_steps, step_size] of sampled pixel values.
        actor_actions: constant tensor of the precomputed spiral step
            directions (final placeholder action dropped).
    """
    # Fixed spiral trajectory shared by the whole batch.
    actions0, positions = stepped_spiral_actions()
    actor_actions = tf.convert_to_tensor(actions0[:,:-1], dtype=tf.float32)
    # Convert normalised [0, 1] positions to pixel coordinates.
    positions *= FLAGS.img_side
    def py_spiral_generator(imgs):
        # The first action is replaced by ones; the remaining precomputed
        # directions are kept (numpy side of the tf.py_func below).
        actions = np.concatenate([np.ones([FLAGS.batch_size, 1, 2]), actions0[:,1:]], axis=1)
        observations = [make_observations(actions[:,i,:], positions[:,i,:], imgs) for i in range(FLAGS.num_steps)]
        observations = np.stack(observations, axis=1)
        return observations
    observations = tf.py_func(py_spiral_generator, [scans], tf.float32)
    # py_func loses shape information; restore it explicitly.
    observations = tf.reshape(observations, [FLAGS.batch_size, FLAGS.num_steps, FLAGS.step_size])
    return observations, actor_actions
def auto_name(name):
    """Append number to variable name to make it unique.

    Inputs:
        name: Start of variable name.

    Returns:
        Full variable name with number afterwards to make it unique.
    """
    scope = tf.contrib.framework.get_name_scope()
    existing = [v.name for v in
                tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)]
    prefix = scope + "/" if scope != "" else ""
    # Increment the suffix until no existing variable name starts with the
    # candidate (prefix match also covers trailing ":0"-style suffixes).
    for i in itertools.count():
        candidate = "%s_%d" % (name, i)
        full_name = prefix + candidate
        if not any(n.startswith(full_name) for n in existing):
            return candidate
def alrc(
    loss,
    num_stddev=3,
    decay=0.999,
    mu1_start=2,
    mu2_start=3**2,
    in_place_updates=False
    ):
    """Adaptive learning rate clipping (ALRC) of outlier losses.

    Inputs:
        loss: Loss function to limit outlier losses of.
        num_stddev: Number of standard deviation above loss mean to limit it
        to.
        decay: Decay rate for exponential moving averages used to track the first
        two raw moments of the loss.
        mu1_start: Initial estimate for the first raw moment of the loss.
        mu2_start: Initial estimate for the second raw moment of the loss.
        in_place_updates: If False, add control dependencies for moment tracking
        to tf.GraphKeys.UPDATE_OPS. This allows the control dependencies to be
        executed in parallel with other dependencies later.

    Return:
        Loss function with control dependencies for ALRC.
    """
    #Variables to track first two raw moments of the loss
    #(names made unique via auto_name so alrc can be applied repeatedly).
    mu = tf.get_variable(
        auto_name("mu1"),
        initializer=tf.constant(mu1_start, dtype=tf.float32))
    mu2 = tf.get_variable(
        auto_name("mu2"),
        initializer=tf.constant(mu2_start, dtype=tf.float32))
    #Use capped loss for moment updates to limit the effect of outlier losses on the threshold
    #(1e-8 guards the sqrt against a slightly negative variance estimate).
    sigma = tf.sqrt(mu2 - mu**2+1.e-8)
    #Losses above mu + num_stddev*sigma are rescaled to the threshold; the
    #stop_gradient keeps the scaling factor out of backpropagation.
    loss = tf.where(loss < mu+num_stddev*sigma,
                    loss,
                    loss/tf.stop_gradient(loss/(mu+num_stddev*sigma)))
    #Update moment moving averages
    mean_loss = tf.reduce_mean(loss)
    mean_loss2 = tf.reduce_mean(loss**2)
    update_ops = [mu.assign(decay*mu+(1-decay)*mean_loss),
                  mu2.assign(decay*mu2+(1-decay)*mean_loss2)]
    if in_place_updates:
        #Force the moment updates to run whenever the loss is evaluated.
        with tf.control_dependencies(update_ops):
            loss = tf.identity(loss)
    else:
        #Control dependencies that can be executed in parallel with other update
        #ops. Often, these dependencies are added to train ops e.g. alongside
        #batch normalization update ops.
        for update_op in update_ops:
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op)
    return loss
if __name__ == "__main__":
pass | 33.280899 | 116 | 0.613774 |
2847f21bc2086528a4db0c276260fe7ae1c988d5 | 1,336 | py | Python | tests/test_lexer.py | codyd51/camelback | 2dd1269bcbc7ce35fcab1df7dfddce229c51e610 | [
"MIT"
] | 2 | 2018-11-22T16:45:24.000Z | 2018-11-26T16:13:31.000Z | tests/test_lexer.py | codyd51/camelback | 2dd1269bcbc7ce35fcab1df7dfddce229c51e610 | [
"MIT"
] | null | null | null | tests/test_lexer.py | codyd51/camelback | 2dd1269bcbc7ce35fcab1df7dfddce229c51e610 | [
"MIT"
] | null | null | null | import os
import unittest
from camelback.lexer import Lexer
class TestLexer(unittest.TestCase):
    """End-to-end tokenisation test for camelback's Lexer."""

    # C fixture file shipped with the test suite.
    SOURCE_CODE_FILE = os.path.join(os.path.dirname(__file__), 'bin', 'snakecase.c')

    def test_tokenize_source_code(self):
        """Tokenise the fixture and compare against the expected stream.

        The lexer signals exhaustion by raising EOFError, so tokens are
        drained in a loop until that is caught.  Note that whitespace and
        newlines are emitted as individual tokens.
        """
        with open(TestLexer.SOURCE_CODE_FILE) as f:
            stream = f.read()
        lexer = Lexer(stream)
        tokens = []
        while True:
            try:
                tok = lexer.get()
                tokens.append(tok)
            except EOFError:
                break
        # Expected token stream, one sub-list region per source line.
        correct = ['#include', ' ', '<stdio.h>', ' ', ' ', '//', ' ', 'printf', '\n',
                   '\n',
                   'void', ' ', 'foo_bar', '(', 'int', ' ', 'user_argument', ')', ' ', '{', '\n',
                   ' ', ' ', ' ', ' ', 'printf', '(', '"%d\\n",', ' ', 'user_argument', ')', ';', '\n',
                   '}', '\n',
                   '\n',
                   'int', ' ', 'main', '(', 'int', ' ', 'argc,', ' ', 'char**', ' ', 'argv', ')', ' ', '{', '\n',
                   ' ', ' ', ' ', ' ', 'int', ' ', 'my_variable', ' ', '=', ' ', '42;', '\n',
                   ' ', ' ', ' ', ' ', 'foo_bar', '(', 'my_variable', ')', ';', '\n',
                   ' ', ' ', ' ', ' ', 'return', ' ', '0;', '\n',
                   '}',
                   '\n']
        self.assertEqual(correct, tokens)
| 37.111111 | 113 | 0.355539 |
28484e4880b21cb0cfb818580cf5e99d4d59fc00 | 568 | py | Python | tests/conftest.py | ljnsn/pycwatch | 9fb8910b010e7e89357a9c6b99197697ee5a8cf6 | [
"MIT"
] | 1 | 2022-02-25T17:23:17.000Z | 2022-02-25T17:23:17.000Z | tests/conftest.py | ljnsn/pycwatch | 9fb8910b010e7e89357a9c6b99197697ee5a8cf6 | [
"MIT"
] | 1 | 2022-02-28T18:37:08.000Z | 2022-02-28T18:37:08.000Z | tests/conftest.py | ljnsn/pycwatch | 9fb8910b010e7e89357a9c6b99197697ee5a8cf6 | [
"MIT"
] | null | null | null | """Fixtures and configuration for the test suite."""
from pathlib import Path
import pytest
import vcr
from pycwatch import CryptoWatchClient
# Directory containing this conftest; cassettes live alongside it.
BASE_DIR = Path(__file__).parent.absolute()

# Shared VCR recorder (bound to both names for convenience): records new
# HTTP interactions as YAML cassettes under vcr_cassettes/ and replays
# matches on URI, method and query string.
api_vcr = my_vcr = vcr.VCR(
    serializer="yaml",
    cassette_library_dir=BASE_DIR.joinpath("vcr_cassettes").as_posix(),
    record_mode="new_episodes",
    match_on=["uri", "method", "query"],
    decode_compressed_response=True,
)
@pytest.fixture()
def live_client():
    """Return a CryptoWatchClient constructed with default settings."""
    return CryptoWatchClient()
@pytest.fixture
def api_key():
    """Return a dummy API key for tests that need one."""
    return "abcdefghijklmnopqrstuvwxyz"
| 20.285714 | 71 | 0.739437 |
2848866b78d851fe0130aa00ed413f094e4d8df4 | 6,861 | py | Python | cxphasing/CXResolutionEstimate.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | [
"MIT"
] | 3 | 2018-05-11T16:05:55.000Z | 2021-12-20T08:52:02.000Z | cxphasing/CXResolutionEstimate.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | [
"MIT"
] | null | null | null | cxphasing/CXResolutionEstimate.py | jbgastineau/cxphasing | a9847a0afb9a981d81f027e75c06c9bb2b531d33 | [
"MIT"
] | 2 | 2018-11-14T08:57:10.000Z | 2021-12-20T08:52:06.000Z | """
.. module:: CXResolutionEstimate.py
:platform: Unix
:synopsis: A class for predicting the resolution of a ptychography measurement.
.. moduleauthor:: David Vine <djvine@gmail.com>
"""
import requests
import pdb
import scipy as sp
import numpy as np
import scipy.fftpack as spf
from pylab import *
class HenkeRefractiveIndex(object):
    """.. class:: HenkeRefractiveIndex()

    Queries henke.lbl.gov for the complex refractive index decrement
    (delta) and absorption index (beta) of a material at a given energy.

    :attr str url: POST form url.
    :attr dict form_data: Stores POST form data.
    :attr float delta: refractive index decrement (set by get_response).
    :attr float beta: absorption index (set by get_response).
    """
    def __init__(self, energy='1', formula='Si3N4', density='-1'):
        """:param energy: X-ray energy [keV] (used as both Min and Max).
        :param formula: chemical formula.
        :param density: density [gm/cm^3]; '-1' lets the server choose."""
        self.url = 'http://henke.lbl.gov/cgi-bin/getdb.pl'
        self.form_data = {'Density': density,
                          'Formula': formula,
                          'Max': energy,
                          'Min': energy,
                          'Npts': '10',
                          'Output': 'Text File',
                          'submit': 'submit'
                          }
        self.get_response()

    def get_response(self):
        """Submit the form, fetch the generated data file and parse delta/beta.

        Raises requests.RequestException on network failure and
        IndexError/ValueError if the server's response layout changes.
        """
        response = requests.post(self.url, data=self.form_data)
        # BUG FIX: ``response.content`` is a bytes attribute, not a callable;
        # the original ``response.content('HREF="')`` always raised TypeError.
        # The linked data file's URL is embedded in the returned HTML.
        url = response.text.split('HREF="')[1].split('"')[0]
        # BUG FIX: the Response object has no .split(); parse the text body.
        result = requests.get('http://henke.lbl.gov' + url).text.split('\n')[2].split('  ')
        # NOTE(review): field indices and the two-space delimiter assume the
        # historical column layout of the Henke text output -- confirm
        # against a live response.
        # Stored as floats so callers can use them numerically
        # (e.g. complex(beta, delta)).
        self.delta = float(result[4])
        self.beta = float(result[6])
class Detector(object):
    """.. class:: Detector([pilatus100k, pilatus1M, medipix])

    A class for describing X-ray area detectors.

    :attr int xpix: Number of pixels in x direction.
    :attr int ypix: Number of pixels in y direction.
    :attr float pix_size: Pixel size [m].
    :attr int dr: Dynamic range.
    :attr tuple nchips: Chip arrangement.
    :attr int dead_space: Dead space between chips [units of detector pixels].

    :raises ValueError: if ``det`` is not a known detector name.
    """

    # Known detector specifications, keyed by name.
    _SPECS = {
        'pilatus100k': {
            'xpix': 187,
            'ypix': 485,
            'pix_size': 172e-6,
            'dr': 2**20,
            'nchips': (1, 1),
            'dead_space': 0,
        },
        'pilatus1M': {
            'xpix': 187,
            'ypix': 485,
            'pix_size': 172e-6,
            'dr': 2**20,
            'nchips': (2, 3),
            'dead_space': 4,
        },
        'medipix': {
            'xpix': 256,
            'ypix': 256,
            'pix_size': 55e-6,
            'dr': 11800,
            'nchips': (1, 1),
            'dead_space': 0,
        },
    }

    def __init__(self, det):
        """:param str det: detector name, one of the keys of ``_SPECS``."""
        try:
            spec = self._SPECS[det]
        except KeyError:
            # The original fell through to an UnboundLocalError on an
            # unknown name; fail loudly and descriptively instead.
            raise ValueError('Unknown detector: %r (expected one of %s)'
                             % (det, sorted(self._SPECS)))
        # dict.items() replaces the Python-2-only dict.iteritems().
        for k, v in spec.items():
            setattr(self, k, v)
class TransmissionFunction(object):
    """.. class:: TransmissionFunction()

    Complex transmission function of a sample with a given thickness map.

    :param numpy.ndarray thickness: thickness map; when omitted, the SciPy
        demo image is loaded lazily (the original evaluated
        ``sp.misc.lena()`` in the signature, creating a shared mutable
        default that was modified in place on every call).
    :param float max_thickness: the thickness map is rescaled so its
        maximum equals this value [micron].
    :param float energy: X-ray energy [keV] for the refractive index lookup.
    :param T: optional precomputed complex transmission array; when given,
        thickness processing and the network lookup are skipped entirely.
    :attr numpy.ndarray T: complex transmission function.
    """
    def __init__(self, thickness=None, max_thickness=1.0, energy=1.0, **kwargs):
        if 'T' not in kwargs:
            if thickness is None:
                # Loaded lazily so merely defining the class does not
                # require scipy.misc (removed in modern SciPy).
                thickness = sp.misc.lena()
            # Work on a float copy instead of mutating the caller's array
            # (``thickness -= ...`` in the original modified it in place).
            thickness = thickness - thickness.min()
            span = float(thickness.max())
            if span > 0:  # guard against a perfectly flat map
                thickness = thickness * (max_thickness * 1e-6 / span)
            l = 1.24e-9 / energy
            ref_ind = HenkeRefractiveIndex(energy, **kwargs)
            # float() guards against the Henke lookup returning strings.
            # NOTE(review): sign convention -- a passive sample should
            # attenuate, i.e. exp(-(2*pi/l)*(beta + 1j*delta)*t); the
            # original's positive exponent is kept pending confirmation.
            self.T = np.exp((2.0 * np.pi / l)
                            * complex(float(ref_ind.beta), float(ref_ind.delta))
                            * thickness)
        for k, v in kwargs.items():  # items() replaces Python-2 iteritems()
            setattr(self, k, v)

    def __mul__(self, other):
        # BUG FIX: the original signature ``__mul__(self, a, b)`` could
        # never be invoked by ``a * b`` (wrong arity).
        return TransmissionFunction(T=self.T * other.T)

    @staticmethod
    def fft(a):
        """Return a new instance holding the centred 2-D FFT of ``a.T``."""
        return TransmissionFunction(T=spf.fftshift(spf.fft2(a.T)))

    def show(self):
        """Display the intensity |T|^2.

        ``matshow`` comes from ``from pylab import *``; the original
        referenced ``pylab.matshow`` although ``pylab`` itself was never
        bound, raising NameError.
        """
        matshow(np.abs(self.T)**2.0)
class Params(object):
    """.. class:: Params()

    A class for storing parameters for the simulation.

    :attr float energy: X-ray energy [keV].
    :attr float l: X-ray wavelength [m] (1.24e-9 / energy).
    :attr float z: Sample-detector distance [m].
    :attr Detector det: Area detector. Choices ['pilatus100k', 'pilatus1M', 'medipix'].
    :attr float zp_diameter: Zone plate diameter [micron].
    :attr float zp_finest_zone: Zone plate finest zone width [nm]
        (the focal-length formula converts it with 1e-9).
    :attr float zp_focal_length: Zone plate focal length [m],
        diameter * finest_zone / wavelength.
    :attr float beamstop_size: Beamstop size [micron].
    :attr float beamstop_thickness: Beamstop thickness [micron].
    :attr str beamstop_material: Beamstop material.
    :attr float dx_s: Sample-plane pixel size [m],
        lambda*z / (N * detector pixel size) for the shorter detector side.
    :attr float dx_zp: Pixel size in the zone-plate plane [m].
    """
    def __init__(self):
        self.energy = 10.0
        self.l = 1.24e-9/self.energy
        self.z = 1.0
        self.det = Detector('pilatus100k')
        self.zp_diameter = 160.0
        self.zp_finest_zone = 80.0
        self.zp_focal_length = self.zp_diameter*1e-6*self.zp_finest_zone*1e-9/self.l
        self.beamstop_size = 100.0
        self.beamstop_thickness = 100.0
        self.beamstop_material = 'Si'
        self.dx_s = self.l*self.z/(min(self.det.xpix, self.det.ypix)*self.det.pix_size)
        self.dx_zp = self.zp_focal_length*self.det.pix_size/self.z
class IncidentIllumination(object):
    """Incident illumination in the sample plane.

    :attr numpy.ndarray T: complex wavefield describing the incident illumination.
    """
    def __init__(self, zp_radius, dx_zp):
        # zone plate radius converted from micron into grid pixels
        radius_px = sp.ceil(zp_radius * 1e-6 / dx_zp)
        # far field of a filled circular aperture on a 2048x2048 grid
        aperture = sp.where(sp.hypot(*sp.ogrid[-1024:1024, -1024:1024]) < radius_px, 1., 0.)
        self.T = spf.fftshift(spf.fft2(aperture))
def recommend_beamstop(q_dependence=-3.5, energy=10.0, det=None, z_or_dx=None):
    """.. func:: recommend_beamstop(q_dependence, energy, det, z_or_dx)
    :param float q_dependence: Intensity vs q scaling in far field.
    :param float energy: Incident X-ray energy [keV].
    :param Detector det: Detector used for the calculation (default: pilatus100k).
    :param dict z_or_dx: Choose the optimal beamstop when (i) the detector is
        placed at {'z': ...} or, (ii) the desired resolution is {'dx': ...}.
    """
    # Defaults resolved here: `det=Detector(...)` in the signature would create
    # one shared instance at import time, and `z_or_dx={...}` a shared mutable
    # dict -- both classic mutable-default bugs.
    if det is None:
        det = Detector('pilatus100k')
    if z_or_dx is None:
        z_or_dx = {'dx': 10e-9}
    det_npix = min(det.xpix, det.ypix)
    det_width = det.pix_size * det_npix / 2
    l = 1.24e-9 / energy  # wavelength [m]
    if 'z' in z_or_dx:
        z = z_or_dx['z']
        dx = l * z / det_width
    else:
        dx = z_or_dx['dx']
        z = dx * det_width / (2 * l)
    det_domain_x = sp.arange(det_npix) * det.pix_size
    det_domain_q = 4 * math.pi * det_domain_x / (l * z)
    intensity = lambda q: (1 + q**2.0)**-2.0
    # NOTE(review): det_domain_q[0] is 0 (arange starts at 0), so this division
    # raises ZeroDivisionError; kept as-is pending clarification of the intent.
    full_dynamic_range = log10(intensity(det_domain_q[-1] / det_domain_q[0]))
    detector_dynamic_range = log10(det.dr)
    required_dynamic_range = full_dynamic_range - detector_dynamic_range
    # Is a beamstop required?
    if required_dynamic_range > 0:
        # Yes (recommendation logic not implemented yet)
        pass
def main():
    """Demo: propagate zone-plate illumination through a sample, apply the
    beamstop in the detector plane and display the intensity."""
    # removed leftover `pdb.set_trace()` debugging hook
    p = Params()
    i0 = IncidentIllumination(p.zp_diameter / 2.0, p.dx_zp)
    sample = TransmissionFunction(energy=p.energy)
    beamstop = TransmissionFunction(thickness=sp.ones((3, 3)), max_thickness=p.beamstop_thickness,
                                    material=p.beamstop_material, energy=p.energy)
    exit_wave = i0 * sample
    det_wave = TransmissionFunction.fft(exit_wave) * beamstop
    det_wave.show()
    # Do photon scaling
if __name__ == '__main__': main()
| 27.011811 | 126 | 0.69538 |
284932efb61177d76bc830e6f9381821ff06ec7e | 997 | py | Python | classes/migrations/0007_auto_20201206_1223.py | henrylameck/school_management_system | 38c270977d001d28f2338eb90fffc3e8c2598d06 | [
"MIT"
] | null | null | null | classes/migrations/0007_auto_20201206_1223.py | henrylameck/school_management_system | 38c270977d001d28f2338eb90fffc3e8c2598d06 | [
"MIT"
] | 3 | 2021-06-05T00:01:48.000Z | 2021-09-22T19:39:12.000Z | classes/migrations/0007_auto_20201206_1223.py | henrylameck/school_management_system | 38c270977d001d28f2338eb90fffc3e8c2598d06 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-12-06 09:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: drops ClassSyllabus.components and
    replaces it with four boolean component flags (assignment, practical,
    project, theory), each defaulting to False."""
    dependencies = [
        ('classes', '0006_auto_20201205_2224'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='classsyllabus',
            name='components',
        ),
        migrations.AddField(
            model_name='classsyllabus',
            name='assignment',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='classsyllabus',
            name='practical',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='classsyllabus',
            name='project',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='classsyllabus',
            name='theory',
            field=models.BooleanField(default=False),
        ),
    ]
2849b9c3dc25b3aa339fd03d7bee0279359be673 | 1,129 | py | Python | Analysis/views_in_dow.py | harrisonxia/Lil-Data | 204467aa740bef10d865925508d7cf007cac19b3 | [
"MIT"
] | 5 | 2018-11-14T03:31:13.000Z | 2022-01-12T04:20:16.000Z | Analysis/views_in_dow.py | harrisonxia/Lil-Data | 204467aa740bef10d865925508d7cf007cac19b3 | [
"MIT"
] | null | null | null | Analysis/views_in_dow.py | harrisonxia/Lil-Data | 204467aa740bef10d865925508d7cf007cac19b3 | [
"MIT"
] | null | null | null | import sys
from pyspark.sql import SparkSession, functions, types
from pyspark.sql.functions import date_format
import json
def views_in_dow():
data_stream = spark.read.json('stream_cleanned')
data_channel = spark.read.json('channel_cleanned')
data_stream.createOrReplaceTempView('data_s')
data_channel.createOrReplaceTempView('data_c')
#using date_format() extract day of the week for each date
data_stream_weekdays = data_stream.select('*', date_format('created_at', 'u').alias('dow_number'), date_format('created_at', 'E').alias('dow_string'))
data_stream_weekdays.createOrReplaceTempView('data_stream_weekdays')
#select sum of viewers grouping by day of week
views_per_weekdays = spark.sql(
"""SELECT dow_string, sum(viewers) as viewers
FROM data_stream_weekdays
GROUP BY dow_string
"""
)
views_per_weekdays.coalesce(1).write.json('views_in_dow', mode='overwrite')
if __name__ == '__main__':
spark = SparkSession.builder.appName('views_in_dow').getOrCreate()
spark.sparkContext.setLogLevel('WARN')
views_in_dow()
| 35.28125 | 154 | 0.723649 |
284d7bf5c6289c980a9a21998f63efdfc660f3b8 | 570 | py | Python | videoprocessor/app.py | ashish1595/uPresent | 663acc6ad7c958c8d45699918c60e48535aff3b3 | [
"MIT"
] | 1 | 2020-09-02T23:51:15.000Z | 2020-09-02T23:51:15.000Z | videoprocessor/app.py | ashish1595/uPresent | 663acc6ad7c958c8d45699918c60e48535aff3b3 | [
"MIT"
] | 1,143 | 2020-01-26T07:18:37.000Z | 2022-03-31T21:02:44.000Z | videoprocessor/app.py | ashish1595/uPresent | 663acc6ad7c958c8d45699918c60e48535aff3b3 | [
"MIT"
] | 4 | 2020-01-27T07:47:29.000Z | 2020-07-22T10:54:15.000Z | from flask import Flask
from flask_restful import Api
from elasticapm.contrib.flask import ElasticAPM
from flask_restful_swagger import swagger
from resources import custom_logger
from resources.routes import initialize_routes
import logging
# Flask application bootstrap for the video-processor service.
app = Flask(__name__)
app.config.from_object("config.Config")
# Initializing custom logger
log = logging.getLogger("root")
log.setLevel(logging.INFO)
log.addHandler(custom_logger.LogHandler())
# Elastic APM instrumentation attached to the Flask app.
apm = ElasticAPM(app)
api = Api(app)
# NOTE(review): `api` is immediately rebound; the Api(app) instance created on
# the previous line is discarded.
api = swagger.docs(Api(app), apiVersion="0.1")
initialize_routes(api)
# Listen on all interfaces.
app.run(host="0.0.0.0")
| 24.782609 | 47 | 0.801754 |
284e0e3fc7904eb4e425103fc8997d9f8ee44f17 | 1,593 | py | Python | cogs/rng.py | Ana-gram/Amanager | 5ceef312125b1c73dea59d37f8f06e22293c8960 | [
"Apache-2.0"
] | 12 | 2021-04-23T18:10:24.000Z | 2021-05-03T13:08:54.000Z | cogs/rng.py | Ana-gram/Amanager | 5ceef312125b1c73dea59d37f8f06e22293c8960 | [
"Apache-2.0"
] | 3 | 2021-04-04T17:47:02.000Z | 2021-11-20T10:59:46.000Z | cogs/rng.py | Margana314/Amanager | 87e241d942ca07f3ed8dfc5e1aebfde6f58bbdac | [
"Apache-2.0"
] | 3 | 2021-04-30T11:07:28.000Z | 2021-05-01T11:35:27.000Z | import discord, random
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.utils.manage_commands import create_option
class Slash(commands.Cog):
    """Discord cog exposing the /rng slash command (random number generator)."""
    def __init__(self, bot):
        self.bot = bot
    # Two required options; option_type=4 presumably means integer -- confirm
    # against the discord_slash documentation.
    @cog_ext.cog_slash(name="rng", description="Générer un nombre aléatoire !", options=[
        create_option(
            name="nombre1",
            description="Nombre 1",
            option_type=4,
            required=True
        ),
        create_option(
            name="nombre2",
            description="Nombre 2",
            option_type=4,
            required=True
        )])
    async def _rng(self, ctx, nombre1: int, nombre2: int):
        """Reply with a random integer drawn from [nombre1, nombre2] in an embed.

        Rejects the request when nombre1 > nombre2 or when the interval falls
        outside [-2**32, 2**32].
        """
        n_min, n_max = -4294967296, 4294967296
        if nombre1 > nombre2:
            await ctx.send(f"{ctx.author.mention} Le nombre 1 est plus grand que le nombre 2... relance la commande en faisant en sorte que le nombre 1 soit **plus petit** que le nombre 2.")
        elif nombre1 < n_min or nombre2 > n_max:
            await ctx.send(f"{ctx.author.mention} L'intervalle entrée est hors champ, la limite est de [{n_min} ; {n_max}] !")
        else:
            random_nb = random.randint(nombre1, nombre2)
            embed = discord.Embed(title=f"Générateur de nombres aléatoires")
            embed.add_field(name=f"Nombre généré : {random_nb}", value=f"Intervalle de valeurs : {nombre1} et {nombre2}")
            await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point: register the Slash cog on the bot."""
    bot.add_cog(Slash(bot))
def teardown(bot):
    """Extension exit point.
    NOTE(review): remove_cog takes the cog's registered name, which defaults to
    the class name ('Slash'); 'rng' is the slash-command name -- confirm this
    actually removes the cog."""
    bot.remove_cog("rng")
284ed97a6e7a6cca5b8e4fac818f272a6b8ee59d | 1,538 | py | Python | app.py | dodoche/essaie | 53d22cfec969a8f992f4b5cb473bb41a215975b6 | [
"Apache-2.0"
] | null | null | null | app.py | dodoche/essaie | 53d22cfec969a8f992f4b5cb473bb41a215975b6 | [
"Apache-2.0"
] | null | null | null | app.py | dodoche/essaie | 53d22cfec969a8f992f4b5cb473bb41a215975b6 | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from kubernetes.client.rest import ApiException
from pprint import pprint
from kubernetes import client, config
app = Flask(__name__)
@app.route("/ing")
def ing():
    """Read the 'elastic-licensing' ConfigMap from the 'default' namespace and
    return its data mapping."""
    # Configure API key authorization: BearerToken.
    # NOTE: hard-coded token checked into source -- should come from config/secrets.
    # Was `kubernetes.client.Configuration()`: NameError, since only `client`
    # is imported (`from kubernetes import client, config`).
    configuration = client.Configuration()
    configuration.api_key['authorization'] = '5h8FzmDxBpPtKpqZ9XfiKLEuMgh-9pxrHPeEyx0rfZs'
    configuration.api_key_prefix['authorization'] = 'Bearer'
    api_instance = client.CoreV1Api()
    name = 'elastic-licensing'  # name of the ConfigMap
    namespace = 'default'  # object name and auth scope
    pretty = 'pretty_example'  # pretty-print the output
    exact = True  # deprecated: exact export keeps cluster-specific fields
    export = True  # deprecated: strip fields a user can not specify
    try:
        api_response = api_instance.read_namespaced_config_map(name, namespace, pretty=pretty, exact=exact, export=export)
    except ApiException as e:
        pprint("Exception when calling CoreV1Api->read_namespaced_config_map: %s\n" % e)
        # `api_response` was previously unbound here, raising UnboundLocalError
        # on the return below; report the failure explicitly instead.
        return "error reading config map"
    return (api_response.data)
app.run(host='0.0.0.0',port=8000)
| 48.0625 | 171 | 0.727568 |
284f1b158ca4db2c6a71a6bb4e065847635e83d7 | 447 | py | Python | Chapter2_Python/Logic.py | LKilian1/UdemyML_Template | 4d9cd40c35ff29d796e2b7d327e0032ee7dc2f5a | [
"MIT"
] | null | null | null | Chapter2_Python/Logic.py | LKilian1/UdemyML_Template | 4d9cd40c35ff29d796e2b7d327e0032ee7dc2f5a | [
"MIT"
] | null | null | null | Chapter2_Python/Logic.py | LKilian1/UdemyML_Template | 4d9cd40c35ff29d796e2b7d327e0032ee7dc2f5a | [
"MIT"
] | null | null | null | i_am_broke = False
if i_am_broke:
print("I am broke.")
else:
print("I am not broke.")
my_bank_account = 1000
if my_bank_account <= 0:
print("I am broke.")
else:
print("I am not broke.")
# equal ==
# less <
# greater >
# not equal !=
# less or equal <=
# greater or equal >=
my_age = 21
if my_age < 18:
print("You are a child.")
elif my_age < 66:
print("You are a an adult.")
else:
print("You are a pensioner.")
| 13.96875 | 33 | 0.604027 |
284f52f9892227b12c3c28dae697a2873a90c1e7 | 34 | py | Python | my_script.py | jeroenpijpker/easy_CD_tutorial | 2827508a7060c74ff937d7820c5b0f5cdfa4d6a4 | [
"MIT"
] | null | null | null | my_script.py | jeroenpijpker/easy_CD_tutorial | 2827508a7060c74ff937d7820c5b0f5cdfa4d6a4 | [
"MIT"
] | null | null | null | my_script.py | jeroenpijpker/easy_CD_tutorial | 2827508a7060c74ff937d7820c5b0f5cdfa4d6a4 | [
"MIT"
] | null | null | null | print("x")
# Print "x" three times.
for _ in range(3):
    print("x")
2851770a789aa6df103f42e0f34d59c8093d59ff | 498 | py | Python | neighbour/migrations/0014_auto_20211228_2342.py | mary-wan/Neighbourhood | 4150ea60d8ab471fce7173c50c040f36320f3f40 | [
"Unlicense"
] | 2 | 2022-01-17T03:52:59.000Z | 2022-02-18T15:09:34.000Z | neighbour/migrations/0014_auto_20211228_2342.py | mary-wan/Neighbourhood | 4150ea60d8ab471fce7173c50c040f36320f3f40 | [
"Unlicense"
] | null | null | null | neighbour/migrations/0014_auto_20211228_2342.py | mary-wan/Neighbourhood | 4150ea60d8ab471fce7173c50c040f36320f3f40 | [
"Unlicense"
] | null | null | null | # Generated by Django 2.2.24 on 2021-12-28 20:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('neighbour', '0013_profile_neighbourhood'),
]
operations = [
migrations.AlterModelOptions(
name='neighbourhood',
options={'ordering': ['-pk']},
),
migrations.RenameField(
model_name='post',
old_name='hood',
new_name='neighbourhood',
),
]
| 21.652174 | 52 | 0.566265 |
2853e0d7d747d6c3288b88732191d861e6eecd97 | 427 | py | Python | scipy/ndimage/tests/__init__.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 9,095 | 2015-01-02T18:24:23.000Z | 2022-03-31T20:35:31.000Z | scipy/ndimage/tests/__init__.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 11,500 | 2015-01-01T01:15:30.000Z | 2022-03-31T23:07:35.000Z | scipy/ndimage/tests/__init__.py | Ennosigaeon/scipy | 2d872f7cf2098031b9be863ec25e366a550b229c | [
"BSD-3-Clause"
] | 5,838 | 2015-01-05T11:56:42.000Z | 2022-03-31T23:21:19.000Z |
from __future__ import annotations
from typing import List, Type
import numpy
# list of numarray data types
integer_types: List[Type] = [
numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
numpy.int32, numpy.uint32, numpy.int64, numpy.uint64]
float_types: List[Type] = [numpy.float32, numpy.float64]
complex_types: List[Type] = [numpy.complex64, numpy.complex128]
types: List[Type] = integer_types + float_types
| 26.6875 | 63 | 0.754098 |
285632d72a4a6ee14ee3a1b9a5965b712d109e62 | 10,286 | py | Python | experiments/utils.py | linshaoxin-maker/taas | 34e11fab167a7beb78fbe6991ff8721dc9208793 | [
"MIT"
] | 4 | 2021-02-28T11:58:18.000Z | 2022-02-03T03:26:45.000Z | experiments/utils.py | linshaoxin-maker/taas | 34e11fab167a7beb78fbe6991ff8721dc9208793 | [
"MIT"
] | null | null | null | experiments/utils.py | linshaoxin-maker/taas | 34e11fab167a7beb78fbe6991ff8721dc9208793 | [
"MIT"
] | null | null | null | import torch
import os
import subprocess as sp
from mlutils.pt.training import TrainerBatch
from mlutils.callbacks import Callback
from os import path
import numpy as np
import json
class WeightedSum:
    """Accumulates a weighted running sum and reports the weighted mean."""
    def __init__(self, name, init, postprocessing=None):
        self.name = name
        self.v = init                      # running weighted sum of values
        self.w = 0                         # running total weight
        self.postprocessing = postprocessing
    def add(self, value, weight):
        """Fold one (value, weight) observation into the accumulator."""
        # Deliberately non-in-place: values may be tensors shared elsewhere.
        self.v = self.v + value * weight
        self.w = self.w + weight
    def get(self):
        """Weighted mean (optionally post-processed); 0 if nothing accumulated."""
        if self.w == 0:
            return 0
        mean = self.v / self.w
        return mean if self.postprocessing is None else self.postprocessing(mean)
    def __repr__(self):
        return f'{self.name} {self.get():.5f}'
class PerplexStatistics:
    """Accumulates weighted training statistics (perplexities, losses, KLD,
    penalties) across batches using WeightedSum accumulators.

    Each entry of ``self.stat`` maps a display name to a tuple
    (accumulator, key into the batch stat dict, key into the weight dict).
    """
    def __init__(self):
        # post-processing helpers applied when reading out the weighted mean
        def _item(x):
            return x.item()
        def _exp_item(x):
            return torch.exp(x).item()
        self.stat = {
            'ppx': (WeightedSum('ppx', 0, _exp_item), '', ''),
            'ppx_doc': (WeightedSum('ppx_doc', 0, _exp_item), '', ''),
            'loss': (WeightedSum('loss', 0, _item), 'loss', 'doc_count'),
            'loss_rec': (WeightedSum('loss_rec', 0, _item), 'rec_loss', 'doc_count'),
            'kld': (WeightedSum('kld', 0, _item), 'kld', 'doc_count'),
            'penalty': (WeightedSum('penalty', 0, _item), 'penalty', 'doc_count'),
            'penalty_mean': (WeightedSum('penalty_mean', 0, _item), 'penalty_mean', 'doc_count'),
            'penalty_var': (WeightedSum('penalty_var', 0, _item), 'penalty_var', 'doc_count'),
        }
    def add(self, stat):
        """Accumulate statistics."""
        # 'data' is removed from `stat` and used only for weighting; it is
        # presumably the bag-of-words batch tensor -- TODO confirm with caller.
        with torch.no_grad():
            data_batch = stat.pop('data')
            weight = {
                'word_count': data_batch.sum(),
                'doc_count': len(data_batch)
            }
            for s, k, w in self.stat.values():
                if s.name == 'ppx_doc':
                    # per-document perplexity: average of per-doc mean -elbo
                    s.add((stat['minus_elbo'] / data_batch.sum(dim=-1)).sum() / weight['doc_count'],
                          weight['doc_count'])
                elif s.name == 'ppx':
                    # corpus perplexity: -elbo per word, weighted by word count
                    s.add(stat['minus_elbo'].sum() / weight['word_count'], weight['word_count'])
                else:
                    if k not in stat:  # skip for compatibility of multiple models.
                        continue
                    s.add(stat[k].mean(), weight[w])
        return self
    def description(self, prefix=''):
        # e.g. "train_loss 1.23456 | train_kld 0.12345"
        return ' | '.join(['{} {:.5f}'.format(prefix + k, v)
                           for k, v in self.get_dict().items()])
    def get_value(self, k):
        """Get the accumulated value."""
        return self.stat[k][0].get()
    def get_dict(self):
        # only statistics that actually accumulated something (non-zero)
        r = {}
        for k in self.stat.keys():
            t = self.stat[k][0].get()
            if t != 0:
                r[k] = t
        return r
class BatchOperation(TrainerBatch):
    """Per-batch train/eval driver wrapping the model forward pass.

    NOTE(review): the optimizer step is commented out in train_batch, so this
    class currently runs the forward pass only (no backprop) -- confirm this
    is intentional.
    """
    def __init__(self, model, optimizer, loss, device, test_sample=1):
        # model is always moved to CUDA here regardless of `device`
        self.model = model.cuda()
        self.optimizer = optimizer
        self.dst_device = device
        assert loss in ('mean', 'sum'), 'loss should be mean or sum of batch losses.'
        self.loss = loss
        self.test_sample = test_sample
    def train_batch(self, data, train=True):
        """Run one forward pass; returns (stat dict, topic tensor) with the
        input batch attached under key 'data'."""
        self.model.train(train)
        data = self.place_data_to_device(data)
        if train:
            # enter GSM class - forward function
            # out: 'stat' in GSM forward function; torch.Tensor
            out, topic = self.model(data)
            # self.optimizer.zero_grad()
            # if self.loss == 'mean':
            #     out['loss'].mean().backward()
            # else:
            #     out['loss'].sum().backward()
            # self.optimizer.step()
        else:
            with torch.no_grad():
                out, topic = self.model(data)
        out.update(data=data)
        return out, topic
def torch_detach(x):
    """Detach a tensor from the autograd graph and return it as a CPU NumPy array."""
    return x.detach().cpu().numpy()
def save_topics(save_path, vocab, topic_prob, topk=100, logger=None):
    """topic_prob: n_topic x vocab_size.
    Assumed that topic_prob[i] is probability distribution for all i.

    Writes three artifacts:
      save_path + '.topics' -- one line per topic with its top-k words,
      save_path + '.values' -- the matching probabilities,
      save_path + '.pt'     -- the raw topic_prob tensor.
    """
    if logger:
        logger.info('saving topics to {}'.format(save_path))
    # top-k word indices/probabilities per topic
    values, indices = torch.topk(topic_prob, k=topk, dim=-1)
    indices = torch_detach(indices)
    values = torch_detach(values)
    topics = []
    for t in indices:
        topics.append(' '.join([vocab.itos[i] for i in t]))
    with open(save_path+'.topics', 'w') as f:
        f.write('\n'.join(topics))
    str_values = []
    for t in values:
        str_values.append(' '.join([str(v) for v in t]))
    with open(save_path+'.values', 'w') as f:
        f.write('\n'.join(str_values))
    torch.save(topic_prob, save_path + '.pt')
def evaluate_topic_coherence(topic_path, ref_corpus_dir, res_path, logger):
    """Evaluate topic coherence for the topics file at `topic_path` (one topic
    of top words per line) via scripts/topic_coherence.sh against
    `ref_corpus_dir`, writing intermediate results to `res_path`.

    Returns the parsed coherence score, or -1 on any failure (missing file,
    unparsable output, or the 20 s timeout).
    """
    if not os.path.exists(topic_path):
        logger.warning('topic file {} not exists'.format(topic_path))
        return -1
    v = -1
    try:
        p = sp.run(['bash', 'scripts/topic_coherence.sh', topic_path, ref_corpus_dir, res_path],
                   encoding='utf-8', timeout=20,
                   stdout=sp.PIPE, stderr=sp.DEVNULL)
        # the score is on the third-to-last line of the script's stdout
        v = float(p.stdout.split('\n')[-3].split()[-1])
    except (ValueError, IndexError, TimeoutError, sp.TimeoutExpired):
        # Fix: sp.run raises subprocess.TimeoutExpired on timeout, which is NOT
        # a TimeoutError subclass -- the old handler let timeouts propagate.
        logger.warning('error when calculating topic coherence at {}'.format(topic_path))
    return v
def normalize(v2d, eps=1e-12):
    """Row-wise L2-normalize a 2-D array; `eps` guards against zero rows."""
    row_norms = np.linalg.norm(v2d, axis=1, keepdims=True)
    return v2d / (row_norms + eps)
def wetc(e):
    """Word-embedding topic coherence for an N x D embedding matrix `e`
    (N top words of one topic, D embedding dims): mean cosine similarity of
    the unit word vectors to their normalized centroid."""
    unit_rows = normalize(e)
    centroid = normalize(unit_rows.mean(axis=0, keepdims=True))
    return float(unit_rows.dot(centroid.T).mean())
class EvaluationCallback(Callback):
    """Training callback that periodically evaluates topic coherence (NPMI via
    external script, or WETC via word embeddings) and logs it."""
    def __init__(self, base_dir, vocab, topk=10, corpus_dir="", embedding_path="", metric='npmi', every=10):
        """Evaluate topic coherence based on NPMI or WETC.
        For NPMI: args are, vocab, topk, corpus_dir
        For WETC: args are, embedding_path
        """
        super(EvaluationCallback, self).__init__()
        self.base_dir = base_dir
        self.vocab = vocab
        self.topk = topk
        self.corpus_dir = corpus_dir
        self.every = every
        self.cnt = 0       # evaluation counter, used in output filenames
        self.max_tc = 0    # best coherence seen so far
        self.last_tc = 0   # most recent coherence
        if metric == 'wetc':
            assert os.path.exists(embedding_path), 'embedding file does not exists.'
            self.embedding = np.load(embedding_path)
            assert len(self.embedding) == len(vocab)
        metric = metric.lower()
        assert metric in ['npmi', 'wetc']
        self.metric = metric
    def wetc(self, topics):
        """WETC score averaged over topics; per-topic scores dumped as JSON."""
        topics = torch_detach(topics)
        idx = np.argsort(topics, axis=1)
        # NOTE(review): the loop variable shadows `idx`; the inner slice takes
        # the top-k indices (argsort is ascending) of each topic row.
        tc = [wetc(self.embedding[idx[-self.topk:]]) for idx in idx]
        save_path = path.join(self.base_dir, 'wetc-topic-{}'.format(self.cnt))
        tc_mean = np.mean(tc)
        tc.append('mean {}'.format(tc_mean))
        with open(save_path, 'w') as f:
            json.dump(tc, f)
        return tc_mean
    def npmi(self, topics):
        """NPMI score via the external coherence script; topics saved to disk first."""
        save_path = path.join(self.base_dir, 'topic-{}'.format(self.cnt))
        save_topics(save_path, self.vocab, topics, self.topk,
                    self.trainer.logger)
        return evaluate_topic_coherence(save_path+'.topics', self.corpus_dir, save_path+'.res', self.trainer.logger)
    def evaluate_topic_coherence(self):
        # pull current topics from the model and score them with the chosen metric
        topics = self.trainer.trainer_batch.model.get_topics()
        assert topics.size(1) == len(self.vocab), 'topics shape error, should be vocab size {}'.format(len(self.vocab))
        if self.metric == 'npmi':
            tc = self.npmi(topics)
        else:
            tc = self.wetc(topics)
        self.max_tc = max(self.max_tc, tc)
        self.last_tc = tc
        self.trainer.summary_writer.add_scalar('topic_coherence', tc, self.cnt)
        self.trainer.logger.info('topic coherence {}'.format(tc))
    def on_epoch_end(self, epoch, logs=None):
        # evaluate every `self.every` epochs
        self.cnt += 1
        if epoch % self.every == 0:
            self.evaluate_topic_coherence()
    def on_train_end(self, logs=None):
        self.evaluate_topic_coherence()
    def get_dict(self):
        """Summary of the best and latest coherence values."""
        return {
            'max_topic_coherence': self.max_tc,
            'last_topic_coherence': self.last_tc
        }
def recover_topic_embedding(topic_word_paths, embedding_path, dataset_dir):
    """Evaluate the WETC of topics generated by NPMI metric.

    Each file in `topic_word_paths` holds one topic per line (space-separated
    words); returns an array of per-topic WETC scores, one row per file.
    """
    from data_utils import read_dataset
    assert isinstance(topic_word_paths, list), 'Multiple paths should be specified.'
    _, _, vocab = read_dataset(dataset_dir)
    embedding = np.load(embedding_path)
    scores = []
    for p in topic_word_paths:
        with open(p) as f:
            r = []
            for line in f:
                # map each topic word to its embedding row
                idx = [int(vocab.stoi[w]) for w in line.split()]
                r.append(wetc(embedding[idx]))
            scores.append(r)
    return np.array(scores)
def recover_model(exp_path):
    """Rebuild a BatchOperation from a saved trainer config under `exp_path`.

    Tries trainer.json first, then trainer.yaml; returns None if neither exists.
    """
    import json
    from mlutils.exp import yaml_load
    config_path = path.join(exp_path, 'trainer.json')
    if path.exists(config_path):
        print('find trainer.json')
        with open(config_path) as f:
            trainer = json.load(f)
        return BatchOperation.from_config(trainer['trainer_batch'])
    config_path = path.join(exp_path, 'trainer.yaml')
    if path.exists(config_path):
        print('find trainer.yaml')
        trainer = yaml_load(config_path)
        return BatchOperation.from_config(trainer['trainer_batch'])
    return None
def visualize_scale(count_path, scale_path, dataset_dir):
    """Plot per-word scale values ordered by descending word frequency.

    `count_path` is a YAML word->count mapping, `scale_path` a .npy array of
    per-vocabulary-word scales. NOTE(review): only plt.plot is called -- the
    figure is presumably shown/saved by the caller.
    """
    from data_utils import read_dataset
    from mlutils.exp import yaml_load
    from matplotlib import pyplot as plt
    _, _, vocab = read_dataset(dataset_dir)
    scale = np.load(scale_path)
    if len(scale.shape) == 2:
        scale = scale[0]
    count = yaml_load(count_path)
    # words sorted by descending frequency
    kv = sorted(count.items(), key=lambda x: -x[1])
    s = scale[[vocab.stoi[w[0]] for w in kv]]
    # scale = np.array([scale[vocab.stoi[w[0]]] for w in kv])
    plt.plot(s)
| 33.504886 | 119 | 0.586914 |
28567dfbf8e22fcad08ad19553a2399e95399e25 | 6,612 | py | Python | noise/perlin.py | AnthonyBriggs/Python-101 | e6c7584fd6791bb5d7d05fd419faa46dc7148f61 | [
"MIT"
] | 3 | 2017-08-02T23:40:55.000Z | 2018-07-02T14:59:07.000Z | noise/perlin.py | AnthonyBriggs/Python-101 | e6c7584fd6791bb5d7d05fd419faa46dc7148f61 | [
"MIT"
] | null | null | null | noise/perlin.py | AnthonyBriggs/Python-101 | e6c7584fd6791bb5d7d05fd419faa46dc7148f61 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
TODO: where'd I get this from?
"""
import math
import random
p = (
151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103,
30,69,142,8,99,37,240,21,10,23,190,6,148,247,120,234,75,0,26,197,
62,94,252,219,203,117,35,11,32,57,177,33,88,237,149,56,87,174,20,
125,136,171,168,68,175,74,165,71,134,139,48,27,166,77,146,158,231,
83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,102,
143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,18,169,200,
196,135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226,
250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,
58,17,182,189,28,42,223,183,170,213,119,248,152,2,44,154,163,70,
221,153,101,155,167,43,172,9,129,22,39,253,19,98,108,110,79,113,
224,232,178,185,112,104,218,246,97,228,251,34,242,193,238,210,144,
12,191,179,162,241,81,51,145,235,249,14,239,107,49,192,214,31,181,
199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,138,236,
205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180,
151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103,
30,69,142,8,99,37,240,21,10,23,190,6,148,247,120,234,75,0,26,197,
62,94,252,219,203,117,35,11,32,57,177,33,88,237,149,56,87,174,20,
125,136,171,168,68,175,74,165,71,134,139,48,27,166,77,146,158,231,
83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,102,
143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,18,169,200,
196,135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226,
250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16,
58,17,182,189,28,42,223,183,170,213,119,248,152,2,44,154,163,70,
221,153,101,155,167,43,172,9,129,22,39,253,19,98,108,110,79,113,
224,232,178,185,112,104,218,246,97,228,251,34,242,193,238,210,144,
12,191,179,162,241,81,51,145,235,249,14,239,107,49,192,214,31,181,
199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,138,236,
205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180)
def lerp(t, a, b):
    """Linear interpolation: returns a at t == 0 and b at t == 1."""
    delta = b - a
    return a + t * delta
def fade(t):
    """Perlin's quintic smoothstep 6t^5 - 15t^4 + 10t^3 (zero 1st/2nd
    derivatives at t=0 and t=1)."""
    t_cubed = t * t * t
    return t_cubed * (t * (t * 6 - 15) + 10)
def grad(hash, x, y, z):
    """Dot product of (x, y, z) with one of Perlin's 16 pseudo-random
    gradient directions selected by the low 4 bits of `hash`."""
    h = hash & 15
    u = x if h < 8 else y
    if h < 4:
        v = y
    elif h in (12, 14):
        v = x
    else:
        v = z
    # bits 0 and 1 flip the signs of u and v respectively
    if h & 1:
        u = -u
    if h & 2:
        v = -v
    return u + v
def pnoise(x, y, z):
    """Classic improved Perlin noise at (x, y, z); deterministic, roughly -1..1."""
    global p
    # integer lattice cell, wrapped to 0..255 to index the permutation table
    X = int(math.floor(x)) & 255
    Y = int(math.floor(y)) & 255
    Z = int(math.floor(z)) & 255
    # fractional position within the cell
    x -= math.floor(x)
    y -= math.floor(y)
    z -= math.floor(z)
    # smoothed interpolation weights
    u = fade(x)
    v = fade(y)
    w = fade(z)
    # hash the 8 cube corners via the (duplicated) permutation table p
    A = p[X] + Y
    AA = p[A] + Z
    AB = p[A + 1] + Z
    B = p[X + 1] + Y
    BA = p[B] + Z
    BB = p[B + 1] + Z
    pAA = p[AA]
    pAB = p[AB]
    pBA = p[BA]
    pBB = p[BB]
    pAA1 = p[AA + 1]
    pBA1 = p[BA + 1]
    pAB1 = p[AB + 1]
    pBB1 = p[BB + 1]
    # gradient contributions from each corner
    gradAA = grad(pAA, x, y, z)
    gradBA = grad(pBA, x-1, y, z)
    gradAB = grad(pAB, x, y-1, z)
    gradBB = grad(pBB, x-1, y-1, z)
    gradAA1 = grad(pAA1,x, y, z-1)
    gradBA1 = grad(pBA1,x-1, y, z-1)
    gradAB1 = grad(pAB1,x, y-1, z-1)
    gradBB1 = grad(pBB1,x-1, y-1, z-1)
    # trilinear interpolation of the 8 gradients with faded weights
    return lerp(w,
        lerp(v, lerp(u, gradAA, gradBA), lerp(u, gradAB, gradBB)),
        lerp(v, lerp(u, gradAA1,gradBA1),lerp(u, gradAB1,gradBB1)))
def perlin_multifractal(x,y,z, octaves, lambda_, amplitude):
    """Multi fractal just means that we have more noise,
    at higher frequencies (aka. octaves), layered on top
    of the existing noise.

    Each successive octave halves both wavelength (lambda_) and amplitude;
    octaves after the second are additionally modulated by the running sum.
    NOTE(review): `sum` and `oct` shadow Python builtins.
    """
    sum = 0
    for oct in range(octaves):
        #print oct, amplitude, lambda_
        amp = amplitude / (2 ** oct);
        lam = lambda_ / (2 ** oct);
        # todo - find a decent interpolation function?
        #add = interpolate(x/lam, y/lam, z/lam) * amp;
        add = pnoise(x/lam, y/lam, z/lam) * amp;
        if oct > 1:
            add *= sum;
        sum += add;
    return sum
def perlin_multi_common(x, y, z):
    """Multifractal noise with the common defaults: 4 octaves, lambda=1.0, amplitude=1.0."""
    return perlin_multifractal(x,y,z, 4, 1.0, 1.0)
def perlin_ridged(x, y, z):
    """Ridged noise: folds pnoise() about zero so the output runs -1..1..-1,
    producing sharp ridges where the raw noise crosses zero."""
    raw = pnoise(x, y, z)
    # fold into [0, 1]: positive values reflect downward, negatives shift up
    folded = (-raw) + 1 if raw > 0 else raw + 1
    # rescale [0, 1] back to [-1, 1]
    return 2*folded - 1
def perlin_ridged_multifractal(x,y,z, octaves, lambda_, amplitude):
    """Layered ridged noise: like perlin_multifractal but each octave samples
    perlin_ridged, and later octaves are scaled by k times the running sum."""
    sum = 0
    # k is a scaling constant. NFI what it should be though...
    k = 0.1
    for oct in range(octaves):
        #print oct, amplitude, lambda_
        amp = amplitude / (2 ** oct);
        lam = lambda_ / (2 ** oct);
        #add = interpolate(x/lam, y/lam, z/lam) * amp;
        add = perlin_ridged(x/lam, y/lam, z/lam) * amp;
        if oct > 1:
            add *= k * sum;
        sum += add;
    return sum
def perlin_ridged_multi_common(x, y, z):
    """Ridged multifractal noise with the common defaults: 4 octaves, lambda=1.0, amplitude=1.0."""
    return perlin_ridged_multifractal(x,y,z, 4, 1.0, 1.0)
def get_random_range(num_steps=100):
    """Draw two uniform [0, 1) samples and return (start, end, step) with
    start <= end and step = (end - start) / num_steps."""
    first = random.random()
    second = random.random()
    start, end = (first, second) if first <= second else (second, first)
    return start, end, (end - start) / num_steps
def make_landscape(x_size, y_size, noise_func, startx, stepx, starty, stepy):
    """Display some perlin noise as an image in a window.

    Samples noise_func over an x_size * y_size grid (z fixed at 0.5) and maps
    the -1..1 range to 0..256 grey levels.
    NOTE(review): relies on the global `Image`, imported only inside the
    __main__ guard -- calling this after a plain module import raises
    NameError. The grey levels are floats; confirm putdata truncates them
    as intended.
    """
    heights = []
    landscape = Image.new("RGB", (x_size, y_size))
    for x in range(x_size):
        for y in range(y_size):
            xnoise, ynoise = (startx + x*stepx, starty + y*stepy)
            # make noise 0..2.0 instead of -1..1 and mult up to 256 max
            height = 128 * (noise_func(xnoise, ynoise, 0.5) + 1.0)
            heights.append((height, height, height))
    print(len(heights), "heights generated")
    landscape.putdata(heights)
    landscape.show()
if __name__ == '__main__':
for z in range(10):
for y in range(10):
for x in range(10):
print("%.02f" % pnoise(x/10.0, y/10.0, z/10.0), end=' ')
print()
print()
# pnoise is deterministic
for i in range(10):
print(pnoise(0.1, 0.1, 0.1))
import Image
x_size = 200; y_size = 200
#startx, endx, stepx = get_random_range()
#starty, endy, stepy = get_random_range()
startx, endx, stepx = (0.0, 5.0, 5.0/x_size)
starty, endy, stepy = (0.0, 5.0, 5.0/y_size)
make_landscape(x_size, y_size, pnoise, startx, stepx, starty, stepy)
make_landscape(x_size, y_size, perlin_multi_common, startx, stepx, starty, stepy)
make_landscape(x_size, y_size, perlin_ridged, startx, stepx, starty, stepy)
make_landscape(x_size, y_size, perlin_ridged_multi_common, startx, stepx, starty, stepy)
| 31.942029 | 92 | 0.58908 |
2856b66bfd0e689f82a8cb47b06e0b491d804f30 | 3,555 | py | Python | tests/test_random.py | xadrnd/display_sdk_ios | f8a140d1cf3d1e8f63b915caf8c723d771dac13f | [
"BSD-3-Clause"
] | null | null | null | tests/test_random.py | xadrnd/display_sdk_ios | f8a140d1cf3d1e8f63b915caf8c723d771dac13f | [
"BSD-3-Clause"
] | null | null | null | tests/test_random.py | xadrnd/display_sdk_ios | f8a140d1cf3d1e8f63b915caf8c723d771dac13f | [
"BSD-3-Clause"
] | null | null | null | from test_base import DisplaySDKTest
from utils import *
class RandomTest(DisplaySDKTest):
    """Random ("monkey") UI tests: repeatedly load each ad format and poke at it."""

    def _run_banner_round(self, *size):
        """Shared body of the two banner tests (they previously duplicated
        ~30 lines each, differing only in the ad size argument).

        Loads a banner ad 10 times, accepts the location prompt, peeks into
        the web view, and if the creative opened a landing page, clicks
        through any OK/Cancel dialogs and navigates back.
        """
        click_btn(self, "All Errors")
        for _ in range(10):
            click_load_ad_btn(self, "Banner", *size)
            accept_location(self)
            webview = block_until_webview(self)
            # switch to web view (hook for inspecting the HTML, currently unused)
            print("Switching to webview")
            self.driver.switch_to.context(webview)
            # switch back to native view
            print("Switching to native view")
            self.driver.switch_to.context(self.driver.contexts[0])
            # if a web view is present, click into the creative
            webview = self.driver.find_elements_by_ios_predicate('wdType == "XCUIElementTypeWebView"')
            if len(webview) > 0:
                click_on_webview(self)
                sleep(5)
                # dismiss any dialogs the landing page may have opened;
                # bare `except:` narrowed so Ctrl-C still interrupts the run
                for label in ("OK", "Cancel"):
                    try:
                        click_btn(self, label)
                    except Exception:
                        pass
                # navigate back
                try:
                    click_back_btn(self)
                except Exception:
                    pass

    def test_banner_random(self):
        self._run_banner_round()

    def test_banner_random_large(self):
        self._run_banner_round("300x250")

    def test_interstitial_random(self):
        for _ in range(10):
            click_load_ad_btn(self, "Interstitial")
            accept_location(self)
            block_until_webview(self)
            sleep(1)
            # find and tap the close button (first button on screen)
            close_btn = self.driver.find_elements_by_ios_predicate('wdType == "XCUIElementTypeButton"')[0]
            close_btn.click()
            sleep(1)

    def test_video_random(self):
        for _ in range(10):
            click_load_ad_btn(self, "Video")
            accept_location(self)
            # TODO Assert video player appeared
            # TODO Poll until video is done
            # TODO Close video
28573be47f861d148cdbe92b563ea159f2c46f16 | 1,964 | py | Python | libretto/plugin/__init__.py | johnwcchau/libretto | 8b8cde81a978536f1c797e070818188bd2c006e5 | [
"MIT"
] | null | null | null | libretto/plugin/__init__.py | johnwcchau/libretto | 8b8cde81a978536f1c797e070818188bd2c006e5 | [
"MIT"
] | null | null | null | libretto/plugin/__init__.py | johnwcchau/libretto | 8b8cde81a978536f1c797e070818188bd2c006e5 | [
"MIT"
] | null | null | null | import logging
from configparser import ConfigParser
from typing import Callable
__plugins = {}
def plugin_disabled(config, path):
    """Return True when the plugin owning *path* is disabled in *config*.

    *path* points at a file inside the plugin package (e.g.
    ``libretto/plugin/foo/__init__.py``); the config section name is the
    dotted package path with the last two components (module name and
    file extension) dropped.
    """
    dotted = path.replace('/', '.').replace('\\', '.')
    section = '.'.join(dotted.split('.')[:-2])
    return config.getboolean(section, "disabled", fallback=False)
def plugin_mjs(config):
    """Regenerate libretto/plugin/plugins.mjs: one import line per enabled
    plugin's __init__.mjs, followed by a plugin_css() helper that injects
    every plugin's __init__.css stylesheet into the page head."""
    from glob import glob
    with open("libretto/plugin/plugins.mjs", "w") as file:
        for path in glob("libretto/plugin/**/__init__.mjs"):
            if plugin_disabled(config, path): continue
            # strip the leading "libretto" prefix so the import path is
            # relative to the generated file's location
            path = path[8:].replace("\\", "/")
            name = path.split('/')[-2]  # NOTE(review): assigned but unused here
            file.write(f'import {{}} from "{path}";\n');
        file.write("""
export default function plugin_css() {
""")
        for name in glob("libretto/plugin/**/__init__.css"):
            # NOTE(review): slice [4:] differs from the [8:] used above --
            # confirm which prefix length is intended for css paths
            name = name[4:].replace("\\", "/")
            file.write(f"""
$("head").append('<link rel="stylesheet" href="{name}" type="text/css" />');
""")
        file.write("""
}""")
def init(config):
    """Discover every package under libretto/plugin, import the ones not
    disabled in *config*, run their optional "__init_plugin" hook and
    record them in the module-level __plugins registry."""
    from glob import glob
    from importlib import import_module
    from os import path
    import logging
    global __plugins
    for path in glob("libretto/plugin/**/__init__.py"):
        name = '.'.join(path.replace('/', '.').replace('\\', '.').split('.')[:-2])
        logging.info(f'Discovered plugin {name}')
        if plugin_disabled(config, path):
            logging.info(f'{name}: Disabled in config and not loaded')
            continue
        try:
            module = import_module(f'{name}.__init__')
            # optional initialisation hook exposed by the plugin
            if hasattr(module, "__init_plugin"):
                getattr(module, "__init_plugin")(config)
            __plugins[name] = module
        except Exception as exc:
            logging.error(repr(exc))
def dispatch(lamda:Callable[[str, object], None]):
    """Invoke *lamda*(plugin_name, plugin_module) for every loaded plugin."""
    global __plugins
    for plugin_name, plugin_module in __plugins.items():
        lamda(plugin_name, plugin_module)
def find_plugin(plugin:str):
    """Return the loaded plugin module registered under *plugin*, or None.

    Idiom fix: ``dict.get`` replaces the explicit ``in`` check plus
    indexing (one lookup instead of two, same result).
    """
    global __plugins
    return __plugins.get(plugin)
2858823061844e57eb3d0e1bf225fe8863fd7485 | 30,325 | py | Python | cax/tasks/tsm_mover.py | XENON1T/cax | 06de9290851904695275fd34d7c74e2c9eb7fe59 | [
"0BSD"
] | 2 | 2016-05-19T05:51:15.000Z | 2017-10-13T13:43:00.000Z | cax/tasks/tsm_mover.py | XENON1T/cax | 06de9290851904695275fd34d7c74e2c9eb7fe59 | [
"0BSD"
] | 93 | 2016-03-26T20:34:01.000Z | 2021-03-25T21:41:57.000Z | cax/tasks/tsm_mover.py | XENON1T/cax | 06de9290851904695275fd34d7c74e2c9eb7fe59 | [
"0BSD"
] | 2 | 2017-05-19T03:47:09.000Z | 2018-12-19T18:10:45.000Z | """Handle copying data between sites.
tsm_mover.py contains the necessary classes
to upload and download from tape backup and
synchronize it with the runDB.
Author: Boris Bauermeister
Email: Boris.Bauermeister@fysik.su.se
"""
import datetime
import logging
import os
import time
import hashlib
import json
import random
import requests
import signal
import socket
import subprocess
import sys
import time
import traceback
import datetime
import time
import tarfile
import copy
import shutil
import checksumdir
import tempfile
import scp
from paramiko import SSHClient, util
from cax import config
from cax.task import Task
class TSMclient(Task):
def __init__(self):
"""init"""
def check_client_installation(self):
check_install = self.tsm_commands("check-installation")
logging.debug(check_install)
msg_std, msg_err = self.doTSM(check_install)
client_info = ""
server_info = ""
for i in msg_std:
if i.find("Client Version") >= 0:
client_info = i
logging.info("Client information: %s", client_info)
if i.find("Server Version") >= 0:
server_info = i
logging.info("Server information: %s", server_info)
if i.find("command not found") >= 0:
client_info = "Client not Installed"
server_info = "No Information"
if client_info == "Client not Installed":
return False
else:
return True
def delete_script(self, fileobj):
"""Delete script after submitting to cluster
:param script_path: path to the script to be removed
"""
fileobj.close()
def create_script(self, script):
"""Create script as temp file to be run on cluster"""
fileobj = tempfile.NamedTemporaryFile(delete=True,
suffix='.sh',
mode='wt',
buffering=1)
fileobj.write(script)
os.chmod(fileobj.name, 0o774)
return fileobj
def doTSM(self, upload_string):
sc = self.create_script(upload_string)
execute = subprocess.Popen(['sh', sc.name],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=False)
stdout_value, stderr_value = execute.communicate()
stdout_value = stdout_value.decode("utf-8")
stdout_value = stdout_value.split("\n")
self.delete_script(sc)
return stdout_value, stderr_value
def get_checksum_folder(self, raw_data_location):
return checksumdir.dirhash(raw_data_location, 'sha512')
def get_checksum_list(self, raw_data_location):
"""Get a dictionary with filenames and their checksums"""
filelist = []
for (dirpath, dirnames, filenames) in os.walk(raw_data_location):
filelist.extend(filenames)
break
def download(self, tape_source, dw_destination, raw_data_filename):
"""Download a folder from the tape storage"""
script_download = self.tsm_commands("restore-path").format(path_tsm=tape_source,
path_restore=dw_destination)
logging.debug(script_download)
msg_std, msg_err = self.doTSM(script_download)
tno_dict = {"tno_restored_objects": -1,
"tno_restored_bytes": -1,
"tno_failed_objects": -1,
"tno_data_transfer_time": -1,
"tno_network_transfer_rate": -1,
"tno_aggregate_transfer_rate": -1,
"tno_elapsed_processing_time": -1,
"tno_file_info": "",
}
sub_dict = {}
for i in msg_std:
if i.find("Restoring") >= 0:
j_restoring = i.split(" ")
i_restoring = [x for x in j_restoring if x]
j_dic = {}
filename_local = i_restoring[4].replace(" ", "").split("/")[-1]
j_dic["file_size"] = i_restoring[1].replace(" ", "")
j_dic["file_path_tsm"] = i_restoring[2].replace(" ", "")
j_dic["file_path_local"] = i_restoring[4].replace(" ", "")
j_dic["file_status"] = i_restoring[5].replace(" ", "")
sub_dict[filename_local] = j_dic
if i.find("Total number of objects restored") >= 0:
tno_dict["tno_restored_objects"] = i.split(":")[
1].replace(" ", "")
if i.find("Total number of bytes transferred:") >= 0:
tno_dict["tno_restored_bytes"] = i.split(
":")[1].replace(" ", "")
if i.find("Total number of objects failed:") >= 0:
tno_dict["tno_failed_objects"] = i.split(
":")[1].replace(" ", "")
if i.find("Data transfer time:") >= 0:
tno_dict["tno_data_transfer_time"] = i.split(":")[
1].replace(" ", "")
if i.find("Network data transfer rate:") >= 0:
tno_dict["tno_network_transfer_rate"] = i.split(":")[
1].replace(" ", "")
if i.find("Aggregate data transfer rate:") >= 0:
tno_dict["tno_aggregate_transfer_rate"] = i.split(":")[
1].replace(" ", "")
if i.find("Elapsed processing time:") >= 0:
tno_dict["tno_elapsed_processing_time"] = i.split(":")[
1].replace(" ", "")
tno_dict["tno_file_info"] = sub_dict
return tno_dict
def upload(self, raw_data_location):
script_upload = self.tsm_commands(
"incr-upload-path").format(path=raw_data_location)
logging.debug(script_upload)
tno_dict = {
"tno_inspected": -1,
"tno_updated": -1,
"tno_rebound": -1,
"tno_deleted": -1,
"tno_expired": -1,
"tno_failed": -1,
"tno_encrypted": -1,
"tno_grew": -1,
"tno_retries": -1,
"tno_bytes_inspected": -1,
"tno_bytes_transferred": -1,
"tno_data_transfer_time": -1,
"tno_network_transfer_rate": -1,
"tno_aggregate_transfer_rate": -1,
"tno_object_compressed": -1,
"tno_total_data_reduction": -1,
"tno_elapsed_processing_time": -1
}
msg_std, msg_err = self.doTSM(script_upload)
for i in msg_std:
if i.find("Total number of objects inspected:") >= 0:
tno_dict['tno_inspected'] = int(
i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects backed up:") >= 0:
tno_dict['tno_backedup'] = int(
i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects updated:") >= 0:
tno_dict['tno_updated'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects rebound:") >= 0:
tno_dict['tno_rebound'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects deleted:") >= 0:
tno_dict['tno_deleted'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects expired:") >= 0:
tno_dict['tno_expired'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects failed:") >= 0:
tno_dict['tno_failed'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects encrypted:") >= 0:
tno_dict['tno_encrypted'] = int(
i.split(":")[1].replace(",", ""))
elif i.find("Total number of objects grew:") >= 0:
tno_dict['tno_grew'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of retries:") >= 0:
tno_dict['tno_retries'] = int(i.split(":")[1].replace(",", ""))
elif i.find("Total number of bytes inspected:") >= 0:
tno_dict['tno_bytes_inspected'] = i.split(
":")[1].replace(" ", "")
elif i.find("Total number of bytes transferred:") >= 0:
tno_dict['tno_bytes_transferred'] = i.split(":")[
1].replace(" ", "")
elif i.find("Data transfer time:") >= 0:
tno_dict['tno_data_transfer_time'] = i.split(":")[
1].replace(" ", "")
elif i.find("Network data transfer rate:") >= 0:
tno_dict['tno_network_transfer_rate'] = i.split(":")[
1].replace(" ", "")
elif i.find("Aggregate data transfer rate:") >= 0:
tno_dict['tno_aggregate_transfer_rate'] = i.split(":")[
1].replace(" ", "")
elif i.find("Objects compressed by:") >= 0:
tno_dict['tno_object_compressed'] = i.split(":")[
1].replace(" ", "")
elif i.find("Total data reduction ratio:") >= 0:
tno_dict['tno_total_data_reduction'] = i.split(":")[
1].replace(" ", "")
elif i.find("Elapsed processing time:") >= 0:
tno_dict['tno_elapsed_processing_time'] = (i.split(":")[1].replace(" ", "") + ":" +
i.split(":")[2].replace(" ", "") + ":" +
i.split(":")[3].replace(" ", ""))
return tno_dict
def copy_and_rename(self, source, destination):
"""Create a viratually copy in /tmp for upload"""
pass
def delete(self, path):
"""Delete the given path including the sub-folders"""
pass
def tsm_commands(self, method=None):
host_xe1t_datamanager = """#!/bin/bash
echo "Basic Config@xe1tdatamanager"
source /home/xe1ttransfer/tsm_config/init_tsm.sh
"""
host_teger = """#!/bin/bash
echo "Basic Config@Tegner"
export PATH="/cfs/klemming/projects/xenon/.adm/xenon-tsm/:$PATH"
"""
general = {"xe1t-datamanager": host_xe1t_datamanager,
"tegner-login-1": host_teger}
check_for_raw_data = """
dsmc query ba {path}
"""
check_method = """
echo "No method is selected: Do nothing"
"""
incr_upload = """
dsmc incr {path}/
"""
restore_path = """
dsmc rest {path_tsm}/ {path_restore}/ -followsymbolic=yes
"""
check_install = """
dsmc
"""
if method == "check-for-raw-data":
return general[config.get_hostname()] + check_for_raw_data
elif method == None:
return general[config.get_hostname()]
elif method == "incr-upload-path":
return general[config.get_hostname()] + incr_upload
elif method == "restore-path":
return general[config.get_hostname()] + restore_path
elif method == "check-installation":
return general[config.get_hostname()] + check_install
else:
return general[config.get_hostname()] + check_method
# Class: Add checksums for missing tsm-server entries in the runDB:
class AddTSMChecksum(Task):
    """Perform a checksum on accessible data at the tsm-server
    and add the checksum to the runDB.
    (Only in case the checksum is not yet added)
    """

    def variables(self):
        # Sentinel meaning "no xe1t-datamanager checksum seen yet".
        # NOTE(review): this is reset on every each_location() call, so the
        # later sentinel comparison is always true within one call -- confirm
        # whether cross-call state was intended.
        self.checksum_xe1t = 'no_checksum_xe1tdatam'

    def each_location(self, data_doc):
        """Restore an untracked tsm-server dataset to a scratch area,
        hash it and store the hash in the run database."""
        hostname = config.get_hostname()
        destination = config.get_config("tsm-server")
        self.variables()

        if data_doc['host'] == "xe1t-datamanager":
            self.checksum_xe1t = data_doc['checksum']
            logging.info("Found checksum for xe1t-datamanger: %s",
                         self.checksum_xe1t)
            return

        needs_checksum = (destination['name'] == data_doc['host']
                          and data_doc['checksum'] == None
                          and data_doc['status'] == 'transferred')
        if not needs_checksum:
            return

        logging.info(
            "There is a database entry for %s (transferred) but no checksum", data_doc['location'])

        self.tsm = TSMclient()

        raw_data_location = data_doc['location']
        raw_data_filename = data_doc['location'].split('/')[-1]
        raw_data_path = config.get_config(config.get_hostname())['dir_raw']
        raw_data_tsm = config.get_config(config.get_hostname())['dir_tsm']
        tmp_data_path = raw_data_tsm + "tmp_checksum_test/"
        logging.info("Raw data location @xe1t-datamanager: %s",
                     raw_data_location)
        logging.info("Path to raw data: %s", raw_data_path)
        logging.info("Path to tsm data: %s", raw_data_tsm)
        logging.info("Path to temp. data: %s", tmp_data_path)
        logging.info("File/Folder for backup: %s", raw_data_filename)

        # Sanity check: a broken dsmc client would make the restore fail.
        if self.tsm.check_client_installation() == False:
            logging.info("There is a problem with your dsmc client")
            return

        # Make sure the scratch download directory exists and is clean.
        if not os.path.exists(tmp_data_path):
            os.makedirs(tmp_data_path)
        dfolder = tmp_data_path + "/" + raw_data_filename
        if os.path.exists(dfolder):
            logging.info(
                "Temp. directory %s already exists -> Delete it now", dfolder)
            shutil.rmtree(dfolder)

        # Restore the dataset from tape into the scratch directory.
        tsm_download_result = self.tsm.download(
            raw_data_tsm + raw_data_filename, tmp_data_path, raw_data_filename)
        if os.path.exists(tmp_data_path + raw_data_filename) == False:
            logging.info("Download to %s failed.", raw_data_path)

        # Hash the restored copy.
        checksum_after = self.tsm.get_checksum_folder(
            tmp_data_path + "/" + raw_data_filename)

        logging.info("Summary of the download for checksum comparison:")
        logging.info("Number of downloaded files: %s",
                     tsm_download_result["tno_restored_objects"])
        logging.info("Transferred amount of data: %s",
                     tsm_download_result["tno_restored_bytes"])
        logging.info("Network transfer rate: %s",
                     tsm_download_result["tno_network_transfer_rate"])
        logging.info("Download time: %s",
                     tsm_download_result["tno_data_transfer_time"])
        logging.info("Number of failed downloads: %s",
                     tsm_download_result["tno_failed_objects"])
        logging.info("MD5 Hash (database entry|TSM-SERVER): %s",
                     data_doc['checksum'])
        logging.info(
            "MD5 Hash (database entry|xe1t-datamanager): %s", self.checksum_xe1t)
        logging.info("MD5 Hash (downloaded data): %s", checksum_after)

        if data_doc['checksum'] == None and self.checksum_xe1t == "no_checksum_xe1tdatam":
            logging.info("No checksum for database entry TSM-server")
            logging.info("Checksums for xe1t-datamanager is verfied")
            if config.DATABASE_LOG:
                logging.info("Notify the runDB to add checksum")
                self.collection.update({'_id': self.run_doc['_id'],
                                        'data': {'$elemMatch': data_doc}},
                                       {'$set': {'data.$.checksum': checksum_after}})

        logging.info(
            "Delete temp. directory for checksum verification: %s", dfolder)
        shutil.rmtree(dfolder)
# Class: Log-file analyser:
class TSMLogFileCheck():
    """Offline analyser for cax TSM transfer log files.

    Parses every log file in a directory and writes summary TGraphs
    (upload/download rate, volume, timing) into a ROOT file.

    NOTE(review): indentation was not preserved in this copy of the file;
    the nesting below is a reconstruction -- confirm against the source.
    """

    def __init__(self, f_folder=None):
        # Default log location is the transfer account's tsm_log directory.
        if f_folder != None:
            self.f_folder = f_folder
        else:
            self.f_folder = "/home/xe1ttransfer/tsm_log/"
        self.flist = self.init_logfiles_from_path(self.f_folder)
        self.read_all_logfiles()

    def init_logfiles_from_path(self, path_to_logfiles):
        """Read the log-file path for logfiles:"""
        # Returns 0 (not an empty list) when the path is missing or empty
        # -- callers must be prepared for either type.
        if path_to_logfiles == None:
            logging.info("No log file path is chosen")
            return 0
        filelist = []
        for (dirpath, dirnames, filenames) in os.walk(path_to_logfiles):
            filelist.extend(filenames)
            break
        if len(filelist) == 0:
            logging.info(
                "Ups... Your chosen log file folder (%s) seems to be empty", path_to_logfiles)
            return 0
        return filelist

    def search_for_expression(self, logfile, expression):
        # True when *expression* appears anywhere in *logfile*.
        ffile = open(logfile, 'r')
        is_in = False
        for i_line in ffile:
            if i_line.find(expression) >= 0:
                expression_position = i_line.find(expression)
                is_in = True
        return is_in

    def sort(self, sortkey=None):
        """Have a sort key for upload time and run time"""
        pass

    def read_logfile(self, logfile=None, search_expression=None):
        # Returns (dataset name, info dict) or (0, 0) when the file is
        # missing or does not contain *search_expression*.
        if logfile == None:
            return 0, 0
        """Read single log file"""
        ffile = open(logfile, 'r')
        # Select log file for a search criterion:
        t = self.search_for_expression(logfile, search_expression)
        if t == False:
            return 0, 0
        # extract the when the upload started:
        #print( logfile.split(".")[0].split("_") )
        inf_str = logfile.split(".")[0].split("_")
        # print("a")
        length = len(inf_str)
        # last two "_" separated fields of the file name: date and time
        date_str = str(inf_str[length - 2])
        time_str = str(inf_str[length - 1])
        datetime_str = "{dd}_{tt}".format(dd=date_str,
                                          tt=time_str)
        #print(inf_str, length, date_str, time_str, datetime_str)
        nb_uploaded_files = 0
        nb_inspected_files = 0
        tr_amount_up = 0
        tr_amount_up_counted = False
        tr_amount_dw = 0
        tr_rate_up = 0
        tr_rate_up_counted = False
        tr_rate_dw = 0
        upload_time = 0
        download_time = 0
        total_time = 0
        dataset = ''
        dataset_time = ''
        for i in ffile:
            if i.find("Number of uploaded files:") >= 0:
                nb_uploaded_files = int(
                    i[i.find("Number of uploaded files:"):].split(":")[1].replace(" ", ""))
            if i.find("Number of inspected files:") >= 0:
                nb_inspected_files = int(
                    i[i.find("Number of inspected files:"):].split(":")[1].replace(" ", ""))
            if i.find("Upload time:") >= 0:
                print("TU: ", i)
                upload_time = i[i.find("Upload time:"):].split(
                    ":")[1].replace(" ", "").replace(",", "")
                upload_time = upload_time[:len(upload_time) - 4]
            if i.find("Download time:") >= 0:
                print("TD: ", i)
                download_time = i[i.find("Download time:"):].split(":")[
                    1].replace(" ", "").replace(",", "")
                download_time = download_time[:len(download_time) - 4]
            # First "Transferred amount" line -> upload volume (GB)
            if i.find("Transferred amount of data:") >= 0 and tr_amount_up_counted == False:
                tr_read = i[i.find("Transferred amount of data:"):].split(":")[
                    1].replace(" ", "")
                tr_amount_up = tr_read[:len(tr_read) - 3]
                tr_amount_unit = tr_read[len(tr_read) - 3:].replace(" ", "")
                if tr_amount_unit.find("MB") >= 0:
                    tr_amount_up = float(tr_amount_up) / 1024.
                elif tr_amount_unit.find("KB") >= 0:
                    tr_amount_up = float(tr_amount_up) / 1024. / 1024.
                elif tr_amount_unit.find("GB") >= 0:
                    tr_amount_up = float(tr_amount_up)
                print("TUPAmount: ", tr_amount_up)
                tr_amount_up_counted = True
            # NOTE(review): the flag was just set above, so this branch also
            # fires for the SAME line as the upload branch -- confirm whether
            # an elif was intended here.
            if i.find("Transferred amount of data:") >= 0 and tr_amount_up_counted == True:
                tr_read = i[i.find("Transferred amount of data:"):].split(":")[
                    1].replace(" ", "")
                tr_amount_dw = tr_read[:len(tr_read) - 3]
                tr_amount_unit = tr_read[len(tr_read) - 3:].replace(" ", "")
                if tr_amount_unit.find("MB") >= 0:
                    tr_amount_dw = float(tr_amount_dw) / 1024.
                elif tr_amount_unit.find("KB") >= 0:
                    tr_amount_dw = float(tr_amount_dw) / 1024. / 1024.
                elif tr_amount_unit.find("GB") >= 0:
                    tr_amount_dw = float(tr_amount_dw)
                print("TDWAmount: ", tr_amount_dw)
            if i.find("Network transfer rate:") >= 0 and tr_rate_up_counted == False:
                print("NTR up: ", i)
                tr_rate_up = i[i.find("Network transfer rate:"):].split(":")[
                    1].replace(" ", "").replace(",", "")
                tr_rate_up = tr_rate_up[:len(tr_rate_up) - 7]
                tr_rate_up_counted = True
            # NOTE(review): this condition checks tr_amount_up_counted, not
            # tr_rate_up_counted -- looks like a copy/paste slip; confirm.
            if i.find("Network transfer rate:") >= 0 and tr_amount_up_counted == True:
                print("NTR dw: ", i)
                tr_rate_dw = i[i.find("Network transfer rate:"):].split(":")[
                    1].replace(" ", "").replace(",", "")
                tr_rate_dw = tr_rate_dw[:len(tr_rate_dw) - 7]
            if i.find("tsm upload dataset") >= 0:
                # dataset name: 6 chars before / 5 chars after the first "_"
                position = int(i.split("[INFO]")[1].find("_"))
                beg_d = int(position - 6)
                end_d = int(position + 5)
                dataset = i.split("[INFO]")[1][beg_d:end_d]
                total_time = i.split("took")[1].replace(
                    " ", "").replace("seconds", "")
                total_time = total_time[:len(total_time) - 1]
            if i.find("File/Folder for backup:") >= 0:
                position = i.split("[INFO]")[1].split(":")[1]
                if position.find("_MV") >= 0:
                    position = position.split("_MV")[0]
                dataset_time = position
        subinfo = {}
        subinfo['dataset_time'] = dataset
        subinfo['upload_time'] = datetime_str
        subinfo['nb_uploaded_files'] = nb_uploaded_files
        subinfo['nb_inspected_files'] = nb_inspected_files
        subinfo['tr_amount_up'] = tr_amount_up
        subinfo['tr_amount_dw'] = tr_amount_dw
        subinfo['tr_rate_up'] = tr_rate_up
        subinfo['tr_rate_dw'] = tr_rate_dw
        subinfo['total_time'] = total_time
        return dataset, subinfo

    def read_all_logfiles(self):
        """A function to read all logfile at the same time"""
        print(self.f_folder)
        import ROOT as root
        total_upload_time_per_dataset = 0
        total_upload_volume = 0
        fin = root.TFile(
            "/home/xe1ttransfer/tsm_log/tsm_summary.root", "RECREATE")
        gr_upload_rate = root.TGraph()
        gr_upload_rate.SetName("upload_rate")
        gr_dwload_rate = root.TGraph()
        gr_dwload_rate.SetName("download_rate")
        gr_upAmount = root.TGraph()
        gr_upAmount.SetName("Upload_Amount")
        gr_dwAmount = root.TGraph()
        gr_dwAmount.SetName("Download_Amount")
        gr_total_time = root.TGraph()
        gr_total_time.SetName("total_time")
        gr_nb_uploaded_files = root.TGraph()
        gr_nb_uploaded_files.SetName("gr_nb_uploaded_files")
        i = 0
        for i_file in self.flist:
            # print(i_file)
            try:
                filename, info = self.read_logfile(
                    self.f_folder + i_file, "Upload to tape: [succcessful]")
                # NOTE(review): the totals are accumulated here AND again
                # below inside the nb_* > 0 branch -- possible double
                # counting; confirm which accumulation is intended.
                total_upload_time_per_dataset += float(info['total_time'])
                total_upload_volume += float(info['tr_amount_up'])
                print(filename, info)
                dataset_time = info['dataset_time']
                upload_time = info['upload_time']
                upload_rate = float(info['tr_rate_up'])
                dwload_rate = float(info['tr_rate_dw'])
                upAmount = float(info['tr_amount_up'])
                dwAmount = float(info['tr_amount_dw'])
                total_time = float(info['total_time'])
                nb_uploaded_files = float(info['nb_uploaded_files'])
                nb_inspected_files = float(info['nb_inspected_files'])
                # Quick Summary calculation:
                if nb_uploaded_files > 0 and nb_inspected_files > 0:
                    total_upload_time_per_dataset += total_time  # sec
                    total_upload_volume += upAmount  # GB
                x = time.strptime(upload_time, '%Y%m%d_%H%M%S')
                y = time.mktime(x)
                x_axis = y  # specify the x axis in seconds
                if nb_uploaded_files > 0 and nb_inspected_files > 0:
                    gr_upload_rate.SetPoint(i, x_axis, upload_rate)
                    gr_dwload_rate.SetPoint(i, x_axis, dwload_rate)
                    gr_upAmount.SetPoint(i, x_axis, upAmount)
                    gr_dwAmount.SetPoint(i, x_axis, dwAmount)
                    gr_total_time.SetPoint(i, x_axis, total_time)
                    gr_nb_uploaded_files.SetPoint(i, x_axis, nb_uploaded_files)
                    print("-> Output: time vs. uploaded files",
                          x_axis, nb_uploaded_files)
                i = i + 1
            # bare except: any unreadable/garbled log file is skipped
            except:
                print("Log file not readable: {f}".format(f=i_file))
        gr_upload_rate.SetTitle("Upload Rate")
        gr_upload_rate.GetXaxis().SetTitle("time [s]")
        gr_upload_rate.GetYaxis().SetTitle("Upload Rate [kB/s]")
        gr_upload_rate.Write()
        gr_dwload_rate.SetTitle("Download Rate")
        gr_dwload_rate.GetXaxis().SetTitle("time [s]")
        gr_dwload_rate.GetYaxis().SetTitle("Download Rate [kB/s]")
        gr_dwload_rate.Write()
        gr_upAmount.SetTitle("Amount of uploaded data")
        gr_upAmount.GetXaxis().SetTitle("time [s]")
        gr_upAmount.GetYaxis().SetTitle("Upload [GB]")
        gr_upAmount.Write()
        gr_dwAmount.SetTitle("Amount of downloaded data")
        gr_dwAmount.GetXaxis().SetTitle("time [s]")
        gr_dwAmount.GetYaxis().SetTitle("Download [GB]")
        gr_dwAmount.Write()
        gr_total_time.SetTitle("Total upload time")
        gr_total_time.GetXaxis().SetTitle("time [s]")
        gr_total_time.GetYaxis().SetTitle("UploadTime [s]")
        gr_total_time.Write()
        gr_nb_uploaded_files.SetTitle("Number of uploaded files")
        gr_nb_uploaded_files.GetXaxis().SetTitle("time [s]")
        gr_nb_uploaded_files.GetYaxis().SetTitle("Files [#]")
        gr_nb_uploaded_files.Write()
        print("Total upload time: ",
              total_upload_time_per_dataset / 60 / 60, 'hours')
        print("Total uploaded volume: ", total_upload_volume / 1024, "TB")
class TSMDatabaseCheck(Task):
    """A class to cross check runDB information
    and grab tsm-server information via tsm query
    """

    def __init__(self):
        # a single dsmc wrapper shared by all queries
        self.tsm = TSMclient()

    def each_location(self, data_doc):
        # placeholder: no per-location cross check implemented yet
        hostname = config.get_hostname()
        destination = config.get_config("tsm-server")

    def get_info(self, tsm_path):
        """Query the tsm-server for *tsm_path* and return the sum of the
        first column of every "DEFAULT" output line, scaled by
        1024*1024."""
        # dsmc expects a trailing slash on the query path
        if tsm_path[-1] != "/":
            tsm_path += "/"
        logging.info("Query tsm-server information for path %s", tsm_path)

        query_script = self.tsm.tsm_commands("check-for-raw-data").format(path=tsm_path)
        msg_std, msg_err = self.tsm.doTSM(query_script)

        # locate where the listing starts (kept for parity with the
        # original implementation; the value is not used below)
        read_position = 0
        for idx, line in enumerate(msg_std):
            if line.find("Accessing as node: XENON") >= 0:
                read_position = idx + 3

        file_size_run = 0
        for idx, line in enumerate(msg_std):
            if line.find("DEFAULT") >= 0:
                columns = list(filter(None, line.split(" ")))
                file_size = float(columns[0].replace(",", ""))
                file_size_run += file_size / 1024 / 1024
        return file_size_run
class TSMStatusCheck(Task):
    """Check TSM related entries and status messages
    This notifies the run database.
    """

    def __init__(self, db, status):
        """
        :param db: run document (dictionary) of the run to inspect
        :param status: transfer status value to report on
        """
        self.run_doc = db
        self.status = status
        Task.__init__(self)

    def each_run(self):
        """Log every tsm-server data entry of this run whose status
        matches ``self.status``, together with its checksum state.

        Cleanup: removed the unused locals ``cnt`` and
        ``data_path_datamanager`` from the original implementation.
        """
        for data_doc in self.run_doc['data']:
            # only tape (tsm-server) entries are of interest here
            if data_doc['host'] != "tsm-server":
                continue
            if data_doc['status'] == self.status:
                if data_doc['checksum'] != None:
                    cksum = "checksum: YES"
                else:
                    cksum = "checksum: NO"
                logging.info("Run %s/%s at %s: Status: %s, Location: %s, %s",
                             self.run_doc['number'], self.run_doc['name'], data_doc['host'], data_doc['status'], data_doc['location'], cksum)
| 38.679847 | 142 | 0.54526 |
285a8bab289bfb8c666439b93d30129bb0e1ff4e | 2,076 | py | Python | src/network/topology.py | joelwanner/smtax | 7d46f02cb3f15f2057022c574e0f3a8e5236d647 | [
"MIT"
] | null | null | null | src/network/topology.py | joelwanner/smtax | 7d46f02cb3f15f2057022c574e0f3a8e5236d647 | [
"MIT"
] | null | null | null | src/network/topology.py | joelwanner/smtax | 7d46f02cb3f15f2057022c574e0f3a8e5236d647 | [
"MIT"
] | null | null | null | from network.route import *
class Host(object):
    """A network node with receiving/sending capacities and an optional
    amplification factor (shown in str() only when != 1)."""

    def __init__(self, name, r, s, a=1):
        self.name = name
        self.receiving_cap = r
        self.sending_cap = s
        self.amp_factor = a
        self.links = []

    def add_link(self, l):
        """Register a link attached to this host."""
        self.links.append(l)

    def __str__(self):
        base = (self.name, self.receiving_cap, self.sending_cap)
        if self.amp_factor == 1:
            return "%s(%d,%d)" % base
        return "%s(%d,%d,%d)" % (base + (self.amp_factor,))

    def __repr__(self):
        return self.name
class Server(Host):
    """Host variant whose textual form is prefixed with an underscore."""

    def __init__(self, name, r, s, a):
        super().__init__(name, r, s, a)

    def __str__(self):
        return "_" + super().__str__()
class Router(Server):
    """A Server whose amplification factor is fixed to 1."""

    def __init__(self, name, r, s):
        super().__init__(name, r, s, 1)
class Link(object):
    """Undirected, capacity-weighted edge between two hosts."""

    def __init__(self, h1, h2, c):
        self.h1 = h1
        self.h2 = h2
        self.capacity = c

    def neighbor(self, h):
        """Return the endpoint opposite *h*, or None if *h* is not an
        endpoint of this link."""
        if h == self.h1:
            return self.h2
        if h == self.h2:
            return self.h1
        return None

    def __repr__(self):
        return "%s--%s" % (self.h1.name, self.h2.name)

    def __str__(self):
        return "%s:%d" % (self.__repr__(), self.capacity)
class Topology(object):
    """A set of hosts and links; each link is registered on both of its
    endpoints at construction time."""

    def __init__(self, hosts, links):
        self.hosts = hosts
        self.links = links
        self.__routes = None
        for link in links:
            link.h1.add_link(link)
            link.h2.add_link(link)

    def get_routes(self):
        """Lazily build and cache the routing table."""
        if not self.__routes:
            self.__routes = RoutingTable(self)
        return self.__routes

    def __str__(self):
        hosts_part = ",\n\t".join(str(h) for h in self.hosts)
        links_part = ",\n\t".join(str(l) for l in self.links)
        return "hosts {\n\t%s\n}\nlinks {\n\t%s\n}" % (hosts_part, links_part)

    @classmethod
    def from_string(cls, s):
        """Parse a textual network description into a Topology."""
        return parser.parse_network(s)
# TODO: remove workaround for circular dependencies
import interface.parse as parser
| 23.590909 | 102 | 0.560694 |
285ae74b0cd6ba7b852c770105e6b5d4523be1ae | 5,310 | py | Python | leetcode_python/Sort/wiggle-sort-ii.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 18 | 2019-08-01T07:45:02.000Z | 2022-03-31T18:05:44.000Z | leetcode_python/Sort/wiggle-sort-ii.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Sort/wiggle-sort-ii.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | 15 | 2019-12-29T08:46:20.000Z | 2022-03-08T14:14:05.000Z | # V0
# V1
# https://www.hrwhisper.me/leetcode-wiggle-sort-ii/
class Solution(object):
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Sort a copy, then fill even slots from the top of the lower half
        (downwards) and odd slots from the top of the upper half.
        """
        ordered = sorted(nums)
        lo = (len(nums) + 1) >> 1  # size of the lower half
        hi = len(nums)
        for idx in range(len(nums)):
            if idx & 1 == 0:
                lo -= 1
                nums[idx] = ordered[lo]
            else:
                hi -= 1
                nums[idx] = ordered[hi]
# V1'
# http://bookshadow.com/weblog/2015/12/31/leetcode-wiggle-sort-ii/
class Solution(object):
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Fill the odd indices first, then the even ones, each time popping
        the largest remaining value of the sorted copy.

        Bug fix: ``range(...) + range(...)`` only works on Python 2 where
        range returns a list; on Python 3 it raises TypeError. Converting
        both ranges to lists keeps the same iteration order on both.
        """
        size = len(nums)
        snums = sorted(nums)
        for x in list(range(1, size, 2)) + list(range(0, size, 2)):
            nums[x] = snums.pop()
# V1''
# https://www.jiuzhang.com/solution/wiggle-sort-ii/#tag-highlight-lang-python
class Solution:
    """
    @param: nums: A list of integers
    @return: nothing
    """

    def wiggleSort(self, nums):
        # O(n) average time, O(1) extra space: quickselect the median,
        # then 3-way partition over a "virtual index" mapping so that
        # larger values land on odd slots and smaller ones on even slots.
        if not nums:
            return
        # partition nums into smaller half and bigger half
        # all nums in smaller half <= any num in bigger half
        median = self.find_median(nums)
        n = len(nums)
        # reorder the nums from
        # 0 => n-1(odd), (n-2)(even)
        # 1 => n-3
        # 2 => n-5
        # ...
        # (n - 1) / 2 => 0
        # (n - 1) / 2 + 1 => n - 2(odd), n - 1(even)
        # (n - 1) / 2 + 2 => n - 4(odd), n - 3(even)
        # ...
        def get_index(i):
            # map virtual index i to the real array index (see table above)
            if i <= (n - 1) // 2:
                return n - i * 2 - 1 - (n + 1) % 2
            i -= (n - 1) // 2 + 1
            return n - i * 2 - 1 - n % 2
        # 3-way partition
        left, i, right = 0, 0, n - 1
        while i <= right:
            if nums[get_index(i)] < median:
                nums[get_index(left)], nums[get_index(i)] = nums[get_index(i)], nums[get_index(left)]
                i += 1
                left += 1
            elif nums[get_index(i)] == median:
                i += 1
            else:
                nums[get_index(right)], nums[get_index(i)] = nums[get_index(i)], nums[get_index(right)]
                right -= 1

    def find_median(self, nums):
        # median = element that would sit at index (len-1)//2 when sorted
        return self.find_kth(nums, 0, len(nums) - 1, (len(nums) - 1) // 2)

    def find_kth(self, nums, start, end, kth):
        # k is zero based
        # quickselect: partition around the middle element, then recurse
        # into whichever side still contains index kth
        left, right = start, end
        mid = nums[(left + right) // 2]
        while left <= right:
            while left <= right and nums[left] < mid:
                left += 1
            while left <= right and nums[right] > mid:
                right -= 1
            if left <= right:
                nums[left], nums[right] = nums[right], nums[left]
                left, right = left + 1, right - 1
        if kth <= right:
            return self.find_kth(nums, start, right, kth)
        elif kth >= left:
            return self.find_kth(nums, left, end, kth)
        else:
            return nums[kth]
# V2
# Time: O(n) ~ O(n^2)
# Space: O(1)
# Tri Partition (aka Dutch National Flag Problem) with virtual index solution. (TLE)
from random import randint
class Solution2(object):
    def wiggleSort(self, nums):
        """
        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.

        Randomized quickselect of the median followed by a reversed
        tri-partition over virtual indices.

        Bug fix: ``/`` produced floats on Python 3 where integer indices
        are required (``N`` in the virtual-index modulo and ``mid`` used
        as a list index); replaced with floor division ``//``, which is
        what the Python 2 original computed.
        """
        def findKthLargest(nums, k):
            # randomized quickselect for the k-th largest element (1-based)
            left, right = 0, len(nums) - 1
            while left <= right:
                pivot_idx = randint(left, right)
                new_pivot_idx = partitionAroundPivot(left, right, pivot_idx, nums)
                if new_pivot_idx == k - 1:
                    return nums[new_pivot_idx]
                elif new_pivot_idx > k - 1:
                    right = new_pivot_idx - 1
                else:  # new_pivot_idx < k - 1.
                    left = new_pivot_idx + 1

        def partitionAroundPivot(left, right, pivot_idx, nums):
            # move everything greater than the pivot to the left side,
            # return the pivot's final position
            pivot_value = nums[pivot_idx]
            new_pivot_idx = left
            nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
            for i in range(left, right):
                if nums[i] > pivot_value:
                    nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
                    new_pivot_idx += 1
            nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
            return new_pivot_idx

        def reversedTriPartitionWithVI(nums, val):
            # Dutch-national-flag partition performed through a virtual
            # index mapping: odd real slots first, then even ones
            def idx(i, N):
                return (1 + 2 * (i)) % N
            N = len(nums) // 2 * 2 + 1  # // : N must be an integer modulus
            i, j, n = 0, 0, len(nums) - 1
            while j <= n:
                if nums[idx(j, N)] > val:
                    nums[idx(i, N)], nums[idx(j, N)] = nums[idx(j, N)], nums[idx(i, N)]
                    i += 1
                    j += 1
                elif nums[idx(j, N)] < val:
                    nums[idx(j, N)], nums[idx(n, N)] = nums[idx(n, N)], nums[idx(j, N)]
                    n -= 1
                else:
                    j += 1

        mid = (len(nums) - 1) // 2  # // : mid is used as a list index
        findKthLargest(nums, mid + 1)
        reversedTriPartitionWithVI(nums, nums[mid])
285af3b4261da560e2b691c6b8e6dc0c7a368402 | 517 | py | Python | model/Rules/ZigZag.py | GigliOneiric/ElliotWave | 5d0fc166530a57132dce4e4c00ecb33cb8101aaa | [
"Apache-2.0"
] | null | null | null | model/Rules/ZigZag.py | GigliOneiric/ElliotWave | 5d0fc166530a57132dce4e4c00ecb33cb8101aaa | [
"Apache-2.0"
] | null | null | null | model/Rules/ZigZag.py | GigliOneiric/ElliotWave | 5d0fc166530a57132dce4e4c00ecb33cb8101aaa | [
"Apache-2.0"
] | null | null | null | import config.Text
class ZigZag:
    """Rule set describing a zig-zag Elliott-wave pattern."""

    def __init__(self, name: str):
        self._name = name

    @property
    def name(self):
        """Pattern name given at construction time."""
        return self._name

    @property
    def conditions(self):
        """Mapping of rule id -> rule description: the waves involved,
        the predicate to evaluate, and the message shown when it holds."""
        rules = {
            'wa_b': {
                config.Text.waves: ['waveA', 'waveB'],
                config.Text.function: lambda waveA, waveB: waveA.wave_length > waveB.wave_length,
                config.Text.message: 'Wave A is longer than wave B'
            },
        }
        return rules
| 21.541667 | 97 | 0.54352 |
285ef9691cdce93a606e382f9fdd9a1cebb1c5b6 | 232 | py | Python | views.py | Rexypoo/shortnsweet | e773f01f2fdd6630b8d649232b48a753aa387c4f | [
"Apache-2.0"
] | null | null | null | views.py | Rexypoo/shortnsweet | e773f01f2fdd6630b8d649232b48a753aa387c4f | [
"Apache-2.0"
] | null | null | null | views.py | Rexypoo/shortnsweet | e773f01f2fdd6630b8d649232b48a753aa387c4f | [
"Apache-2.0"
] | null | null | null | from django.shortcuts import get_object_or_404, redirect, render
from .models import ShortURL
def redirect_alias(request, short_name):
shorturl = get_object_or_404(ShortURL, alias=short_name)
return redirect(shorturl.url)
| 29 | 64 | 0.806034 |
285f254edbac663963266cf598f4a69429de61bf | 4,153 | py | Python | pyautomailer/command_line.py | matteocappello94/pyautomailer | 933c23426d00d32543da3af03fe67a7e8fa38247 | [
"MIT"
] | null | null | null | pyautomailer/command_line.py | matteocappello94/pyautomailer | 933c23426d00d32543da3af03fe67a7e8fa38247 | [
"MIT"
] | 2 | 2018-08-08T07:51:14.000Z | 2018-08-10T14:35:32.000Z | pyautomailer/command_line.py | matteocappello94/pyautomailer | 933c23426d00d32543da3af03fe67a7e8fa38247 | [
"MIT"
] | null | null | null | import argparse
import sys
import logging as log
from pyautomailer import PyAutoMailer, PyAutoMailerMode
def main():
parser = parse_args(sys.argv[1:])
# Auto mailer init.
am = PyAutoMailer(parser.sender,
parser.host,
parser.port,
parser.username,
parser.password)
# Auto mailer property
am.test = parser.test # Test mode
am.log_file = parser.log_file
am.log_level = get_log_level(parser.log_level)
am.subject = parser.subject
am.body = parser.body
am.body_file = parser.body_file
# Attachments list separated by commas (ONE_SEND mode)
if parser.attachments is not None:
am.attachments = parser.attachments.split(',')
parser.func(parser, am)
def parse_args(args):
    """Build the CLI parser and parse *args* (typically sys.argv[1:]).

    Two sub-commands select the sending mode:
      * ``bulk-send`` (``bs``): recipients and dynamic fields from a CSV file.
      * ``one-send``  (``os``): a single message to one recipient.

    Fix: the sub-parser locals were named ``bs`` and ``os`` — the latter
    shadowed the imported ``os`` module inside this function; both are
    renamed to descriptive, non-shadowing names.
    """
    parser = argparse.ArgumentParser(prog='pyautomailer',
                                     description='A fully customizable automatic bulk email sending script')
    subparsers = parser.add_subparsers(title='List of subcommands',
                                       description='Sending modes')
    bulk_parser = subparsers.add_parser('bulk-send', aliases=['bs'])
    one_parser = subparsers.add_parser('one-send', aliases=['os'])
    parser.add_argument('-H', '--host', type=str,
                        help='email client connection host')
    parser.add_argument('-P', '--port', type=int,
                        help='email client connection port')
    parser.add_argument('-U', '--username', type=str,
                        help='email client connection username')
    parser.add_argument('-PWD', '--password', type=str,
                        help='email client connection password')
    parser.add_argument('-SND', '--sender', type=str,
                        help='sender of message')
    parser.add_argument('-S', '--subject', type=str,
                        help='subject of message')
    parser.add_argument('-A', '--attachments', type=str,
                        help='attachments of message separated by commas')
    # Body may come inline or from a file, but not both.
    body_group = parser.add_mutually_exclusive_group()
    body_group.add_argument('-BF', '--body-file', type=str,
                            help='a file that contains HTML body code')
    body_group.add_argument('-B', '--body', type=str,
                            help='body message')
    parser.add_argument('-t', '--test', action='store_true',
                        help='run script in TEST mode without sending emails')
    parser.add_argument('-lf', '--log-file', type=str,
                        help='log file path')
    parser.add_argument('-ll', '--log-level', type=str,
                        choices=['CRITICAL',
                                 'ERROR',
                                 'WARNING',
                                 'INFO',
                                 'DEBUG'],
                        help='log level, default set to INFO')
    # Bulk send arguments
    bulk_parser.add_argument('source_file', metavar='SOURCE_FILE', type=str,
                             help='.CSV file source that contains emails and dynamics fields')
    # One send arguments
    one_parser.add_argument('recipient', metavar='RECIPIENT', type=str,
                            help='recipient of message')
    # Each sub-command dispatches through the ``func`` attribute.
    bulk_parser.set_defaults(func=bulk_send)
    one_parser.set_defaults(func=one_send)
    return parser.parse_args(args)
# Bulk-send mode function
def bulk_send(args, am):
    """Handler for the 'bulk-send' sub-command: recipients come from a CSV file."""
    am.mode = PyAutoMailerMode.BULK_SEND
    run_service(am, args.source_file)
# One-send mode function
def one_send(args, am):
    """Handler for the 'one-send' sub-command: send to a single recipient."""
    am.mode = PyAutoMailerMode.ONE_SEND
    run_service(am, args.recipient)
# From log_level string get log_level object of logging module
def get_log_level(log_level='INFO'):
    """Map a level name (e.g. 'DEBUG') to the logging module's level constant.

    Unknown names fall back to logging.INFO.

    Fixes: the previous version took a spurious ``self`` parameter (this is a
    module-level function, so the caller's level string was bound to ``self``
    and the level was always INFO), and its dict fallback was a lambda rather
    than the INFO constant.
    """
    str_log_level = {'CRITICAL': log.CRITICAL,
                     'ERROR': log.ERROR,
                     'WARNING': log.WARNING,
                     'INFO': log.INFO,
                     'DEBUG': log.DEBUG
                     }
    return str_log_level.get(log_level, log.INFO)
def run_service(am, arg):
    """Run the mailer's sending service with *arg* (CSV path or recipient),
    then close its connection."""
    # Start sending service
    am.run_service(arg)
    # Close connection
    am.close()
| 36.752212 | 79 | 0.575969 |
285fac9d4a82de6931604146cf170d6983d5310f | 4,225 | py | Python | test-suite/handwritten-src/python/test_foo_interface.py | trafi/trafi-djinni | 47cd2c849782e2ab4b38e5dc6a5a3104cc87f673 | [
"Apache-2.0"
] | 16 | 2020-10-18T20:09:29.000Z | 2022-02-21T07:11:13.000Z | test-suite/handwritten-src/python/test_foo_interface.py | trafi/trafi-djinni | 47cd2c849782e2ab4b38e5dc6a5a3104cc87f673 | [
"Apache-2.0"
] | 53 | 2020-10-13T20:08:29.000Z | 2022-03-10T14:59:50.000Z | test-suite/handwritten-src/python/test_foo_interface.py | trafi/trafi-djinni | 47cd2c849782e2ab4b38e5dc6a5a3104cc87f673 | [
"Apache-2.0"
] | 13 | 2020-10-16T20:31:36.000Z | 2022-01-28T15:37:05.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
# Example code written by a python developer to access cpp implementation of Foo
# This file should be hand-written by a python developer
from foo_interface import FooInterface
from djinni.support import decoded_utf_8
import sys
PYTHON3 = sys.version_info[0] >= 3
def test_ints():
    # Round-trip an int32 through the native implementation, then verify
    # that int32_inverse negates the value.
    foo = FooInterface.create()
    iSet = 8
    foo.set_private_int32(iSet)
    assert iSet == foo.get_private_int32(), "TestInts failed"
    iSet = 18
    foo.set_private_int32(foo.int32_inverse(iSet))
    assert -iSet == foo.get_private_int32(), "TestInts failed"
    print ("TestInts succeeded")
def test_interface_as_return_value():
    # get_foo_primitives returns another interface object; exercise a bool
    # round-trip on it to prove the returned proxy is functional.
    foo = FooInterface.create()
    foo_primitives = foo.get_foo_primitives()
    # Test a foo_primitives specific feature (impl of bools) works
    b = True
    foo_primitives.set_bool(b)
    assert b == foo_primitives.get_bool(), "test_bool failed"
    b = False
    foo_primitives.set_bool(b)
    assert b == foo_primitives.get_bool(), "test_bool failed"
# Can set: unicode strings (python 2 and 3), bytes utf-8 encoded (python 3)
# Will get: utf-8 encoded strings, and utf-8 encoded bytes respectively
DECODEUtf8 = 1


def test_strings():
    # Round-trip a variety of unicode values (and, on Python 3, utf-8 encoded
    # bytes). Each dict entry maps a value to whether the expected value must
    # first be decoded from utf-8 before comparison.
    foo = FooInterface.create()
    strs = dict([
        # PYTHON 2 and 3 unicode strings
        (u"", not DECODEUtf8),
        (u"my\0text", not DECODEUtf8),
        (u"the best text", not DECODEUtf8),
        (u"my \b friend", not DECODEUtf8),
        #"Non-ASCII / 非 ASCII 字符"
        (u"Non-ASCII / \xe9\x9d\x9e ASCII \xe5\xad\x97\xe7\xac\xa6", not DECODEUtf8),
        (u"Non-ASCII / \u975e ASCII \u5b57\u7b26", not DECODEUtf8)
    ])
    if PYTHON3:
        strs.update({
            chr(40960) + u'abcd' + chr(1972) + u"\0\bhi": not DECODEUtf8, #unicode string
            bytes(chr(40960) + u'abcd' + chr(1972) + u"\0\bhi", 'utf-8'): DECODEUtf8 # bytes utf-8 encoded
        })
    else:
        strs.update({
            unichr(40960) + u'abcd' + unichr(1972) + u"\0\bhi": not DECODEUtf8, #unicode string for python 2
        })
    for sSet, decode in strs.items():
        foo.set_private_string(sSet)
        sSetUnicode = sSet
        if decode:
            sSetUnicode = decoded_utf_8(sSet)
        sGetUnicode = foo.get_private_string()
        assert sSetUnicode == sGetUnicode
    print ("TestStrings succeeded")
def test_abc_direct_instantiate():
    """FooInterface is abstract: direct instantiation must raise TypeError.

    Fix: the previous version used a bare ``except:`` wrapped around the
    ``assert False`` itself, so the AssertionError was swallowed and the test
    could never fail. Catch only TypeError and assert in the ``else`` branch.
    """
    try:
        f = FooInterface()
    except TypeError:
        pass
    else:
        assert False, "Instantiated abstract base class"
def test_abc_subclass_instantiate():
    # A subclass that overrides nothing is still abstract and must fail to
    # instantiate with TypeError.
    class FooInterfaceSub(FooInterface):
        pass
    try:
        dummy = FooInterfaceSub()
        # If instantiation succeeded, this AssertionError propagates (it is
        # not caught by the TypeError handler below).
        assert False, "Instantiated abstract base class"
    except TypeError:
        pass
def test_abc_missing_method_instantiate():
    # Overrides every abstract method except get_private_string (left
    # commented out on purpose); instantiation must still fail.
    class FooInterfaceSub(FooInterface):
        def int32_inverse(self, x):
            pass
        def set_private_int32(self, private_int):
            pass
        def get_private_int32(self):
            pass
        def set_private_string(self, private_string):
            pass
        #def get_private_string(self):
        #    pass
        def get_set_strings(self, ps1, ps2):
            pass
        def get_foo_primitives(self):
            pass
    try:
        dummy = FooInterfaceSub()
        assert False, "Instantiated abstract base class"
    except TypeError:
        pass
def test_abc_successful_instantiate():
    # With every abstract method overridden, instantiation must succeed.
    class FooInterfaceSub(FooInterface):
        def int32_inverse(self, x):
            pass
        def set_private_int32(self, private_int):
            pass
        def get_private_int32(self):
            pass
        def set_private_string(self, private_string):
            pass
        def get_private_string(self):
            pass
        def get_set_strings(self, ps1, ps2):
            pass
        def get_foo_primitives(self):
            pass
    dummy = FooInterfaceSub()
286006999e3a7c33bbad8b78611b5c41448309f0 | 4,324 | py | Python | examples/dvrl_asr/dvrl_asr_finetuning.py | SeunghyunSEO/seosh_fairseq | 443b2a8effb6b8fba5758989076cf992470ccb62 | [
"MIT"
] | null | null | null | examples/dvrl_asr/dvrl_asr_finetuning.py | SeunghyunSEO/seosh_fairseq | 443b2a8effb6b8fba5758989076cf992470ccb62 | [
"MIT"
] | 2 | 2022-02-22T08:28:06.000Z | 2022-02-22T09:26:26.000Z | examples/dvrl_asr/dvrl_asr_finetuning.py | SeunghyunSEO/seosh_fairseq | 443b2a8effb6b8fba5758989076cf992470ccb62 | [
"MIT"
] | null | null | null | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
import os
import torch
import json
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Optional, Any
from fairseq.data import AddTargetDataset, Dictionary, encoders
from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig
from fairseq.tasks.audio_finetuning import AudioFinetuningTask, AudioFinetuningConfig
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.configs import GenerationConfig
from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel
from fairseq.tasks import FairseqTask, register_task
from fairseq import utils
from fairseq.logging import metrics
from fairseq.optim.amp_optimizer import AMPOptimizer
from omegaconf import MISSING, II, OmegaConf
logger = logging.getLogger(__name__)
@dataclass
class DVRLAudioFinetuningConfig(AudioFinetuningConfig):
    # Extends the standard audio fine-tuning config with settings for the
    # extra validation subset used during DVRL-style training.
    tmp: bool = field(
        default=False,
        metadata={"help": ""},
    )
    # Name of the dataset split iterated inside train_step.
    # NOTE(review): the help text below looks copy-pasted from an unrelated
    # field ("path to wav2vec ctc model") — confirm and correct upstream.
    dvrl_valid_subset: str = field(
        default='tiny_test_other', metadata={"help": "path to wav2vec ctc model"}
    )
    # The remaining knobs mirror the top-level dataset settings via OmegaConf
    # interpolation (II).
    dvrl_valid_max_tokens: int = II("dataset.max_tokens")
    dvrl_valid_max_sentences: int = II("dataset.batch_size")
    dvrl_valid_max_positions: int = II("dataset.max_tokens")
    dvrl_valid_num_workers: int = II("dataset.num_workers")
    dvrl_valid_data_buffer_size: int = II("dataset.data_buffer_size")
@register_task("dvrl_audio_finetuning", dataclass=DVRLAudioFinetuningConfig)
class DVRLAudioFinetuningTask(AudioFinetuningTask):
    """Audio fine-tuning task variant for DVRL-style data valuation.

    On top of the stock fine-tuning behaviour, it eagerly loads a dedicated
    validation subset and hands a fresh iterator over that subset to the
    criterion on every training step.
    """

    cfg: DVRLAudioFinetuningConfig

    def __init__(
        self,
        cfg: DVRLAudioFinetuningConfig,
    ):
        super().__init__(cfg)
        self.cfg = cfg
        self.dvrl_valid_subset = cfg.dvrl_valid_subset
        # Load the DVRL validation split up front so train_step can build
        # iterators over it.
        self.load_dataset(self.dvrl_valid_subset, cfg, combine=False, epoch=1)
        # self.valid_subset_for_dve_training = self.get_valid_iterator(subset, cfg)

    def train_step(
        self, sample, model, criterion, optimizer, update_num, ignore_grad=False
    ):
        """One training update.

        Unlike the stock task, the criterion also receives the optimizer and
        an iterator over the DVRL validation subset. The backward pass here
        is commented out, so presumably the criterion drives the optimizer
        itself — confirm against the criterion implementation.
        """
        model.train()
        model.set_num_updates(update_num)
        with torch.autograd.profiler.record_function("forward"):
            with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):
                loss, sample_size, logging_output = criterion(
                    model,
                    sample,
                    optimizer,
                    # self.valid_subset_for_dve_training
                    self.get_valid_iterator(self.dvrl_valid_subset, self.cfg).next_epoch_itr(shuffle=False, set_dataset_epoch=False)
                )
        if ignore_grad:
            # Zero the loss so this step contributes no gradient signal.
            loss *= 0
        # with torch.autograd.profiler.record_function("backward"):
        #     optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        # Standard evaluation step: no gradients, criterion called plainly.
        model.eval()
        with torch.no_grad():
            loss, sample_size, logging_output = criterion(model, sample)
        return loss, sample_size, logging_output

    def get_valid_iterator(
        self,
        subset,
        cfg,
        disable_iterator_cache=False,
    ):
        """Return an EpochBatchIterator over given validation subset for a given epoch."""
        batch_iterator = self.get_batch_iterator(
            dataset=self.dataset(subset),
            max_tokens=cfg.dvrl_valid_max_tokens,
            max_sentences=cfg.dvrl_valid_max_sentences,
            max_positions=utils.resolve_max_positions(
                self.max_positions(),
                cfg.dvrl_valid_max_tokens,
            ),
            seed=1,
            num_workers=cfg.dvrl_valid_num_workers,
            epoch=1,
            data_buffer_size=cfg.dvrl_valid_data_buffer_size,
            disable_iterator_cache=disable_iterator_cache,
            skip_remainder_batch=False,
        )
        # Seed the dummy-batch fallback with this iterator's first batch.
        self.reset_dummy_batch(batch_iterator.first_batch)
        return batch_iterator

    def reset_dummy_batch(self, batch):
        # Fallback batch; presumably mirrors fairseq Trainer's dummy-batch
        # mechanism — confirm how _dummy_batch is consumed downstream.
        self._dummy_batch = batch
286032d47c652ffe5a80685b84caa53cf8ed1a03 | 2,727 | py | Python | opensource/opencv/write_to_video.py | marciojv/hacks-cognitives-plataforms | 5b43f52d6afde4ad2768ad5b85e376578e2c9b2f | [
"Apache-2.0"
] | 1 | 2021-05-14T18:43:51.000Z | 2021-05-14T18:43:51.000Z | opensource/opencv/write_to_video.py | marciojv/hacks-cognitives-plataforms | 5b43f52d6afde4ad2768ad5b85e376578e2c9b2f | [
"Apache-2.0"
] | null | null | null | opensource/opencv/write_to_video.py | marciojv/hacks-cognitives-plataforms | 5b43f52d6afde4ad2768ad5b85e376578e2c9b2f | [
"Apache-2.0"
] | 9 | 2019-02-04T22:08:08.000Z | 2021-07-17T12:12:12.000Z | # para executar
# python write_to_video.py --output example.avi
# ou
# python write_to_video.py --output example.avi --picamera 1
# python -m pip install imutils
from __future__ import print_function
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
#https://www.pyimagesearch.com/2016/02/22/writing-to-video-with-opencv/
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True,
help="path to output video file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-f", "--fps", type=int, default=20,
help="FPS of output video")
ap.add_argument("-c", "--codec", type=str, default="MJPG",
help="codec of output video")
args = vars(ap.parse_args())
# initialize the video stream and allow the camera
# sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
# initialize the FourCC, video writer, dimensions of the frame, and
# zeros array
fourcc = cv2.VideoWriter_fourcc(*args["codec"])
writer = None
(h, w) = (None, None)
zeros = None
# loop over frames from the video stream
while True:
# grab the frame from the video stream and resize it to have a
# maximum width of 300 pixels
frame = vs.read()
frame = imutils.resize(frame, width=300)
# check if the writer is None
if writer is None:
# store the image dimensions, initialize the video writer,
# and construct the zeros array
(h, w) = frame.shape[:2]
writer = cv2.VideoWriter(args["output"], fourcc, args["fps"],
(w * 2, h * 2), True)
zeros = np.zeros((h, w), dtype="uint8")
# break the image into its RGB components, then construct the
# RGB representation of each frame individually
(B, G, R) = cv2.split(frame)
R = cv2.merge([zeros, zeros, R])
G = cv2.merge([zeros, G, zeros])
B = cv2.merge([B, zeros, zeros])
# construct the final output frame, storing the original frame
# at the top-left, the red channel in the top-right, the green
# channel in the bottom-right, and the blue channel in the
# bottom-left
output = np.zeros((h * 2, w * 2, 3), dtype="uint8")
output[0:h, 0:w] = frame
output[0:h, w:w * 2] = R
output[h:h * 2, w:w * 2] = G
output[h:h * 2, 0:w] = B
# write the output frame to file
writer.write(output)
# show the frames
cv2.imshow("Frame", frame)
cv2.imshow("Output", output)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
writer.release()
| 27.826531 | 71 | 0.69527 |
2860ad63802d9b4247cfc5b4ea2a3cd53137c044 | 3,973 | py | Python | src/anaplan_api/Upload.py | pieter-pot/anaplan-api | 1b099cb102f98b114afa0794a40aaf0de19956c1 | [
"BSD-2-Clause"
] | null | null | null | src/anaplan_api/Upload.py | pieter-pot/anaplan-api | 1b099cb102f98b114afa0794a40aaf0de19956c1 | [
"BSD-2-Clause"
] | null | null | null | src/anaplan_api/Upload.py | pieter-pot/anaplan-api | 1b099cb102f98b114afa0794a40aaf0de19956c1 | [
"BSD-2-Clause"
] | null | null | null | # ===============================================================================
# Created: 1 Nov 2021
# @author: Jesse Wilson (Anaplan Asia Pte Ltd)
# Description: Abstract Anaplan Authentication Class
# Input: Username & Password, or SHA keypair
# Output: Anaplan JWT and token expiry time
# ===============================================================================
import logging
import requests
from requests.exceptions import HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout
from .File import File
logger = logging.getLogger(__name__)
class Upload(File):
    """Uploads a local file to an Anaplan model.

    Wraps :class:`File` accessors and implements the two HTTP steps of the
    Anaplan upload flow: resetting the file metadata, then PUT-ing data
    chunks. ``upload`` itself is a stub here.
    """

    def get_base_url(self) -> str:
        """Get base URL for Anaplan API

        :return: Anaplan API base URL
        :rtype: str
        """
        return super().get_base_url()

    def get_workspace(self) -> str:
        """Get the workspace ID

        :return: Workspace ID for the specified model
        :rtype: str
        """
        return super().get_workspace()

    def get_model(self) -> str:
        """Get the model ID

        :return: ID of the specified model
        :rtype: str
        """
        return super().get_model()

    def get_file_id(self) -> str:
        """Get the ID of the specified file

        :return: ID of the specified file
        :rtype: str
        """
        return super().get_file_id()

    def upload(self, chunk_size: int, file: str):
        # Intentionally unimplemented here; presumably overridden by
        # subclasses that know how to chunk the source — confirm.
        pass

    def file_metadata(self, url: str) -> bool:
        """Update file metadata in Anaplan model as first step in file upload process

        :param url: URL of the specified file
        :type url: str
        :raises HTTPError: HTTP error code
        :raises ConnectionError: Network-related errors
        :raises SSLError: Server-side SSL certificate errors
        :raises Timeout: Request timeout errors
        :raises ConnectTimeout: Timeout error when attempting to connect
        :raises ReadTimeout: Timeout error waiting for server response
        :return: Whether metadata was successfully updated
        :rtype: bool
        """
        authorization = super().get_connection().get_auth().get_auth_token()
        file_id = super().get_file_id()

        post_header = {
            "Authorization": authorization,
            "Content-Type": "application/json"
        }

        # chunkCount of -1 presumably signals "number of chunks unknown
        # up front" — confirm against the Anaplan API documentation.
        stream_metadata = {
            "id": file_id,
            "chunkCount": -1
        }

        meta_post = None
        try:
            logger.debug("Updating file metadata.")
            # (5, 30) = (connect timeout, read timeout) in seconds.
            meta_post = requests.post(url, headers=post_header, json=stream_metadata, timeout=(5, 30))
            logger.debug("Complete!")
        except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
            logger.error(f"Error setting metadata {e}", exc_info=True)
            raise Exception(f"Error setting metadata {e}")

        if meta_post.ok:
            return True
        else:
            return False

    def file_data(self, url: str, chunk_num: int, data: str) -> bool:
        """Upload data chunk to the specified file

        :param url: URL of the specified file
        :type url: str
        :param chunk_num: ID of the chunk being uploaded
        :type chunk_num: int
        :param data: Data to upload
        :type data: str
        :raises HTTPError: HTTP error code
        :raises ConnectionError: Network-related errors
        :raises SSLError: Server-side SSL certificate errors
        :raises Timeout: Request timeout errors
        :raises ConnectTimeout: Timeout error when attempting to connect
        :raises ReadTimeout: Timeout error waiting for server response
        :return: Whether file data upload was successful
        :rtype: bool
        """
        authorization = super().get_connection().get_auth().get_auth_token()

        put_header = {
            "Authorization": authorization,
            "Content-Type": "application/octet-stream"
        }

        stream_upload = None
        try:
            # chunk_num is 0-based internally; logs report it 1-based.
            logger.debug(f"Attempting to upload chunk {chunk_num + 1}")
            stream_upload = requests.put(url, headers=put_header, data=data, timeout=(5, 30))
            logger.debug(f"Chunk {chunk_num + 1} uploaded successfully.")
        except (HTTPError, ConnectionError, SSLError, Timeout, ConnectTimeout, ReadTimeout) as e:
            logger.error(f"Error uploading chunk {chunk_num + 1}, {e}", exc_info=True)
            raise Exception(f"Error uploading chunk {chunk_num + 1}, {e}")

        if stream_upload.ok:
            return True
        else:
            return False
286195dbc7f21dde0f07a4dbc6375c32996ea510 | 561 | py | Python | oppadc/osutimingpoint.py | jamuwu/oppadc.py | 3faca744143575f0a4f12f213745b0f311973526 | [
"MIT"
] | 8 | 2019-11-01T00:03:52.000Z | 2021-01-02T18:33:31.000Z | oppadc/osutimingpoint.py | jamuwu/oppadc.py | 3faca744143575f0a4f12f213745b0f311973526 | [
"MIT"
] | 7 | 2019-12-16T16:29:07.000Z | 2021-02-22T01:01:22.000Z | oppadc/osutimingpoint.py | jamuwu/oppadc.py | 3faca744143575f0a4f12f213745b0f311973526 | [
"MIT"
] | 9 | 2019-12-16T21:58:21.000Z | 2022-02-02T12:18:45.000Z | class OsuTimingPoint(object):
    """Represents a timing point in an osu! beatmap.

    If ``change`` is False, ``ms_per_beat`` encodes a multiplier:
        ms_per_beat = -100.0 * bpm_multiplier
    """
    def __init__(self, starttime: "float | str" = 0.0, ms_per_beat: "float | str" = -100.0, change: bool = False):
        # Arguments may arrive as strings (e.g. parsed from a .osu file) and
        # are normalised to float here. Note: the earlier annotation
        # ``float or str`` evaluated to just ``float``; string annotations
        # express the union without requiring a typing import.
        self.starttime: float = float(starttime)
        self.ms_per_beat: float = float(ms_per_beat)
        self.change: bool = bool(change)

    def __str__(self):
        # Delegate to __repr__ so str() and repr() agree.
        return self.__repr__()

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.starttime}ms mspb={round(self.ms_per_beat, 1)}{' [change]' if self.change else ''}>"
28631cb627e7dcbf9512e0e9d35ed83e8378693a | 427 | py | Python | startPWM.py | adeept/adeept_alter | 6adf00eb141405fc3abad44965f81ba7797dd962 | [
"MIT"
] | 1 | 2021-12-21T15:50:57.000Z | 2021-12-21T15:50:57.000Z | startPWM.py | adeept/adeept_alter | 6adf00eb141405fc3abad44965f81ba7797dd962 | [
"MIT"
] | 2 | 2021-03-14T22:05:42.000Z | 2021-07-19T22:13:37.000Z | startPWM.py | adeept/adeept_alter | 6adf00eb141405fc3abad44965f81ba7797dd962 | [
"MIT"
] | null | null | null | import Adafruit_PCA9685
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)
initPWM = 320
setPWM = initPWM
ctrlPort = 11
def main():
    """Interactively tune the PWM value from stdin: 'w' increments,
    's' decrements; every input rewrites the channel and prints the value."""
    global setPWM
    while 1:
        commandInput = input()
        if commandInput == 'w':
            setPWM += 1
        elif commandInput == 's':
            setPWM -= 1
        pwm.set_pwm(ctrlPort, 0, setPWM)
        print(setPWM)
# Run until interrupted; restore the initial PWM value on Ctrl-C.
try:
    main()
except KeyboardInterrupt:
    pwm.set_pwm(ctrlPort, 0, initPWM)
28658c7c561044400a64c09359dccf6abba3fb8e | 2,042 | py | Python | get_tracks.py | RamonPuon/Spotipy-Valence-Analysis | 05f48e068097839d3dbd47d06f69608e48d1ac16 | [
"MIT"
] | null | null | null | get_tracks.py | RamonPuon/Spotipy-Valence-Analysis | 05f48e068097839d3dbd47d06f69608e48d1ac16 | [
"MIT"
] | null | null | null | get_tracks.py | RamonPuon/Spotipy-Valence-Analysis | 05f48e068097839d3dbd47d06f69608e48d1ac16 | [
"MIT"
] | null | null | null | #cred.py, python script with my client ID and my client secret
import cred
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import pandas as pd
client_credential_manager = SpotifyClientCredentials(client_id= cred.client_ID, client_secret= cred.client_SECRET)
sp = spotipy.Spotify(client_credentials_manager= client_credential_manager)
# Function that returns every track from each album from a specific artist
def artist_tracks(artist):
    """Collect every album track for each artist name in *artist*.

    :param artist: iterable of artist names to search on Spotify
    :return: list of rows: [artist_name, album_name, album_uri, track_name,
             release_date, <audio-feature values...>]

    Fixes vs. the previous version: the loop variable no longer shadows the
    ``artist`` parameter, and each page of albums is fetched once instead of
    twice (the old ``while`` condition issued a duplicate API call per page).
    """
    tracks = []
    for artist_name in artist:
        # Get artist id (URI of the first search hit).
        artist_uri = sp.search(artist_name)['tracks']['items'][0]['artists'][0]['uri']
        album_checker = []  # [artists..., album name, release date] keys already seen
        n = 0  # pagination offset, 50 albums per page
        while True:
            dict_list = sp.artist_albums(artist_uri, album_type='album', limit=50, offset=n)['items']
            if not dict_list:
                break
            for album in dict_list:
                # Dedupe albums on contributing artists + name + release date.
                check_this_album = [j['name'] for j in album['artists']]
                check_this_album.append(album['name'])
                check_this_album.append(album['release_date'])
                if check_this_album not in album_checker:
                    album_checker.append(check_this_album)
                    # One row per track, extended with its audio features.
                    tracks.extend([[artist_name, album['name'], album['uri'], song['name'],
                                    album['release_date']] + list(sp.audio_features(song['uri'])[0].values())
                                   for song in sp.album_tracks(album['uri'])['items']])
            n += 50
    return tracks
def df_track(tracklist):
    """Build a deduplicated DataFrame from rows produced by artist_tracks().

    Column names are the five fixed fields followed by Spotify's
    audio-feature keys, fetched once from a sample track so the names stay
    in sync with whatever the API currently returns.
    """
    df = pd.DataFrame(tracklist, columns=['artist',
                                          'album_name',
                                          'album_uri',
                                          'track',
                                          'release_date'] + list(sp.audio_features('6rqhFgbbKwnb9MLmUQDhG6')[0].keys()))
    # The audio-feature payload has a generic 'uri' key; disambiguate it.
    df.rename(columns={'uri': 'song_uri'}, inplace=True)
    df.drop_duplicates(subset=['artist', 'track', 'release_date'], inplace=True)
    return df
28673f63b24f6a069e726650a9df5d529a4e2b9c | 3,053 | py | Python | Uzura/data/subsystem/stream.py | jskny/Uzura | 356f8c25ceef5bd098b8e338e4acabb3f8653dca | [
"MIT"
] | null | null | null | Uzura/data/subsystem/stream.py | jskny/Uzura | 356f8c25ceef5bd098b8e338e4acabb3f8653dca | [
"MIT"
] | null | null | null | Uzura/data/subsystem/stream.py | jskny/Uzura | 356f8c25ceef5bd098b8e338e4acabb3f8653dca | [
"MIT"
] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ユーザーストリームで流れているアカウントの画像を片っ端から取得、保存。
# Perl の名刺化スクリプトと連携せよ。
# 2012 / 11 / 14
# jskny
# http://d.hatena.ne.jp/iacctech/20110429/1304090609
import sys, tweepy, urllib, urllib2
import os, time, subprocess, socket
import re
from tweepy.streaming import StreamListener, Stream
from tweepy.auth import OAuthHandler
from datetime import timedelta
# An empty string means localhost.
host = ''
port = 18385
serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Wait for a client connection.
serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serversock.bind((host, port))
serversock.listen(1)
print 'Waiting for connections...'
clientsock, client_address = serversock.accept()
print 'Connection his Succeed...'
# Fill these in with your own values.
# WARNING(review): real-looking OAuth secrets are hardcoded here; they
# should be revoked and moved out of source control.
consumer_key = "oDiVnBOqcYjie0T8AN6XyA"
consumer_secret = "0rsndWq3N3u8AJXKP7gfwrAcdwzPoFxAgZ5PuLt4Ww"
access_key = "397948629-j4HutoScDcL5ncMZNvuA13JY6BA3D2zEJyZPdEAJ"
access_secret = "N3UGJUwxDcrs0yz4mK3Y9cNhkw8IpO6kHnFIzHMH3pM"
def GetOauth():
    # Build a Tweepy OAuth handler from the credentials above.
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    return auth
# Icon saver: cache a user's profile image next to this script.
def SaveIcon(screen_name):
    flag = os.path.exists("%s/%s.png" % (os.path.dirname(os.path.abspath(__file__)), screen_name))
    if flag:
        # File already exists: skip.
        return;
    else:
        # No file yet: download it!
        urllib.urlretrieve(('http://api.twitter.com/1/users/profile_image/%s' % screen_name), ('%s/%s.png' % (os.path.dirname(os.path.abspath(__file__)), screen_name)))
        return;
# Implemented on top of Tweepy's streaming support.
class AbstractedlyListener(StreamListener):
    def on_status(self, status):
        # Called for every tweet on the user stream: print it, cache the
        # author's icon, hand the tweet to the Perl memo script, and push
        # the tweet id to the connected socket client.
        try:
            # Ignore retweets.
            # NOTE(review): this matches "RT" anywhere in the text, not just
            # leading retweet markers.
            if re.search("RT", status.text.encode("UTF-8")) != None :
                return
            # created_at comes back in UTC (world standard time); add 9
            # hours to convert to JST.
            status.created_at += timedelta(hours=9)
            print "%s" % status.text.encode("UTF-8")
            print "%s(%s) %s via %s\n" % (status.author.name.encode("UTF-8"),
                status.author.screen_name.encode("UTF-8"),
                status.created_at, status.source.encode("UTF-8"))
            SaveIcon(status.author.screen_name.encode("UTF-8"))
            # Run the Perl script that renders the tweet memo.
            subprocess.call(("perl %s\CreateTweetMemo.pl \"%s\" \"%s\" \"%s\" \"%s.png\"" % (os.path.dirname(os.path.abspath(__file__)), status.author.screen_name.encode("UTF-8"), status.text.encode("UTF-8"), status.created_at, status.id_str.encode("UTF-8"))), shell=True)
            # Send the tweet id to the connected client.
            ttt = "%s" % status.id_str.encode('UTF-8')
            # print ttt
            clientsock.sendall(str(ttt))
            time.sleep(0.3)
        except Exception, e:
            print >> sys.stderr, 'Encounted Exception:', e
            pass

    def on_error(self, status_code):
        print 'An error has occured! Status code = %s' % status_code
        return True # keep stream alive

    def on_timeout(self):
        print "UserStream is timeout..."
def main():
    # Authenticate and start consuming the user's home timeline stream.
    auth = GetOauth()
    stream = Stream(auth, AbstractedlyListener(), secure=True)
    stream.userstream()
if __name__ == "__main__":
    # Run until interrupted; always close the client socket on the way out.
    try:
        main()
        clientsock.close()
        sys.exit()
    except KeyboardInterrupt:
        clientsock.close()
        sys.exit()
286a9f1d8d066c57291a41e5d9d48a731a2d4a0c | 541 | py | Python | templates/django/djangoRest/app_dir/user/test/test_user_views.py | david-osas/create-basic-app | 860fc579672855093ad8426fb01d010de4c7cff8 | [
"MIT"
] | 2 | 2020-12-01T11:33:36.000Z | 2020-12-01T12:25:49.000Z | django-rest-framework-boilerplate/app_dir/user/test/test_user_views.py | PiotrZak/lem-golem | 78f91f21b19725fca99c05d8c536330ef2785064 | [
"MIT"
] | 2 | 2020-11-25T14:38:57.000Z | 2020-11-25T22:55:25.000Z | templates/django/djangoRest/app_dir/user/test/test_user_views.py | david-osas/create-basic-app | 860fc579672855093ad8426fb01d010de4c7cff8 | [
"MIT"
] | 2 | 2020-11-26T08:59:50.000Z | 2021-03-30T20:01:06.000Z | from django.test import TestCase
from django.urls import reverse
from rest_framework.test import APIClient
from ...factories import UserFactory
class UserViewTest(TestCase):
    """Tests for the 'user' app's index view with an authenticated client."""

    def setUp(self):
        # Authenticate a factory-built user against the DRF test client.
        self.client = APIClient()
        self.user = UserFactory()
        self.client.force_authenticate(user=self.user)
        self.namespace = 'user'
        self.url = reverse(self.namespace + ':index')

    def test_user_index_view(self):
        # An authenticated GET on the index endpoint must return 200.
        response = self.client.get(self.url)
        self.assertEqual(200, response.status_code)
286ba9afaaf93ad96524d8cf507a1bf2ad30a104 | 2,862 | py | Python | port_mapping.py | sbalasa/CiscoFMC | 024c9b6df3513e1e4a8e3e3f976a0c67b58c1909 | [
"MIT"
] | 1 | 2021-11-09T03:56:29.000Z | 2021-11-09T03:56:29.000Z | port_mapping.py | sbalasa/CiscoFMC | 024c9b6df3513e1e4a8e3e3f976a0c67b58c1909 | [
"MIT"
] | null | null | null | port_mapping.py | sbalasa/CiscoFMC | 024c9b6df3513e1e4a8e3e3f976a0c67b58c1909 | [
"MIT"
] | 1 | 2021-11-09T03:56:06.000Z | 2021-11-09T03:56:06.000Z | ports = {
"ssh": {"type": "PortLiteral", "port": "22", "protocol": "6",},
"udp/netbios-dgm": {"type": "PortLiteral", "port": "138", "protocol": "17",},
"udp/netbios-ns": {"type": "PortLiteral", "port": "137", "protocol": "17",},
"tcp/ssh": {"type": "PortLiteral", "port": "22", "protocol": "6",},
"tcp": {"type": "PortLiteral", "protocol": "6",},
"esp": {"type": "PortLiteral", "protocol": "50",},
"ah": {"type": "PortLiteral", "protocol": "51",},
"udp": {"type": "PortLiteral", "protocol": "17",},
"snmp": [
{"type": "PortLiteral", "port": "161", "protocol": "17",},
{"type": "PortLiteral", "port": "162", "protocol": "17",},
],
"udp/snmp": [
{"type": "PortLiteral", "port": "161", "protocol": "17",},
{"type": "PortLiteral", "port": "162", "protocol": "6",},
{"type": "PortLiteral", "port": "162", "protocol": "17",},
],
"udp/snmptrap": {"type": "PortLiteral", "port": "162", "protocol": "6",},
"snmptrap": [
{"type": "PortLiteral", "port": "162", "protocol": "6",},
{"type": "PortLiteral", "port": "162", "protocol": "17",},
],
"https": [
{"type": "PortLiteral", "port": "443", "protocol": "6",},
{"type": "PortLiteral", "port": "443", "protocol": "17",},
],
"tcp/https": {"type": "PortLiteral", "port": "443", "protocol": "6",},
"netbios-ssn": {"type": "PortLiteral", "port": "139", "protocol": "6",},
"tcp/netbios-ssn": {"type": "PortLiteral", "port": "139", "protocol": "6",},
"ntp": {"type": "PortLiteral", "port": "123", "protocol": "17",},
"udp/ntp": {"type": "PortLiteral", "port": "123", "protocol": "17",},
"tcp/tacacs": {"type": "PortLiteral", "port": "49", "protocol": "6",},
"udp/tacacs": {"type": "PortLiteral", "port": "49", "protocol": "17",},
"tcp/www": {"type": "PortLiteral", "port": "80", "protocol": "6",},
"udp/www": {"type": "PortLiteral", "port": "80", "protocol": "17",},
"tcp/http": {"type": "PortLiteral", "port": "80", "protocol": "6",},
"ldaps": {"type": "PortLiteral", "port": "636", "protocol": "6",},
"tcp/ldaps": {"type": "PortLiteral", "port": "636", "protocol": "6",},
"ldap": {"type": "PortLiteral", "port": "389", "protocol": "6",},
"tcp/ldap": {"type": "PortLiteral", "port": "389", "protocol": "6",},
"tcp/syslog": {"type": "PortLiteral", "port": "514", "protocol": "6",},
"udp/syslog": {"type": "PortLiteral", "port": "514", "protocol": "17",},
"tcp/domain": {"type": "PortLiteral", "port": "53", "protocol": "6", },
"udp/domain": {"type": "PortLiteral", "port": "53", "protocol": "17",},
"tcp/rsh": {"type": "PortLiteral", "port": "514", "protocol": "6",},
"icmp": {"type": "ICMPv4PortLiteral", "protocol": "1", "icmpType": "Any",},
"any": [],
}
| 57.24 | 82 | 0.490217 |
286dae799942d25528e620a011ce5d17032d1ce7 | 2,336 | py | Python | deep-learning-lab-00/binlogreg.py | BalderOdinson/Deep-Learning-Lab | 70786ff1be40fc829d64a644585c1d5683c76538 | [
"MIT"
] | null | null | null | deep-learning-lab-00/binlogreg.py | BalderOdinson/Deep-Learning-Lab | 70786ff1be40fc829d64a644585c1d5683c76538 | [
"MIT"
] | null | null | null | deep-learning-lab-00/binlogreg.py | BalderOdinson/Deep-Learning-Lab | 70786ff1be40fc829d64a644585c1d5683c76538 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 18:38:38 2019
@author: Oshikuru
"""
import numpy as np
import matplotlib.pyplot as plt
import random
import data
import pdb
import IPython
# Gradient-descent hyperparameters (module-level so the train function and the
# __main__ driver share one configuration).
param_delta = 0.5    # learning rate
param_niter = 100    # number of gradient-descent iterations
param_lambda = 0.01  # L2-norm regularization strength
def binlogreg_classify(X, w, b):
    """Posterior probabilities P(c_1 | x) of a binary logistic-regression model.

    Arguments
        X: data, np.array NxD
        w: weights, np.array Dx1
        b: bias, np.array 1x1

    Returns
        np.array Nx1 with the probability of class c_1 for every sample.
    """
    scores = np.dot(X, w) + b
    # Numerically stable sigmoid. The original exp(s) / (1 + exp(s)) overflows
    # for s > ~709 and produces inf/inf = nan; evaluate every element with the
    # formulation whose exp() argument is non-positive.
    probs = np.empty_like(scores, dtype=np.float64)
    pos = scores >= 0
    probs[pos] = 1.0 / (1.0 + np.exp(-scores[pos]))
    exp_s = np.exp(scores[~pos])
    probs[~pos] = exp_s / (1.0 + exp_s)
    return probs
def binlogreg_train(X, Y_):
    """Fit binary logistic regression with batch gradient descent.

    Arguments
        X: data, np.array NxD
        Y_: one-hot class labels, np.array Nx2

    Returns
        w, b: logistic-regression parameters

    Uses the module-level hyperparameters param_niter, param_delta and
    param_lambda.
    """
    n_samples = X.shape[0]
    n_features = X.shape[1]

    # Random initialisation; draw order (weights first, bias second) is kept
    # so a fixed numpy seed reproduces the same model.
    w = np.random.randn(n_features, 1)
    b = np.random.randn(1, 1)

    # Second column of the one-hot matrix flags membership of class c_1.
    labels_c1 = np.hsplit(Y_, 2)[1]

    for iteration in range(param_niter):
        # classification scores
        scores = np.dot(X, w) + b  # N x 1
        # probability the model assigns to the true class of each sample
        probs = np.abs((1 / (1 + np.exp(scores))) - labels_c1)  # N x 1
        # cross-entropy loss plus an L2-norm penalty on the weights
        loss = - (1 / n_samples) * np.sum(np.log(probs)) + param_lambda * np.linalg.norm(w)  # scalar

        # diagnostic output every 10 iterations
        if iteration % 10 == 0:
            print("iteration {}: loss {}".format(iteration, loss))

        # derivative of the loss w.r.t. the classification scores
        dL_dscores = np.exp(scores) / (1 + np.exp(scores)) - labels_c1  # N x 1

        # parameter gradients: data term plus regularisation term
        grad_w = np.expand_dims((1 / n_samples) * np.sum(dL_dscores * X, axis=0), axis=1) \
            + param_lambda * (1 / (2 * np.linalg.norm(w))) * 2 * w  # D x 1
        grad_b = (1 / n_samples) * np.sum(dL_dscores)  # 1 x 1

        # gradient-descent step
        w += -param_delta * grad_w
        b += -param_delta * grad_b
    return w, b
if __name__=="__main__":
    # Demo driver: train on synthetic 2-class Gaussian data, report metrics
    # and plot the decision surface. Relies on the project-local `data`
    # module for sampling, evaluation and plotting helpers.
    np.random.seed(100)  # fixed seed so the demo is reproducible

    # get the training dataset
    X,Y_ = data.sample_gauss(2, 100)

    # train the model (labels are passed one-hot encoded)
    w,b = binlogreg_train(X, data.class_to_onehot(Y_))

    # evaluate the model on the training dataset
    probs = binlogreg_classify(X, w,b)
    Y = probs>0.5  # threshold the posteriors at 0.5 for hard predictions

    # report performance
    accuracy, recall, precision = data.eval_perf_binary(Y[:,-1], Y_)
    AP = data.eval_AP(Y_)
    print (accuracy, recall, precision, AP)

    # graph the decision surface
    rect=(np.min(X, axis=0), np.max(X, axis=0))
    data.graph_surface(lambda x: binlogreg_classify(x,w,b), rect, offset=0.5)

    # graph the data points
    data.graph_data(X, Y_, Y[:,-1], special=[])
    plt.show()
| 24.851064 | 141 | 0.610445 |
286e9cde8312920eabbf75c1a2872023f23ceb28 | 778 | py | Python | LintCode/1103.py | RENHANFEI/LintCode | d572dee248ba4c2a95b52cd737d76c7297f4e7b4 | [
"CNRI-Python"
] | null | null | null | LintCode/1103.py | RENHANFEI/LintCode | d572dee248ba4c2a95b52cd737d76c7297f4e7b4 | [
"CNRI-Python"
] | null | null | null | LintCode/1103.py | RENHANFEI/LintCode | d572dee248ba4c2a95b52cd737d76c7297f4e7b4 | [
"CNRI-Python"
] | null | null | null | from collections import Counter
class Solution:
    """
    @param nums: a list of integers
    @return: return a boolean
    """
    def isPossible(self, nums):
        # Greedy split of a sorted array into consecutive subsequences of
        # length >= 3: every number either extends a run that currently ends
        # just below it, or must be able to seed a fresh run of length 3.
        remaining = Counter(nums)  # occurrences not yet assigned to any run
        tails = Counter()          # number of runs ending at a given value
        for value in nums:
            if remaining[value] == 0:
                # already consumed while seeding an earlier run
                continue
            remaining[value] -= 1
            if tails[value - 1] > 0:
                # extend an existing run that ends at value - 1
                tails[value - 1] -= 1
                tails[value] += 1
                continue
            if remaining[value + 1] == 0 or remaining[value + 2] == 0:
                # cannot extend and cannot start a length-3 run: impossible
                return False
            # seed a new run (value, value + 1, value + 2)
            remaining[value + 1] -= 1
            remaining[value + 2] -= 1
            tails[value + 2] += 1
        return True
| 25.933333 | 71 | 0.417738 |
286fb0783887ca84bf84591d7e276b7bf74e2f66 | 2,867 | py | Python | safe_eval/default_rules.py | bentheiii/safe_eval | caf9e7a6df3d6029e4bdac2abe11326d55c09ed2 | [
"MIT"
] | 1 | 2021-05-16T17:24:05.000Z | 2021-05-16T17:24:05.000Z | safe_eval/default_rules.py | bentheiii/safe_eval | caf9e7a6df3d6029e4bdac2abe11326d55c09ed2 | [
"MIT"
] | null | null | null | safe_eval/default_rules.py | bentheiii/safe_eval | caf9e7a6df3d6029e4bdac2abe11326d55c09ed2 | [
"MIT"
] | null | null | null | from _ast import In, NotIn, Is, IsNot
from collections import deque, Counter
from decimal import Decimal
from fractions import Fraction
from safe_eval.rules import BinOpRule, CallableTypeRule, CallableRule, GetattrTypeRule, CallableMethodRule
# Concrete types of the dict view objects (they have no public builtin name).
k_view_type = type({}.keys())
v_view_type = type({}.values())
it_view_type = type({}.items())

# Iterator types produced by the trusted builtin containers, collected by
# instantiating each container and taking type(iter(...)).
trusted_iterator_types = set(
    type(iter(t())) for t in (str, tuple, bytes, list, set, frozenset, dict, deque, Counter)
)
trusted_iterator_types.update((
    type(iter({}.keys())),
    type(iter({}.values())),
    type(iter({}.items())),
    type(iter(range(0))),
))

# Builtin value types considered safe to expose to evaluated expressions,
# split by mutability.
immutable_trusted = frozenset((int, bool, float, str, complex, frozenset, tuple, Decimal, Fraction, bytes, type(None),
                               type(...), type(NotImplemented), object, range))
mutable_trusted = frozenset((list, set, dict, k_view_type, v_view_type, it_view_type, Exception, NameError,
                             ValueError, LookupError, KeyError, TypeError, deque, Counter, *trusted_iterator_types))
trusted_types = immutable_trusted | mutable_trusted
# NOTE(review): mutable_trusted already unpacks trusted_iterator_types, so
# this |= is redundant today; kept as defensive belt-and-braces.
trusted_types |= trusted_iterator_types

bin_op_trusted_types = trusted_types
# Binary-operator rules: identity comparisons (Is/IsNot) on anything,
# operations between trusted types, and membership tests (In/NotIn) against a
# trusted container. Exact argument semantics are defined by BinOpRule in
# safe_eval.rules.
default_bin_rules = [
    BinOpRule(..., op_set=(Is, IsNot)),
    BinOpRule(bin_op_trusted_types),
    BinOpRule(..., bin_op_trusted_types, (In, NotIn))
]

# Builtins registered below as callable when every argument is of a trusted
# type (see CallableTypeRule(trusted_builtin_unary_funcs, trusted_types)).
trusted_builtin_unary_funcs = frozenset((
    abs, all, any, ascii,
    bin, bool, bytearray, bytes,
    chr, complex,
    dict,
    enumerate,
    float, format, frozenset,
    hasattr, hash, hex,
    int, iter,
    len, list,
    max, min,
    next,
    oct,
    property,
    range, repr, reversed, round,
    set, slice, sorted, str, sum,
    tuple, zip,
))
# Builtins registered as callable regardless of argument type (argument spec
# is `...` in the rule below).
safe_builtin_unary_funcs = frozenset((
    id,
    callable, classmethod,
    ord,
))
# todo a lot of functions are only fine if iteration is fine, do that
default_callable_rules = [
    CallableTypeRule(trusted_builtin_unary_funcs, trusted_types),
    CallableTypeRule(safe_builtin_unary_funcs, ...),
    CallableTypeRule(divmod, trusted_types, trusted_types),
    CallableRule((isinstance, issubclass), ..., trusted_types),
    CallableRule(object),
    CallableTypeRule(pow, trusted_types, trusted_types, trusted_types)
]

# Every whitelisted builtin, exposed in the default evaluation namespace
# keyed by its __name__.
imported_builtin_names = {*trusted_builtin_unary_funcs, *safe_builtin_unary_funcs,
                          divmod, isinstance, issubclass, object, pow}
default_namespace = {ibn.__name__: ibn for ibn in imported_builtin_names}

# Attribute-access rules; populated via _allow_method() further down.
default_attr_rules = []
def _allow_method(owner, method, *args, **kwargs):
    """Whitelist access to *method* on the type *owner*.

    ``method`` may be given either as an attribute name (str) or as the
    function object itself. Registers both the attribute lookup
    (default_attr_rules) and the call (default_callable_rules); extra
    ``args``/``kwargs`` are forwarded to the CallableMethodRule.
    """
    if isinstance(method, str):
        name, func = method, getattr(owner, method)
    else:
        name, func = method.__name__, method
    default_attr_rules.append(GetattrTypeRule(owner, name))
    default_callable_rules.append(CallableMethodRule(func, owner, *args, **kwargs))
# Seed the method whitelist: permit str.join, with trusted argument types.
_allow_method(str, str.join, trusted_types)
| 31.505495 | 118 | 0.702825 |
2870b3250a7dca1e04fe54265450ad0c248653be | 6,745 | py | Python | test/lib/mayaUsd/render/vp2RenderDelegate/testVP2RenderDelegatePointInstanceSelection.py | ika-rporter/maya-usd | 8f216a4fb955fc44c0abda55caa53ed295aaa625 | [
"Apache-2.0"
] | 507 | 2019-07-30T20:05:10.000Z | 2022-03-30T07:38:43.000Z | test/lib/mayaUsd/render/vp2RenderDelegate/testVP2RenderDelegatePointInstanceSelection.py | ika-rporter/maya-usd | 8f216a4fb955fc44c0abda55caa53ed295aaa625 | [
"Apache-2.0"
] | 1,188 | 2019-07-31T11:27:27.000Z | 2022-03-31T21:06:06.000Z | test/lib/mayaUsd/render/vp2RenderDelegate/testVP2RenderDelegatePointInstanceSelection.py | ika-rporter/maya-usd | 8f216a4fb955fc44c0abda55caa53ed295aaa625 | [
"Apache-2.0"
] | 165 | 2019-07-30T22:27:57.000Z | 2022-03-25T07:20:23.000Z | #!/usr/bin/env mayapy
#
# Copyright 2021 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import imageUtils
import mayaUtils
import usdUtils
from mayaUsd import lib as mayaUsdLib
from mayaUsd import ufe as mayaUsdUfe
from maya import cmds
import ufe
import os
class testVP2RenderDelegatePointInstanceSelection(imageUtils.ImageDiffingTestCase):
    """
    Tests imaging using the Viewport 2.0 render delegate when selecting
    instances of a PointInstancer.
    """

    # Class attributes set in setUpClass:
    #   _baselineDir / _testDir                      -- baseline vs snapshot image dirs
    #   _prevPointInstancesPickMode / _prevSelectionKind -- saved optionVar state
    @classmethod
    def setUpClass(cls):
        # The test USD data is authored Z-up, so make sure Maya is configured
        # that way too.
        cmds.upAxis(axis='z')

        inputPath = fixturesUtils.setUpClass(__file__,
            initializeStandalone=False, loadPlugin=False)

        cls._baselineDir = os.path.join(inputPath,
            'VP2RenderDelegatePointInstanceSelectionTest', 'baseline')

        cls._testDir = os.path.abspath('.')

        # Store the previous USD point instances pick mode and selection kind
        # (or None if unset) so we can restore the state later.
        cls._pointInstancesPickModeOptionVarName = mayaUsdLib.OptionVarTokens.PointInstancesPickMode
        cls._prevPointInstancesPickMode = cmds.optionVar(
            query=cls._pointInstancesPickModeOptionVarName) or None

        cls._selectionKindOptionVarName = mayaUsdLib.OptionVarTokens.SelectionKind
        cls._prevSelectionKind = cmds.optionVar(
            query=cls._selectionKindOptionVarName) or None

        # Set the USD point instances pick mode to "Instances" so that we pick
        # individual point instances during the test.
        cmds.optionVar(stringValue=(
            cls._pointInstancesPickModeOptionVarName, 'Instances'))

        # Clear any setting for selection kind.
        cmds.optionVar(remove=cls._selectionKindOptionVarName)

    @classmethod
    def tearDownClass(cls):
        # Restore the previous USD point instances pick mode and selection
        # kind, or remove if they were unset.
        if cls._prevPointInstancesPickMode is None:
            cmds.optionVar(remove=cls._pointInstancesPickModeOptionVarName)
        else:
            cmds.optionVar(stringValue=
                (cls._pointInstancesPickModeOptionVarName,
                    cls._prevPointInstancesPickMode))

        if cls._prevSelectionKind is None:
            cmds.optionVar(remove=cls._selectionKindOptionVarName)
        else:
            cmds.optionVar(stringValue=
                (cls._selectionKindOptionVarName, cls._prevSelectionKind))

    @staticmethod
    def _GetUfePath(instanceIndex=-1):
        # Build a two-segment UFE path: Maya proxy shape -> USD PointInstancer.
        # instanceIndex -1 addresses the PointInstancer prim itself rather
        # than one of its instances.
        mayaSegment = mayaUtils.createUfePathSegment('|UsdProxy|UsdProxyShape')
        usdSegmentString = mayaUsdUfe.usdPathToUfePathSegment(
            '/PointInstancerGrid/PointInstancer', instanceIndex)
        usdSegment = usdUtils.createUfePathSegment(usdSegmentString)
        ufePath = ufe.Path([mayaSegment, usdSegment])
        return ufePath

    @staticmethod
    def _GetSceneItem(instanceIndex=-1):
        # Scene-item wrapper around _GetUfePath() for selection operations.
        ufePath = testVP2RenderDelegatePointInstanceSelection._GetUfePath(
            instanceIndex)
        ufeItem = ufe.Hierarchy.createItem(ufePath)
        return ufeItem

    def assertSnapshotClose(self, imageName):
        # Capture the current viewport and compare against the stored baseline.
        baselineImage = os.path.join(self._baselineDir, imageName)
        snapshotImage = os.path.join(self._testDir, imageName)
        imageUtils.snapshot(snapshotImage, width=960, height=540)
        return self.assertImagesClose(baselineImage, snapshotImage)

    def _RunTest(self):
        # Requires self._numInstances and self._testName to have been set by
        # the calling test method.
        globalSelection = ufe.GlobalSelection.get()
        globalSelection.clear()
        self.assertSnapshotClose('%s_unselected.png' % self._testName)

        # Select one instance.
        sceneItem = self._GetSceneItem(0)
        globalSelection.append(sceneItem)
        self.assertSnapshotClose('%s_select_one.png' % self._testName)
        globalSelection.clear()

        # We'll populate a new selection and swap that into the global
        # selection to minimize the overhead of modifying the global selection
        # one item at a time.
        newSelection = ufe.Selection()

        # Select the first seven instances. The most recently selected item
        # should get "Lead" highlighting.
        for instanceIndex in range(7):
            sceneItem = self._GetSceneItem(instanceIndex)
            newSelection.append(sceneItem)
        globalSelection.replaceWith(newSelection)
        self.assertSnapshotClose('%s_select_seven.png' % self._testName)
        globalSelection.clear()
        newSelection.clear()

        # Select the back half of the instances.
        for instanceIndex in range(self._numInstances // 2, self._numInstances):
            sceneItem = self._GetSceneItem(instanceIndex)
            newSelection.append(sceneItem)
        globalSelection.replaceWith(newSelection)
        self.assertSnapshotClose('%s_select_half.png' % self._testName)
        globalSelection.clear()
        newSelection.clear()

        # Select all instances
        for instanceIndex in range(self._numInstances):
            sceneItem = self._GetSceneItem(instanceIndex)
            newSelection.append(sceneItem)
        globalSelection.replaceWith(newSelection)
        self.assertSnapshotClose('%s_select_all.png' % self._testName)
        globalSelection.clear()
        newSelection.clear()

        # Select the PointInstancer itself
        sceneItem = self._GetSceneItem()
        globalSelection.append(sceneItem)
        self.assertSnapshotClose('%s_select_PointInstancer.png' % self._testName)
        globalSelection.clear()

    def testPointInstancerGrid14(self):
        # Small grid: 14 instances.
        self._numInstances = 14
        self._testName = 'Grid_14'
        mayaUtils.openPointInstancesGrid14Scene()
        self._RunTest()

    def testPointInstancerGrid7k(self):
        # Medium grid: 7,000 instances.
        self._numInstances = 7000
        self._testName = 'Grid_7k'
        mayaUtils.openPointInstancesGrid7kScene()
        self._RunTest()

    def testPointInstancerGrid70k(self):
        # Large grid: 70,000 instances.
        self._numInstances = 70000
        self._testName = 'Grid_70k'
        mayaUtils.openPointInstancesGrid70kScene()
        self._RunTest()
if __name__ == '__main__':
    # Standalone entry point: run via the shared maya-usd test harness.
    fixturesUtils.runTests(globals())
| 37.265193 | 100 | 0.697999 |
2871778d5e8e5178dee6f9e80da7e8ac737d84a0 | 5,443 | py | Python | cy_widgets/strategy/exchange/base.py | cragod/CYWidgets | b1df1e32c363ed9252737d3041a7557b1dc604fe | [
"MIT"
] | 1 | 2021-06-17T02:25:25.000Z | 2021-06-17T02:25:25.000Z | cy_widgets/strategy/exchange/base.py | cragod/CYWidgets | b1df1e32c363ed9252737d3041a7557b1dc604fe | [
"MIT"
] | null | null | null | cy_widgets/strategy/exchange/base.py | cragod/CYWidgets | b1df1e32c363ed9252737d3041a7557b1dc604fe | [
"MIT"
] | 1 | 2021-12-08T06:50:33.000Z | 2021-12-08T06:50:33.000Z | # -*- coding: utf-8 -*-
import numpy as np
import talib as ta
from abc import ABC, abstractproperty, abstractclassmethod, abstractmethod
class BaseExchangeStrategy(ABC):
    """Base class for exchange trading strategies."""
    shortable = True  # whether the strategy may open short positions
    leverage = 1      # leverage applied by the strategy

    def __init__(self, *initial_data, **kwargs):
        """Parameters may be supplied as dict(s) and/or keyword arguments;
        every key becomes an attribute on the strategy instance."""
        for dictionary in initial_data:
            for key in dictionary:
                setattr(self, key, dictionary[key])
        for key in kwargs:
            setattr(self, key, kwargs[key])

    @abstractclassmethod
    def strategy_with(cls, parameters):
        # Abstract factory: subclasses build a strategy from `parameters`.
        raise NotImplementedError('初始化参数')

    @abstractproperty
    def name(self):
        """Strategy name."""
        raise NotImplementedError('Need a name')

    @abstractproperty
    def candle_count_for_calculating(self):
        """Number of candles needed to compute the strategy; used as a
        reference when fetching live candle data."""
        raise NotImplementedError

    @abstractmethod
    def available_to_calculate(self, df):
        """Check whether the candle data can be used for signal calculation."""
        return True

    @abstractmethod
    def calculate_signals(self, df, drop_extra_columns=True):
        """Compute signals; unified return format
        [candle_begin_time, open, high, low, close, volume, signal]."""
        raise NotImplementedError('?')

    @abstractmethod
    def calculate_realtime_signals(self, df, debug=False, position_info=None, position_info_save_func=None):
        """Compute real-time signals.

        Parameters
        ----------
        position_info : dict, optional
            Strategy position data.
        position_info_save_func : [type], optional
            Callback used to persist the position data.

        Raises
        ------
        NotImplementedError
        """
        raise NotImplementedError("?")

    def process_stop_lose(self, df, stop_loss_pct):
        """Apply stop-loss / take-profit processing.

        Reads df['signal_long'], df['signal_short'] and df['close'] and writes
        the resulting effective signal into df['signal'].
        stop_loss_pct is expressed in percent (e.g. 5 -> 5%).
        """
        # === decide whether stop-loss / take-profit handling is needed
        df_new = df[['signal_long', 'signal_short']]
        array_long_short = df_new.to_numpy(dtype=np.float32)  # should float32, for int32 does not have np.nan
        df_c = df[['close']]
        array_data = df_c.to_numpy(dtype=np.float32)
        result_array = self.numpy_process_stop_lose(array_long_short, array_data, stop_loss_pct)
        df['signal'] = result_array[:, 0]

    def numpy_process_stop_lose(self, array1, array_close, stop_loss_pct):
        # State machine over the candle series.
        # array1: (n, 2) long/short signal columns; array_close: (n, 1) closes.
        # pre_signal tracks the held position: 0 flat, 1 long, -1 short.
        n = array1.shape[0]
        result_signal = np.zeros((n, 1), dtype=np.float32)
        result_signal.fill(np.nan)  # nan = no change in position this period
        pre_signal = 0
        stop_lose_price = np.nan
        for i in range(n):
            # previously flat (no position)
            if pre_signal == 0:
                # a long-entry signal this period
                if array1[i, 0] == 1:
                    result_signal[i, 0] = 1  # set the effective signal to 1
                    # record current state
                    pre_signal = 1  # position signal
                    # use this period's close times a fixed ratio as the stop
                    # price; the next period's open (df.at[i+1, 'open']) could
                    # be used instead, but i+1 would fail on the last i
                    stop_lose_price = array_close[i, 0] * (1 - stop_loss_pct / 100.0)
                # a short-entry signal this period
                elif array1[i, 1] == -1:
                    result_signal[i, 0] = -1  # set the effective signal to -1
                    # record related state
                    pre_signal = -1  # position signal
                    # stop price above the entry close for a short position
                    stop_lose_price = array_close[i, 0] * (1 + stop_loss_pct / 100.0)
                # no signal
                else:
                    # record related state
                    pre_signal = 0
                    stop_lose_price = np.nan
            # previously holding a long position
            elif pre_signal == 1:
                # a close-long signal this period, or the stop/take level hit
                if (array1[i, 0] == 0) or (array_close[i, 0] < stop_lose_price):
                    result_signal[i, 0] = 0  # set the effective signal to 0
                    # record related state
                    pre_signal = 0
                    stop_lose_price = np.nan
                    # close the long and also open a short this period
                    if array1[i, 1] == -1:
                        result_signal[i, 0] = -1  # set the effective signal to -1
                        # record related state
                        pre_signal = -1  # position signal
                        stop_lose_price = array_close[i, 0] * (1 + stop_loss_pct / 100.0)
                # zwx add, if pre_signal still is 1, use max value as zhisun
                # (zhisun = trailing stop-loss)
                if pre_signal == 1:
                    tmp_stop_lose_price = array_close[i, 0] * (1 - stop_loss_pct / 100.0)
                    if tmp_stop_lose_price > stop_lose_price:
                        stop_lose_price = tmp_stop_lose_price
            # previously holding a short position
            elif pre_signal == -1:
                # a close-short signal this period, or the stop/take level hit
                if (array1[i, 1] == 0) or (array_close[i, 0] > stop_lose_price):
                    result_signal[i, 0] = 0  # set the effective signal to 0
                    # record related state
                    pre_signal = 0
                    stop_lose_price = np.nan
                    # close the short and also open a long this period
                    if array1[i, 0] == 1:
                        result_signal[i, 0] = 1  # set the effective signal to 1
                        # record related state
                        pre_signal = 1  # position signal
                        stop_lose_price = array_close[i, 0] * (1 - stop_loss_pct / 100.0)
                # if pre_signal still is -1, use min value as zhiying
                # (zhiying = trailing take-profit)
                if pre_signal == -1:
                    tmp_stop_lose_price = array_close[i, 0] * (1 + stop_loss_pct / 100.0)
                    if tmp_stop_lose_price < stop_lose_price:
                        stop_lose_price = tmp_stop_lose_price
            # any other state is a logic error
            else:
                raise ValueError('不可能出现其他的情况,如果出现,说明代码逻辑有误,报错')
        return result_signal
| 35.344156 | 165 | 0.533346 |
287237c213c17fd114b9833d581d53122d6ad18d | 492 | py | Python | app/core/admin/__init__.py | 3darkman/faction-builder-api | 9dda323ef44a1ca0976306a4f20f9cc3e13704ec | [
"MIT"
] | null | null | null | app/core/admin/__init__.py | 3darkman/faction-builder-api | 9dda323ef44a1ca0976306a4f20f9cc3e13704ec | [
"MIT"
] | null | null | null | app/core/admin/__init__.py | 3darkman/faction-builder-api | 9dda323ef44a1ca0976306a4f20f9cc3e13704ec | [
"MIT"
] | null | null | null | from django.contrib import admin
from core import models
from .category import CategoryAdmin
from .trait import TraitAdmin
from .user import UserAdmin
# Wire the project's models into the Django admin site. Models with a custom
# ModelAdmin (User, Category, Trait) pass it explicitly; the rest fall back
# to the default ModelAdmin.
admin.site.register(models.User, UserAdmin)
admin.site.register(models.Ability)
admin.site.register(models.Category, CategoryAdmin)
admin.site.register(models.Trait, TraitAdmin)
admin.site.register(models.Domain)
admin.site.register(models.FactionType)
admin.site.register(models.CategorySlot)
admin.site.register(models.StartingProfile)
| 28.941176 | 51 | 0.835366 |
287358c4458cfef128d6223f0355c87f498e047c | 2,196 | py | Python | src/predict_video_file.py | irfanmustafas/TeethClassifierCNN | 8c58b50162b3f9eb7f12251cbca9fcbd4d6c43b7 | [
"MIT"
] | 1 | 2018-12-05T01:49:54.000Z | 2018-12-05T01:49:54.000Z | src/predict_video_file.py | irfanmustafas/TeethClassifierCNN | 8c58b50162b3f9eb7f12251cbca9fcbd4d6c43b7 | [
"MIT"
] | null | null | null | src/predict_video_file.py | irfanmustafas/TeethClassifierCNN | 8c58b50162b3f9eb7f12251cbca9fcbd4d6c43b7 | [
"MIT"
] | null | null | null | import numpy as np
import sys
import caffe
import glob
import uuid
import cv2
from util import transform_img
from mouth_detector_dlib import mouth_detector
from caffe.proto import caffe_pb2
import os
import shutil
from util import histogram_equalization
from teeth_cnn import teeth_cnn
# One-shot demo script (Python 2 / OpenCV 2 era APIs: `print` statements and
# the cv2.cv namespace). Reads a video, runs the teeth classifier on every
# frame and writes an annotated copy to disk while previewing it.
mouth_detector_instance = mouth_detector()
teeth_cnn_instance = teeth_cnn()

# Text metrics for the overlay label (currently unused below).
size = cv2.getTextSize("Showing teeth", cv2.FONT_HERSHEY_PLAIN, 2, 1)[0]
x,y = (50,250)

# Define the codec and create VideoWriter object
fourcc = cv2.cv.CV_FOURCC(*'mp4v')
cap = cv2.VideoCapture('../elon.mp4')
cap.set(1,19300);  # property 1 (CV_CAP_PROP_POS_FRAMES): seek to frame 19300
ret, frame = cap.read()
#cv2.imshow('window_name', frame) # show frame on window
w = cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH);
h = cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT);
out = cv2.VideoWriter('output_elon.avi',fourcc, 24, (int(w),int(h)))
#cap.set(3,500)
#cap.set(4,500)
#cap.set(5,30)
ret, frame = cap.read()

while(cap.isOpened()):
    ret, frame = cap.read()
    copy_frame = frame.copy()
    # predict() yields the predicted class, class probabilities and a mouth
    # bounding box; result is None when no prediction was made (presumably
    # no mouth detected -- confirm in teeth_cnn).
    result,prob,xf,yf,wf,hf = teeth_cnn_instance.predict(copy_frame,mouth_detector_instance)
    if result is not None:
        if(result == 1):
            # Green box + probability label: "showing teeth".
            cv2.rectangle(frame, (xf,yf),(wf,hf),(0,255,0),4,0)
            prob_round = prob[0][1]*100
            print prob_round
            cv2.rectangle(frame, (xf-2,yf-25),(wf+2,yf),(0,255,0),-1,0)
            cv2.rectangle(frame, (xf-2,hf),(xf+((wf-xf)/2),hf+25),(0,255,0),-1,0)
            cv2.putText(frame, "Teeth!!",(xf,hf+14),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
            cv2.putText(frame, str(prob_round)+"%",(xf,yf-10),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
            #out.write(frame)
            print "SHOWING TEETH!!!"
        elif(result==0):
            # Grey box + probability label: "not showing teeth".
            cv2.rectangle(frame, (xf,yf),(wf,hf),(64,64,64),4,0)
            prob_round = prob[0][1]*100
            print prob_round
            cv2.rectangle(frame, (xf-2,yf-25),(wf+2,yf),(64,64,64),-1,0)
            cv2.rectangle(frame, (xf-2,hf),(xf+((wf-xf)/2),hf+25),(64,64,64),-1,0)
            cv2.putText(frame, "Teeth??",(xf,hf+14),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
            cv2.putText(frame, str(prob_round)+"%",(xf,yf-10),cv2.FONT_HERSHEY_PLAIN,1.2,0,2)
    # NOTE(review): indentation reconstructed -- out.write is assumed to run
    # once per frame at loop level (the in-branch write above is commented
    # out); confirm against the original file.
    out.write(frame)
    cv2.imshow('frame',frame)
    if cv2.waitKey(200) & 0xFF == ord('q'):
        break

cap.release()
out.release()
cv2.destroyAllWindows()
| 31.371429 | 89 | 0.679417 |
28738d283bf4868349454e25d748bec7dc9a9c6f | 33,650 | py | Python | sdk/python/pulumi_gcp/dataloss/prevention_deidentify_template.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 121 | 2018-06-18T19:16:42.000Z | 2022-03-31T06:06:48.000Z | sdk/python/pulumi_gcp/dataloss/prevention_deidentify_template.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 492 | 2018-06-22T19:41:03.000Z | 2022-03-31T15:33:53.000Z | sdk/python/pulumi_gcp/dataloss/prevention_deidentify_template.py | sisisin/pulumi-gcp | af6681d70ea457843409110c1324817fe55f68ad | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2018-06-19T01:43:13.000Z | 2022-03-23T22:43:37.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PreventionDeidentifyTemplateArgs', 'PreventionDeidentifyTemplate']
@pulumi.input_type
class PreventionDeidentifyTemplateArgs:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) -- see the file
    # header. @pulumi.input_type stores the values via pulumi.set/pulumi.get;
    # the properties below mirror the constructor arguments one-to-one.
    def __init__(__self__, *,
                 deidentify_config: pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs'],
                 parent: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a PreventionDeidentifyTemplate resource.
        :param pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs'] deidentify_config: Configuration of the deidentify template
               Structure is documented below.
        :param pulumi.Input[str] parent: The parent of the template in any of the following formats:
               * `projects/{{project}}`
               * `projects/{{project}}/locations/{{location}}`
               * `organizations/{{organization_id}}`
               * `organizations/{{organization_id}}/locations/{{location}}`
        :param pulumi.Input[str] description: A description of the template.
        :param pulumi.Input[str] display_name: User set display name of the template.
        """
        pulumi.set(__self__, "deidentify_config", deidentify_config)
        pulumi.set(__self__, "parent", parent)
        # Optional arguments are only stored when provided.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)

    @property
    @pulumi.getter(name="deidentifyConfig")
    def deidentify_config(self) -> pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']:
        """
        Configuration of the deidentify template
        Structure is documented below.
        """
        return pulumi.get(self, "deidentify_config")

    @deidentify_config.setter
    def deidentify_config(self, value: pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']):
        pulumi.set(self, "deidentify_config", value)

    @property
    @pulumi.getter
    def parent(self) -> pulumi.Input[str]:
        """
        The parent of the template in any of the following formats:
        * `projects/{{project}}`
        * `projects/{{project}}/locations/{{location}}`
        * `organizations/{{organization_id}}`
        * `organizations/{{organization_id}}/locations/{{location}}`
        """
        return pulumi.get(self, "parent")

    @parent.setter
    def parent(self, value: pulumi.Input[str]):
        pulumi.set(self, "parent", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the template.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        User set display name of the template.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)
@pulumi.input_type
class _PreventionDeidentifyTemplateState:
    # NOTE: generated by the Pulumi Terraform Bridge (tfgen) -- see the file
    # header. All fields are optional because state objects are used for
    # looking up / filtering existing resources.
    def __init__(__self__, *,
                 deidentify_config: Optional[pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering PreventionDeidentifyTemplate resources.
        :param pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs'] deidentify_config: Configuration of the deidentify template
               Structure is documented below.
        :param pulumi.Input[str] description: A description of the template.
        :param pulumi.Input[str] display_name: User set display name of the template.
        :param pulumi.Input[str] name: Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
        :param pulumi.Input[str] parent: The parent of the template in any of the following formats:
               * `projects/{{project}}`
               * `projects/{{project}}/locations/{{location}}`
               * `organizations/{{organization_id}}`
               * `organizations/{{organization_id}}/locations/{{location}}`
        """
        if deidentify_config is not None:
            pulumi.set(__self__, "deidentify_config", deidentify_config)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if parent is not None:
            pulumi.set(__self__, "parent", parent)

    @property
    @pulumi.getter(name="deidentifyConfig")
    def deidentify_config(self) -> Optional[pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']]:
        """
        Configuration of the deidentify template
        Structure is documented below.
        """
        return pulumi.get(self, "deidentify_config")

    @deidentify_config.setter
    def deidentify_config(self, value: Optional[pulumi.Input['PreventionDeidentifyTemplateDeidentifyConfigArgs']]):
        pulumi.set(self, "deidentify_config", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description of the template.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        User set display name of the template.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        # NOTE(review): this generated docstring looks copied from the DLP
        # infoType field; for this resource `name` is presumably the template
        # resource name -- confirm against the upstream provider schema.
        """
        Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parent(self) -> Optional[pulumi.Input[str]]:
        """
        The parent of the template in any of the following formats:
        * `projects/{{project}}`
        * `projects/{{project}}/locations/{{location}}`
        * `organizations/{{organization_id}}`
        * `organizations/{{organization_id}}/locations/{{location}}`
        """
        return pulumi.get(self, "parent")

    @parent.setter
    def parent(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "parent", value)
class PreventionDeidentifyTemplate(pulumi.CustomResource):
    """Resource wrapper for ``gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate``.

    Code-generated style resource class: two ``@overload`` constructor
    signatures are declared for type checkers, and the real ``__init__``
    dispatches to ``_internal_init`` at runtime.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 deidentify_config: Optional[pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Allows creation of templates to de-identify content.
        To get more information about DeidentifyTemplate, see:
        * [API documentation](https://cloud.google.com/dlp/docs/reference/rest/v2/projects.deidentifyTemplates)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/dlp/docs/concepts-templates)
        ## Example Usage
        ### Dlp Deidentify Template Basic
        ```python
        import pulumi
        import pulumi_gcp as gcp
        basic = gcp.dataloss.PreventionDeidentifyTemplate("basic",
            deidentify_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigArgs(
                info_type_transformations=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsArgs(
                    transformations=[
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                name="FIRST_NAME",
                            )],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                replace_with_info_type_config=True,
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="PHONE_NUMBER",
                                ),
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="AGE",
                                ),
                            ],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
                                    new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
                                        integer_value=9,
                                    ),
                                ),
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="EMAIL_ADDRESS",
                                ),
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="LAST_NAME",
                                ),
                            ],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                character_mask_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigArgs(
                                    characters_to_ignore=[{
                                        "commonCharactersToIgnore": "PUNCTUATION",
                                    }],
                                    masking_character="X",
                                    number_to_mask=4,
                                    reverse_order=True,
                                ),
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                name="DATE_OF_BIRTH",
                            )],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
                                    new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
                                        date_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValueArgs(
                                            day=1,
                                            month=1,
                                            year=2020,
                                        ),
                                    ),
                                ),
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                name="CREDIT_CARD_NUMBER",
                            )],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                crypto_deterministic_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigArgs(
                                    context=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContextArgs(
                                        name="sometweak",
                                    ),
                                    crypto_key=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyArgs(
                                        transient=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientArgs(
                                            name="beep",
                                        ),
                                    ),
                                    surrogate_info_type=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeArgs(
                                        name="abc",
                                    ),
                                ),
                            ),
                        ),
                    ],
                ),
            ),
            description="Description",
            display_name="Displayname",
            parent="projects/my-project-name")
        ```
        ## Import
        DeidentifyTemplate can be imported using any of these accepted formats
        ```sh
        $ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/deidentifyTemplates/{{name}}
        ```
        ```sh
        $ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/{{name}}
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']] deidentify_config: Configuration of the deidentify template
               Structure is documented below.
        :param pulumi.Input[str] description: A description of the template.
        :param pulumi.Input[str] display_name: User set display name of the template.
        :param pulumi.Input[str] parent: The parent of the template in any of the following formats:
               * `projects/{{project}}`
               * `projects/{{project}}/locations/{{location}}`
               * `organizations/{{organization_id}}`
               * `organizations/{{organization_id}}/locations/{{location}}`
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: PreventionDeidentifyTemplateArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Allows creation of templates to de-identify content.
        To get more information about DeidentifyTemplate, see:
        * [API documentation](https://cloud.google.com/dlp/docs/reference/rest/v2/projects.deidentifyTemplates)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/dlp/docs/concepts-templates)
        ## Example Usage
        ### Dlp Deidentify Template Basic
        ```python
        import pulumi
        import pulumi_gcp as gcp
        basic = gcp.dataloss.PreventionDeidentifyTemplate("basic",
            deidentify_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigArgs(
                info_type_transformations=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsArgs(
                    transformations=[
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                name="FIRST_NAME",
                            )],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                replace_with_info_type_config=True,
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="PHONE_NUMBER",
                                ),
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="AGE",
                                ),
                            ],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
                                    new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
                                        integer_value=9,
                                    ),
                                ),
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="EMAIL_ADDRESS",
                                ),
                                gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                    name="LAST_NAME",
                                ),
                            ],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                character_mask_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCharacterMaskConfigArgs(
                                    characters_to_ignore=[{
                                        "commonCharactersToIgnore": "PUNCTUATION",
                                    }],
                                    masking_character="X",
                                    number_to_mask=4,
                                    reverse_order=True,
                                ),
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                name="DATE_OF_BIRTH",
                            )],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                replace_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigArgs(
                                    new_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueArgs(
                                        date_value=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationReplaceConfigNewValueDateValueArgs(
                                            day=1,
                                            month=1,
                                            year=2020,
                                        ),
                                    ),
                                ),
                            ),
                        ),
                        gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationArgs(
                            info_types=[gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationInfoTypeArgs(
                                name="CREDIT_CARD_NUMBER",
                            )],
                            primitive_transformation=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationArgs(
                                crypto_deterministic_config=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigArgs(
                                    context=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigContextArgs(
                                        name="sometweak",
                                    ),
                                    crypto_key=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyArgs(
                                        transient=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigCryptoKeyTransientArgs(
                                            name="beep",
                                        ),
                                    ),
                                    surrogate_info_type=gcp.dataloss.PreventionDeidentifyTemplateDeidentifyConfigInfoTypeTransformationsTransformationPrimitiveTransformationCryptoDeterministicConfigSurrogateInfoTypeArgs(
                                        name="abc",
                                    ),
                                ),
                            ),
                        ),
                    ],
                ),
            ),
            description="Description",
            display_name="Displayname",
            parent="projects/my-project-name")
        ```
        ## Import
        DeidentifyTemplate can be imported using any of these accepted formats
        ```sh
        $ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/deidentifyTemplates/{{name}}
        ```
        ```sh
        $ pulumi import gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate default {{parent}}/{{name}}
        ```
        :param str resource_name: The name of the resource.
        :param PreventionDeidentifyTemplateArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Runtime entry point backing the two @overload signatures above: accepts
    # either an args object or keyword properties and forwards to _internal_init.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(PreventionDeidentifyTemplateArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 deidentify_config: Optional[pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 parent: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # A populated opts.id means we are adopting an existing resource, in
        # which case no input properties may be supplied here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = PreventionDeidentifyTemplateArgs.__new__(PreventionDeidentifyTemplateArgs)
            if deidentify_config is None and not opts.urn:
                raise TypeError("Missing required property 'deidentify_config'")
            __props__.__dict__["deidentify_config"] = deidentify_config
            __props__.__dict__["description"] = description
            __props__.__dict__["display_name"] = display_name
            if parent is None and not opts.urn:
                raise TypeError("Missing required property 'parent'")
            __props__.__dict__["parent"] = parent
            # "name" is an output-only property computed by the provider.
            __props__.__dict__["name"] = None
        super(PreventionDeidentifyTemplate, __self__).__init__(
            'gcp:dataloss/preventionDeidentifyTemplate:PreventionDeidentifyTemplate',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            deidentify_config: Optional[pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']]] = None,
            description: Optional[pulumi.Input[str]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            parent: Optional[pulumi.Input[str]] = None) -> 'PreventionDeidentifyTemplate':
        """
        Get an existing PreventionDeidentifyTemplate resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['PreventionDeidentifyTemplateDeidentifyConfigArgs']] deidentify_config: Configuration of the deidentify template
               Structure is documented below.
        :param pulumi.Input[str] description: A description of the template.
        :param pulumi.Input[str] display_name: User set display name of the template.
        :param pulumi.Input[str] name: Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
        :param pulumi.Input[str] parent: The parent of the template in any of the following formats:
               * `projects/{{project}}`
               * `projects/{{project}}/locations/{{location}}`
               * `organizations/{{organization_id}}`
               * `organizations/{{organization_id}}/locations/{{location}}`
        """
        # Merge the lookup id into the options, then build a state bag holding
        # whatever known property values the caller supplied.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _PreventionDeidentifyTemplateState.__new__(_PreventionDeidentifyTemplateState)
        __props__.__dict__["deidentify_config"] = deidentify_config
        __props__.__dict__["description"] = description
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["name"] = name
        __props__.__dict__["parent"] = parent
        return PreventionDeidentifyTemplate(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="deidentifyConfig")
    def deidentify_config(self) -> pulumi.Output['outputs.PreventionDeidentifyTemplateDeidentifyConfig']:
        """
        Configuration of the deidentify template
        Structure is documented below.
        """
        return pulumi.get(self, "deidentify_config")
    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        A description of the template.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[Optional[str]]:
        """
        User set display name of the template.
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the information type. Either a name of your choosing when creating a CustomInfoType, or one of the names listed at [https://cloud.google.com/dlp/docs/infotypes-reference](https://cloud.google.com/dlp/docs/infotypes-reference) when specifying a built-in type. When sending Cloud DLP results to Data Catalog, infoType names should conform to the pattern `[A-Za-z0-9$-_]{1,64}`.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def parent(self) -> pulumi.Output[str]:
        """
        The parent of the template in any of the following formats:
        * `projects/{{project}}`
        * `projects/{{project}}/locations/{{location}}`
        * `organizations/{{organization_id}}`
        * `organizations/{{organization_id}}/locations/{{location}}`
        """
        return pulumi.get(self, "parent")
| 56.841216 | 422 | 0.624101 |
2874251d928931a6c2a13448e5d757c4351cb292 | 2,163 | py | Python | examples/quickstart.py | miketlk/omfgp | 6e5a0f52f2688d81bde3e5169a37311c9517fe1d | [
"MIT"
] | null | null | null | examples/quickstart.py | miketlk/omfgp | 6e5a0f52f2688d81bde3e5169a37311c9517fe1d | [
"MIT"
] | null | null | null | examples/quickstart.py | miketlk/omfgp | 6e5a0f52f2688d81bde3e5169a37311c9517fe1d | [
"MIT"
] | 1 | 2021-08-16T10:19:52.000Z | 2021-08-16T10:19:52.000Z | import sys
import omfgp as gp
import time
if gp.USES_USCARD:
import uscard
from machine import Pin
def get_default_reader():
    """Return the default smart card reader, or None when uscard is unavailable."""
    if not gp.USES_USCARD:
        return None
    # Pin assignments for the board's built-in reader slot.
    pins = dict(ioPin=Pin.cpu.A2, clkPin=Pin.cpu.A4,
                rstPin=Pin.cpu.G10, presPin=Pin.cpu.C2,
                pwrPin=Pin.cpu.C5)
    return uscard.Reader(name="Smart card reader", ifaceId=2, **pins)
def card_status(card: gp.card.GPCard) -> list:
    """Display all kinds of smart card status information returning file list

    :param card: instance of smart card interface
    :return: list of load file AID
    """
    # Query each status view first, then print them, preserving the original
    # query/print ordering and output format.
    sections = [
        ("\n=== ISD status ===\n", card.get_status(gp.StatusKind.ISD)),
        ("\n=== Apps and SDs ===\n", card.get_status(gp.StatusKind.APP_SSD)),
        ("\n=== Load files & modules ===\n", card.get_status(gp.StatusKind.LOAD_FILES_MOD)),
        ("\n=== Load files only ===\n", card.get_status(gp.StatusKind.LOAD_FILES)),
    ]
    for header, status in sections:
        print(header, status, "\n")
    load_files = sections[-1][1]
    aids = []
    for entry in load_files:
        aids.append(entry.aid)
    return aids
def main(applet_file: str = 'examples/teapot_applet.ijc'):
    """Load the applet from *applet_file* onto the card.

    Uses the first available reader and default keys. If the applet's load
    file already exists on the card, it (and its related applets) is deleted
    prior to load. The reader is always disconnected on exit.
    """
    card = gp.card.GPCard(reader=get_default_reader(), debug=True)
    try:
        select_rsp = card.select()
        print("SELECT response:", select_rsp)
        card.open_secure_channel()
        card_file_aid_list = card_status(card)
        # BUG FIX: the applet file was previously opened and never closed;
        # a context manager guarantees the handle is released.
        with open(applet_file, 'rb') as file:
            applet = gp.applet.Applet.read_from(file)
            if applet.package_aid in card_file_aid_list:
                print("Deleting load file '%s' and related applets" %
                      applet.package_aid)
                card.delete_object(applet.package_aid)
            card.load_applet(applet, target_sd_aid=select_rsp.aid)
        card_status(card)
    finally:
        # Always release the reader, even if loading failed.
        card.disconnect()
if __name__ == '__main__':
main() | 32.283582 | 77 | 0.631993 |
28742e12e6739c290a95e278f025627ff9c82803 | 685 | py | Python | basic/myunittest/test_timeit.py | fplust/python3-cookbook | 0eaca2e3631bb69deaf466c32023bbb2093513da | [
"Apache-2.0"
] | 1 | 2019-07-25T09:09:54.000Z | 2019-07-25T09:09:54.000Z | basic/myunittest/test_timeit.py | fplust/python3-cookbook | 0eaca2e3631bb69deaf466c32023bbb2093513da | [
"Apache-2.0"
] | null | null | null | basic/myunittest/test_timeit.py | fplust/python3-cookbook | 0eaca2e3631bb69deaf466c32023bbb2093513da | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 基准测试
Desc :
"""
from timeit import timeit
__author__ = 'Xiong Neng'
class Stock():
    # __slots__ is encouraged here to improve performance (lower per-instance
    # memory and faster attribute access).
    __slots__ = ["name", "shares", "price"]
    def __init__(self, name, shares, price):
        """Create a stock position from its name, share count and price."""
        self.name = name
        self.shares = shares
        self.price = price
def my_timeit(number=100000):
    """Benchmark prepending to a deque versus inserting at the front of a list.

    ``deque.appendleft`` is O(1) while ``list.insert(0, ...)`` is O(n), so
    ``t2`` grows much faster than ``t1`` as *number* increases.

    :param number: iteration count passed to ``timeit`` (default 100000,
        matching the original hard-coded value).
    """
    cdeque = """
import collections
s = collections.deque()
"""
    t1 = timeit("s.appendleft(37)", cdeque, number=number)
    t2 = timeit("s.insert(0, 37)",
                "s=[]", number=number)
    print("t1=", t1)
    print("t2=", t2)
def main():
    """Entry point: run the deque-vs-list benchmark."""
    my_timeit()
# Run the benchmark only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 17.125 | 58 | 0.563504 |
28753e72046917448d518c1d4a909cdfe502ee60 | 7,276 | py | Python | tectosaur2/hmatrix/tree.py | tbenthompson/BIE_tutorials | 02cd56ab7e63e36afc4a10db17072076541aab77 | [
"MIT"
] | 15 | 2021-08-31T15:02:45.000Z | 2022-02-11T21:10:01.000Z | tectosaur2/hmatrix/tree.py | tbenthompson/BIE_tutorials | 02cd56ab7e63e36afc4a10db17072076541aab77 | [
"MIT"
] | 79 | 2021-08-29T15:35:39.000Z | 2022-03-25T14:56:42.000Z | tectosaur2/hmatrix/tree.py | tbenthompson/BIE_tutorials | 02cd56ab7e63e36afc4a10db17072076541aab77 | [
"MIT"
] | 3 | 2022-03-12T14:44:41.000Z | 2022-03-21T20:43:20.000Z | from dataclasses import dataclass
from typing import Optional
import matplotlib.pyplot as plt
import numpy as np
@dataclass()
class TreeNode:
    """One box of the bounding hierarchy over the spherical elements."""

    # Elements of this node occupy ordered_idxs[idx_start:idx_end] of the
    # owning Tree.
    idx_start: int
    idx_end: int
    # Center and radius of a ball containing every element sphere assigned
    # to this node.
    center: np.ndarray
    radius: float
    is_leaf: bool
    left: Optional["TreeNode"]
    right: Optional["TreeNode"]


@dataclass()
class Tree:
    """A binary ball tree over spherical approximations of elements."""

    # Permutation of the original element indices; every TreeNode references
    # a contiguous slice of this array.
    ordered_idxs: np.ndarray
    pts: np.ndarray
    radii: np.ndarray
    root: TreeNode


def build_tree(pts, radii, min_pts_per_box=10):
    """Build a ball tree over elements approximated as spheres.

    :param pts: (n, d) array with the center of each element.
    :param radii: (n,) array with the radius of each element (spherical
        approximation rather than the element geometry itself).
    :param min_pts_per_box: a box with more than this many elements keeps
        splitting; smaller boxes become leaves.
    """
    # Start with the element indices in the order given; build_tree_node
    # re-orders them so that each node owns a contiguous index slice with the
    # left child's indices preceding the right child's.
    ordered_idxs = np.arange(pts.shape[0])
    # The recursion handles the rest; the root covers the full range [0, n).
    root = build_tree_node(pts, radii, min_pts_per_box, ordered_idxs, 0, pts.shape[0])
    return Tree(ordered_idxs, pts, radii, root)


def build_tree_node(
    all_pts, all_radii, min_pts_per_box, ordered_idxs, idx_start, idx_end
):
    """Recursively build the subtree owning ordered_idxs[idx_start:idx_end]."""
    # 1) Collect the relevant element data. idx_view aliases the slice of
    # ordered_idxs, so re-ordering below is visible tree-wide.
    idx_view = ordered_idxs[idx_start:idx_end]
    pts = all_pts[idx_view]
    radii = all_radii[idx_view]
    # 2) Bounding ball: centered at the centroid, with a radius large enough
    # to contain every element sphere (center distance plus element radius).
    box_center = np.mean(pts, axis=0)
    sep = pts - box_center[None, :]
    box_axis_length = np.max(sep, axis=0)
    box_radius = np.max(np.linalg.norm(sep, axis=1) + radii)
    # 3) Build the node; children are filled in below if we split.
    node = TreeNode(
        idx_start, idx_end, box_center, box_radius, is_leaf=True, left=None, right=None
    )
    # 4) Small enough boxes become leaves.
    n_node = idx_end - idx_start
    if n_node <= min_pts_per_box or n_node < 2:
        return node
    # 5) Split at the median along the box's longest axis.
    split_d = np.argmax(box_axis_length)
    split_val = np.median(pts[:, split_d])
    is_left = pts[:, split_d] < split_val
    # 6) Partition the index block: left-side indices first, right-side last.
    n_left = int(np.count_nonzero(is_left))
    if n_left == 0 or n_left == n_node:
        # BUG FIX: with duplicated coordinates the strict '<' median test can
        # put every element on one side, which previously recursed forever
        # (RecursionError). Fall back to an even positional split.
        n_left = n_node // 2
        left_idxs = idx_view[:n_left].copy()
        right_idxs = idx_view[n_left:].copy()
    else:
        left_idxs = idx_view[np.where(is_left)[0]].copy()
        right_idxs = idx_view[np.where(~is_left)[0]].copy()
    idx_view[:n_left] = left_idxs
    idx_view[n_left:] = right_idxs
    # 7) Recurse: the left child receives [idx_start, idx_split) and the right
    # child receives [idx_split, idx_end) -- a smaller, equivalent problem.
    idx_split = idx_start + n_left
    node.is_leaf = False
    node.left = build_tree_node(
        all_pts, all_radii, min_pts_per_box, ordered_idxs, idx_start, idx_split
    )
    node.right = build_tree_node(
        all_pts, all_radii, min_pts_per_box, ordered_idxs, idx_split, idx_end
    )
    return node
def _traverse(obs_node, src_node, safety_factor, direct_list, approx_list):
    """Recursive dual-tree traversal: sort (obs, src) node pairs into
    far-field (approx_list) and near-field (direct_list) interactions.

    Appends (obs_node, src_node) tuples to the two output lists in place;
    the order of entries follows the recursion order.
    """
    dist = np.linalg.norm(obs_node.center - src_node.center)
    if dist > safety_factor * (obs_node.radius + src_node.radius):
        # The boxes are well separated: use an approximate interaction.
        approx_list.append((obs_node, src_node))
    elif obs_node.is_leaf and src_node.is_leaf:
        # Both nodes are leaves but still close: we cannot split any further,
        # so this pair must be computed exactly.
        direct_list.append((obs_node, src_node))
    else:
        # Close but still splittable: recurse into the children of one node.
        # Prefer splitting the larger node; if the preferred node is a leaf,
        # split the other one instead.
        split_src = (
            (obs_node.radius < src_node.radius) and not src_node.is_leaf
        ) or obs_node.is_leaf
        if split_src:
            _traverse(obs_node, src_node.left, safety_factor, direct_list, approx_list)
            _traverse(obs_node, src_node.right, safety_factor, direct_list, approx_list)
        else:
            _traverse(obs_node.left, src_node, safety_factor, direct_list, approx_list)
            _traverse(obs_node.right, src_node, safety_factor, direct_list, approx_list)
def traverse(obs_node, src_node, safety_factor=1.5):
    """Partition all (observer, source) box pairs into near- and far-field sets.

    Returns a ``(direct_list, approx_list)`` tuple of TreeNode pairs.
    """
    near, far = [], []
    _traverse(obs_node, src_node, safety_factor, near, far)
    return near, far
def check_tree(pts, radii, tree, node):
    """Verify that every element sphere under ``node`` fits inside its ball.

    Returns True for a valid (sub)tree; an absent child counts as valid.
    """
    if node is None:
        return True
    owned = tree.ordered_idxs[node.idx_start : node.idx_end]
    # Farthest reach of each owned element: center distance plus its radius.
    reach = np.linalg.norm(pts[owned] - node.center, axis=1) + radii[owned]
    if np.any(reach > node.radius):
        return False
    return check_tree(pts, radii, tree, node.left) and check_tree(
        pts, radii, tree, node.right
    )
def plot_tree_level(node, depth, **kwargs):
    """Draw the bounding circles of the nodes exactly ``depth`` levels below ``node``."""
    if depth == 0:
        # We reached the requested level: draw this node's circle and stop.
        outline = plt.Circle(tuple(node.center[:2]), node.radius, fill=False, **kwargs)
        plt.gca().add_patch(outline)
        return
    if node.left is None:
        return
    plot_tree_level(node.left, depth - 1, **kwargs)
    plot_tree_level(node.right, depth - 1, **kwargs)
def plot_tree(tree):
    """Show a 3x3 panel figure, one panel per tree level (levels 0-8)."""
    root = tree.root
    # Every panel shares the root's bounding square as its axis window.
    x_window = [root.center[0] - root.radius, root.center[0] + root.radius]
    y_window = [root.center[1] - root.radius, root.center[1] + root.radius]
    plt.figure(figsize=(9, 9))
    for depth in range(9):
        plt.subplot(3, 3, 1 + depth)
        plt.title(f"level = {depth}")
        plot_tree_level(tree.root, depth, color="b", linewidth=0.5)
        plt.xlim(x_window)
        plt.ylim(y_window)
    plt.tight_layout()
    plt.show()
@dataclass()
class TempSurface:
    """A lightweight slice of a surface's per-point quadrature data.

    Ideally, these arrays are all views into other arrays without copying.
    """

    pts: np.ndarray
    normals: np.ndarray
    quad_wts: np.ndarray
    jacobians: np.ndarray
def build_temp_surface(surf, s, e):
    """Return a TempSurface viewing rows [s, e) of ``surf``'s arrays."""
    window = slice(s, e)
    return TempSurface(
        surf.pts[window],
        surf.normals[window],
        surf.quad_wts[window],
        surf.jacobians[window],
    )
| 35.149758 | 94 | 0.665063 |
2875e4f861693b2c0256a550012c98712e49a11c | 644 | py | Python | 2017/day2/corruptionChecksum.py | madeleine-adams/advent_of_code_2020 | 8f142a91d1a40390aad274c5e0513f50b168d029 | [
"MIT"
] | null | null | null | 2017/day2/corruptionChecksum.py | madeleine-adams/advent_of_code_2020 | 8f142a91d1a40390aad274c5e0513f50b168d029 | [
"MIT"
] | null | null | null | 2017/day2/corruptionChecksum.py | madeleine-adams/advent_of_code_2020 | 8f142a91d1a40390aad274c5e0513f50b168d029 | [
"MIT"
] | null | null | null | file = open('corruptionChecksum_input.txt', 'r')
# Read every row of the puzzle input into memory.
# NOTE(review): `file` is opened above and never closed explicitly; acceptable
# for a short-lived script, but a `with` block would be tidier.
spreadsheet = file.readlines()
def find_smallest(inputs):
    """Return the smallest value in *inputs* (strings of integers) as an int.

    Assumes *inputs* is non-empty, matching the original implementation
    (which indexed ``inputs[0]`` unconditionally).
    """
    # Idiomatic replacement for the hand-rolled minimum loop.
    return min(int(value) for value in inputs)
def find_largest(inputs):
    """Return the largest value in *inputs* (strings of integers) as an int.

    Assumes *inputs* is non-empty, matching the original implementation.
    """
    # Idiomatic replacement for the hand-rolled maximum loop.
    return max(int(value) for value in inputs)
# Day 2 part 1: the checksum is the sum over rows of (row max - row min).
checksum = 0
for row in spreadsheet:
    values = row.split()
    lowest = find_smallest(values)
    highest = find_largest(values)
    checksum += highest - lowest
print(checksum)
| 20.774194 | 48 | 0.636646 |
287609c52314ac1d737c0937fa4d8b3058a4d68f | 3,579 | py | Python | neurst/cli/inspect_checkpoint.py | ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | [
"Apache-2.0"
] | 208 | 2020-11-12T03:56:41.000Z | 2022-03-27T07:01:27.000Z | neurst/cli/inspect_checkpoint.py | ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | [
"Apache-2.0"
] | 16 | 2021-02-20T07:57:03.000Z | 2022-01-27T07:36:31.000Z | neurst/cli/inspect_checkpoint.py | ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | [
"Apache-2.0"
] | 33 | 2020-11-12T04:44:50.000Z | 2022-03-23T09:22:29.000Z | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import tensorflow as tf
from neurst.models.model_utils import _summary_model_variables
from neurst.utils.compat import wrapper_var_name
def cli_main():
    """Inspect a TensorFlow checkpoint from the command line.

    Driven by ``sys.argv``: with no arguments (or ``help``) it prints usage
    and exits; with a model dir it lists variables/shapes; with an extra
    argument it either prints that exact variable's tensor or lists the
    variables matching it as a regular expression. Raises SystemExit when
    showing help or when the checkpoint cannot be loaded.
    """
    structured = False
    if "--structured" in sys.argv:
        structured = True
        sys.argv.remove("--structured")
    if len(sys.argv) == 1 or (len(sys.argv) == 2 and (sys.argv[1] in ["help", "--help", "-h"])):
        print("Usage: ")
        print(" >> python3 -m neurst.cli.inspect_checkpoint modeldir_or_checkpoint (--structured)")
        print(" List all variables and their shapes.")
        print()
        print(" >> python3 -m neurst.cli.inspect_checkpoint model_dir/checkpoint regular_expr")
        print(" List the variables and their shapes if the name matches the `regular_expr`.")
        print()
        print(" >> python3 -m neurst.cli.inspect_checkpoint model_dir/checkpoint var_name")
        print(" Print the variable tensor.")
        sys.exit()
    model_dir = sys.argv[1]
    var_name = None
    if len(sys.argv) == 3:
        var_name = sys.argv[2]
    # Accept either a model directory (resolve its latest checkpoint) or a
    # direct checkpoint path.
    latest_ckpt_path = tf.train.latest_checkpoint(model_dir)
    if not latest_ckpt_path:
        latest_ckpt_path = model_dir
    try:
        varname_shape_list = tf.train.list_variables(latest_ckpt_path)
    except (tf.errors.NotFoundError, ValueError, tf.errors.DataLossError):
        print(f"ERROR: fail to load checkpoint from {model_dir}")
        sys.exit()
    # Map display-friendly names to (checkpoint name, shape). BUG FIX: the
    # original built this identical dict twice (once unconditionally and once
    # more when var_name was given); build it exactly once.
    clean_varname2ckpt_varname = {
        wrapper_var_name(varname): (varname, shape)
        for varname, shape in varname_shape_list}
    specify_varname = False
    if var_name is not None and var_name in clean_varname2ckpt_varname:
        # An exact variable name was given: print that tensor and finish.
        specify_varname = True
        print(f"Variable name: {var_name}")
        print(f"Tensor Shape: {str(clean_varname2ckpt_varname[var_name][1])}")
        print("Tensor Value: ")
        print(tf.train.load_variable(latest_ckpt_path, clean_varname2ckpt_varname[var_name][0]))
    if not specify_varname:
        if not structured:
            if var_name is None:
                print("\tvariable name \t shape")
            else:
                print(f"\tvariable name ({var_name}) \t shape")
        print_varname_shape_list = []
        for clean_varname, (varname, shape) in clean_varname2ckpt_varname.items():
            if varname == "_CHECKPOINTABLE_OBJECT_GRAPH":
                # Internal object-graph entry, not a real variable.
                continue
            if var_name is None or re.search(var_name, clean_varname):
                print_varname_shape_list.append((clean_varname, shape))
        if structured:
            _summary_model_variables(print_varname_shape_list, print)
        else:
            for clean_varname, shape in print_varname_shape_list:
                print(clean_varname + "\t" + str(shape))
if __name__ == "__main__":
cli_main()
| 41.137931 | 103 | 0.658843 |
28762253888319609860c7b7288acdb032a74ac2 | 1,621 | py | Python | homepage/migrations/0005_professor_test_data.py | oriAdler/ClassRater | a68492ea8eab1475ab604da9d6efc99c73954d4b | [
"MIT"
] | 1 | 2021-04-12T18:05:12.000Z | 2021-04-12T18:05:12.000Z | homepage/migrations/0005_professor_test_data.py | ellaml/ClassRater | d786f9fb4bb51041590e46165badf12a7beef67e | [
"MIT"
] | 103 | 2021-03-09T07:12:20.000Z | 2021-05-23T06:13:21.000Z | homepage/migrations/0005_professor_test_data.py | ellaml/ClassRater | d786f9fb4bb51041590e46165badf12a7beef67e | [
"MIT"
] | 17 | 2021-03-09T07:07:44.000Z | 2021-05-02T16:31:45.000Z | from django.db import migrations, transaction
class Migration(migrations.Migration):
    """Data migration that seeds test professors and links them to courses."""

    # Must run after the schema and the course/followed-courses fixtures,
    # because the course primary keys referenced below need to exist.
    dependencies = [
        ('homepage', '0001_initial'),
        ('homepage', '0003_course_test_data'),
        ('homepage', '0004_followed_courses_test_data'),
    ]

    def generate_data(apps, schema_editor):
        """Create three test professors and their professor-to-course links."""
        # NOTE(review): models are imported directly instead of via
        # apps.get_model(), so this migration runs against the *current*
        # model definitions — confirm this is intentional.
        from homepage.models import Course
        from homepage.models import Professor
        from homepage.models import Professor_to_Course
        # These entries are plain strings — the parentheses do NOT make tuples.
        professor_test_data = [
            ('Septima Vector'),
            ('Sybill Patricia Trelawney'),
            ('Bathsheda Babbling'),
        ]
        professors = [Professor(name=data) for data in professor_test_data]
        # 10221 - Grammatica in Arithmancy, Septima Vector
        # 12357 - Numerology, Septima Vector
        # 10231 - UnFogging the Future, Sybill Patricia Trelawney
        # 10111 - Resonance in Runes and Signs, Bathsheda Babbling
        pro_to_course_test_data = [
            (professors[0], Course.objects.get(pk=10221)),
            (professors[0], Course.objects.get(pk=12357)),
            (professors[1], Course.objects.get(pk=10231)),
            (professors[2], Course.objects.get(pk=10111)),
        ]
        # Save professors first (link rows reference them), all atomically so
        # a failure leaves no partial fixture data behind.
        with transaction.atomic():
            for professor in professors:
                professor.save()
            for professor, course_id in pro_to_course_test_data:
                Professor_to_Course(
                    professor_id=professor,
                    course_id=course_id).save()

    operations = [
        migrations.RunPython(generate_data),
    ]
| 33.770833 | 76 | 0.595312 |
2876482aeef7877b4183338fac2e85b74a0eaedf | 1,220 | py | Python | scripts/hsvanalyzer.py | acmerobotics/relic-recovery | 4ff05bbf906829aef0a98bc32691e5d0eadc1d8f | [
"MIT"
] | 32 | 2018-01-17T03:00:02.000Z | 2022-01-15T18:30:48.000Z | scripts/hsvanalyzer.py | acmerobotics/relic-recovery | 4ff05bbf906829aef0a98bc32691e5d0eadc1d8f | [
"MIT"
] | 4 | 2017-10-21T20:28:27.000Z | 2018-04-02T05:27:00.000Z | scripts/hsvanalyzer.py | acmerobotics/relic-recovery | 4ff05bbf906829aef0a98bc32691e5d0eadc1d8f | [
"MIT"
] | 7 | 2018-02-21T00:59:20.000Z | 2021-01-21T21:52:17.000Z | import cv2
import numpy as np
from matplotlib import pyplot as plt
from util import resize_min_dim, smart_hsv_range
IMAGE_FILENAME = '/Users/ryanbrott/Desktop/36.jpg'
MIN_DIMENSION = 480
# LOWER_HSV, UPPER_HSV = (170, 80, 0), (7, 255, 255)
LOWER_HSV, UPPER_HSV = (175, 80, 80), (22, 255, 255)
image = cv2.imread(IMAGE_FILENAME)
image = resize_min_dim(image, MIN_DIMENSION)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = smart_hsv_range(hsv, LOWER_HSV, UPPER_HSV)
hue_hist = cv2.calcHist([hsv], [0], None, [180], [0, 180])
plt.gcf().canvas.set_window_title(IMAGE_FILENAME)
plt.subplot(2, 3, 1)
plt.plot(hue_hist)
plt.xlim([0, 180])
plt.title('Hue Histogram')
plt.subplot(2, 3, 2)
plt.imshow(hsv[:,:,0], cmap=plt.cm.binary)
plt.title('Hue')
plt.subplot(2, 3, 4)
plt.imshow(hsv[:,:,1], cmap=plt.cm.binary)
plt.title('Saturation')
plt.subplot(2, 3, 5)
plt.imshow(hsv[:,:,2], cmap=plt.cm.binary)
plt.title('Value')
plt.subplot(2, 3, 3)
plt.imshow(mask, cmap=plt.cm.binary)
plt.title('Mask')
mask_3c = np.zeros(image.shape, np.uint8)
for i in range(3):
mask_3c[:,:,i] = mask
plt.subplot(2, 3, 6)
plt.imshow(cv2.cvtColor(cv2.bitwise_and(image, mask_3c), cv2.COLOR_BGR2RGB))
plt.title('Image')
plt.show()
| 23.461538 | 76 | 0.705738 |
2876ee5a4ee75e47b6a9d9c1abc057001acf18bc | 1,114 | py | Python | FEniCSUI/AnalysesHub/models.py | nasserarbabi/FEniCSUI-dev | f8f161e1b49932843e01301212e7d031fff4f6c8 | [
"MIT"
] | null | null | null | FEniCSUI/AnalysesHub/models.py | nasserarbabi/FEniCSUI-dev | f8f161e1b49932843e01301212e7d031fff4f6c8 | [
"MIT"
] | 8 | 2021-03-10T21:59:52.000Z | 2021-09-22T19:12:57.000Z | FEniCSUI/AnalysesHub/models.py | nasserarbabi/FEniCSUI | f8f161e1b49932843e01301212e7d031fff4f6c8 | [
"MIT"
] | null | null | null | from django.db import models
from dashboard.models import projects
class AnalysisConfig(models.Model):
    """Per-project analysis configuration (one row per project)."""

    # One-to-one link to the owning project; doubles as the primary key and
    # cascades on project deletion.
    project = models.OneToOneField(
        projects,
        on_delete=models.CASCADE,
        primary_key=True)
    # Serialized payloads stored as text — presumably JSON; confirm with the
    # views that read/write these fields.
    mesh = models.TextField()
    visualizationMesh = models.TextField()
    config = models.TextField()
    result = models.TextField()

    def __str__(self):
        # Display the owning project's name in admin/listings.
        return self.project.name
class SolverResults(models.Model):
    """Location of solver output for a project (one row per project)."""

    # One-to-one link to the owning project; primary key, cascade delete.
    project = models.OneToOneField(
        projects,
        on_delete=models.CASCADE,
        primary_key=True)
    # Filesystem path (or similar locator) to the stored results.
    path = models.TextField()

    def __str__(self):
        return self.project.name
class SolverProgress(models.Model):
    """Latest reported solver progress for a project (one row per project)."""

    # One-to-one link to the owning project; primary key, cascade delete.
    project = models.OneToOneField(
        projects,
        on_delete=models.CASCADE,
        primary_key=True)
    # Free-form progress payload reported by the solver.
    progress = models.TextField()

    def __str__(self):
        return self.project.name
class DockerLogs(models.Model):
    """Captured container log output for a project (one row per project)."""

    # One-to-one link to the owning project; primary key, cascade delete.
    project = models.OneToOneField(
        projects,
        on_delete=models.CASCADE,
        primary_key=True)
    # Raw log text from the project's Docker container.
    log = models.TextField()

    def __str__(self):
        return self.project.name
2877efb3076b7e16a9739c3098ef12ad38d235d3 | 1,924 | py | Python | code/pyto/util/test/_test_numpy_plus.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 12 | 2020-01-08T01:33:02.000Z | 2022-03-16T00:25:34.000Z | code/pyto/util/test/_test_numpy_plus.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 8 | 2019-12-19T19:34:56.000Z | 2022-03-10T10:11:28.000Z | code/pyto/util/test/_test_numpy_plus.py | anmartinezs/pyseg_system | 5bb07c7901062452a34b73f376057cabc15a13c3 | [
"Apache-2.0"
] | 2 | 2022-03-30T13:12:22.000Z | 2022-03-30T18:12:10.000Z | """
ToDo: convert to proper format
Tests for modules in this directory
"""
from __future__ import print_function
# Author: Vladan Lucic, last modified 05.04.07
import scipy
import scipy.ndimage
import numpy
import pyto.util.numpy_plus as np_plus
# define test arrays
aa = numpy.arange(12, dtype='int32')
aa = aa.reshape((3,4))
bb = numpy.arange(6, dtype='int32')
bb = bb.reshape((2,3))
def run():
print("Checking numpy_plus.intersect_arrays ...")
# run
print("\n\taa: ")
print(aa)
print("\tbb: ")
print(bb)
res = np_plus.intersect_arrays(aa.shape, bb.shape)
print("\n\tno offset: ")
print("\t", res)
print(aa[res[0]])
print(bb[res[1]])
offset_1 = (0,0)
offset_2 = (1,0)
res = np_plus.intersect_arrays(aa.shape, bb.shape,
offset_1=offset_1, offset_2=offset_2)
print("\n\toffset_1 = ", offset_1, ", offset_2 = ", offset_2, ":")
print("\t", res)
print(aa[res[0]])
print(bb[res[1]])
offset_1 = (0,0)
offset_2 = (0,2)
res = np_plus.intersect_arrays(aa.shape, bb.shape,
offset_1=offset_1, offset_2=offset_2)
print("\n\toffset_1 = ", offset_1, ", offset_2 = ", offset_2, ":")
print("\t", res)
print(aa[res[0]])
print(bb[res[1]])
offset_1 = (1,0)
offset_2 = (0,2)
res = np_plus.intersect_arrays(aa.shape, bb.shape,
offset_1=offset_1, offset_2=offset_2)
print("\n\toffset_1 = ", offset_1, ", offset_2 = ", offset_2, ":")
print("\t", res)
print(aa[res[0]])
print(bb[res[1]])
offset_1 = (2,2)
offset_2 = (1,4)
res = np_plus.intersect_arrays(aa.shape, bb.shape,
offset_1=offset_1, offset_2=offset_2)
print("\n\toffset_1 = ", offset_1, ", offset_2 = ", offset_2, ":")
print("\t", res)
print(aa[res[0]])
print(bb[res[1]])
| 26.722222 | 73 | 0.572245 |
2878821e6aef46d6ef7a165e5a576c9bd3a04754 | 922 | py | Python | GithubRepositoryStatistics-Python3/Repository.py | SmileZXLee/GithubRepositoryStatistics | 62eeddd715aecf268c48b39aa596f1168a3c2661 | [
"MIT"
] | 1 | 2020-07-15T14:12:53.000Z | 2020-07-15T14:12:53.000Z | GithubRepositoryStatistics-Python3/Repository.py | SmileZXLee/GithubRepositoryStatistics | 62eeddd715aecf268c48b39aa596f1168a3c2661 | [
"MIT"
] | null | null | null | GithubRepositoryStatistics-Python3/Repository.py | SmileZXLee/GithubRepositoryStatistics | 62eeddd715aecf268c48b39aa596f1168a3c2661 | [
"MIT"
] | null | null | null | #coding=utf-8
__author__ = 'zxlee'
__github__ = 'https://github.com/SmileZXLee/GithubRepositoryStatistics'
class Repository :
def __init__(self,brief,url,language,star_count,fork_count,star_url,fork_url,is_fork):
#仓库介绍
self.brief = brief.replace('\r','').replace('\n','').replace('\t','').strip()
#仓库url
self.url = url
#仓库所使用的编程语言
self.language = language
#star数量
if star_count == 'None':
self.star_count = '0'
else:
self.star_count = star_count
self.star_count = self.star_count.replace(',','')
#fork数量
if fork_count == 'None':
self.fork_count = '0'
else:
self.fork_count = fork_count
self.fork_count = self.fork_count.replace(',','')
#star成员页面
self.star_url = star_url
#fork成员页面
self.fork_url = fork_url
#是否是fork他人的项目
self.is_fork = is_fork
#仓库的标题
self.title = self.url.split('/')[-1]
| 26.342857 | 87 | 0.627983 |
287c433b713a1b08f3c14e17afb0adcebbc1cab6 | 3,242 | py | Python | src/www/__init__.py | jbrezmorf/codecritic | 190df65f2f12667469b55abed48a45de5dc18965 | [
"MIT"
] | null | null | null | src/www/__init__.py | jbrezmorf/codecritic | 190df65f2f12667469b55abed48a45de5dc18965 | [
"MIT"
] | 20 | 2019-05-26T12:13:19.000Z | 2020-09-09T16:37:09.000Z | src/www/__init__.py | jbrezmorf/codecritic | 190df65f2f12667469b55abed48a45de5dc18965 | [
"MIT"
] | 1 | 2020-04-13T09:02:48.000Z | 2020-04-13T09:02:48.000Z | #!/bin/python3
# author: Jan Hybs
import enum
import pathlib
from bson import objectid
from flask import Flask, redirect, session, render_template, url_for
import flask.json
from flask_cors import CORS
from loguru import logger
from entities.crates import ICrate
from env import Env
from functools import wraps
def default(obj):
return CustomJSONEncoder().default(obj)
class CustomJSONEncoder(flask.json.JSONEncoder):
def default(self, obj):
try:
from processing import ExecutorStatus
if isinstance(obj, ExecutorStatus):
return obj.str
if isinstance(obj, enum.Enum):
return obj.value
if isinstance(obj, (pathlib.Path, pathlib.PosixPath)):
return str(obj)
if isinstance(obj, objectid.ObjectId):
return str(obj)
if isinstance(obj, ICrate):
return obj.peek()
if hasattr(obj, 'peek'):
return obj.peek()
return flask.json.JSONEncoder.default(self, obj)
except:
logger.exception('encoder error')
return {}
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'user' not in session:
if Env.backdoor:
logger.info('using backdoor for login')
return redirect(url_for('backdoor_login', id='root', role='root'))
else:
return redirect(Env.url_login)
return f(*args, **kwargs)
return decorated_function
def dump_error(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
import traceback, html
logger.exception('@dump_error')
return '<pre>%s</pre>' % html.escape(traceback.format_exc())
return decorated_function
def admin_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
from database.objects import User
try:
user = User(session['user'])
if not user.is_admin():
raise Exception('Access denied')
except:
return redirect(Env.url_login)
return f(*args, **kwargs)
return decorated_function
def render_template_base(**kwargs):
def render(template, **kw):
kw2 = kwargs.copy()
kw2.update(kw)
return render_template(template, **kw2)
return render
render_template_ext = render_template_base(Env=Env, version=Env.version)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
app.root_path = Env.www
app.json_encoder = CustomJSONEncoder
cors = CORS(app)
# @app.context_processor
# def override_url_for():
# """
# Generate a new token on every request to prevent the browser from
# caching static files.
# """
# return dict(url_for=dated_url_for)
#
#
# def dated_url_for(endpoint, **values):
# if endpoint == 'static':
# filename = values.get('filename', None)
# if filename:
# file_path = os.path.join(app.root_path, endpoint, filename)
# values['q'] = int(os.stat(file_path).st_mtime)
# return url_for(endpoint, **values)
| 25.936 | 82 | 0.612585 |
287d78c0342acb7571ce49d00f17612456b0c4a2 | 18,740 | py | Python | pages/getAltData.py | ngocuong0105/ts_webapp | 4399862ead6eb2d0c993d36fffe14967984ad4b2 | [
"MIT"
] | null | null | null | pages/getAltData.py | ngocuong0105/ts_webapp | 4399862ead6eb2d0c993d36fffe14967984ad4b2 | [
"MIT"
] | null | null | null | pages/getAltData.py | ngocuong0105/ts_webapp | 4399862ead6eb2d0c993d36fffe14967984ad4b2 | [
"MIT"
] | null | null | null | import base64
from collections import deque
from io import BytesIO
import os
import time
from PIL import Image
import pandas as pd
from pandas.core.arrays import boolean
import praw
import requests
import streamlit as st
import datetime
import pytesseract
pytesseract.pytesseract.tesseract_cmd ='context/tesseract'
import tweepy
from framework.page import Page
from framework.utils import markdown_css, click_button
class getAltData(Page):
    '''
    The "Get Alternative Data" page.

    Lets the user download alternative data as csv files from two sources:
    Reddit (post comments or memes) and Twitter (tweets by hashtag).
    '''
    def __init__(self, title: str) -> None:
        super().__init__(title)
        # Color used for the small helper texts rendered via markdown_css.
        self.white = '#ffffff'

    def load_page(self) -> None:
        '''
        Mandatory load page method: renders the source selector and
        dispatches to the matching download flow.
        '''
        self.show_title()
        data_source = self.get_data_source()
        options = ['🚀 Reddit', '🐦 Twitter']
        # Map option index -> option label (0: Reddit, 1: Twitter).
        d = dict(enumerate(options))
        if data_source == d[0]:
            options = ['Posts', 'Memes', '']
            reddit_data_type = st.selectbox('Select reddit data',options, index = 2)
            if reddit_data_type =='Posts':
                self.reddit_post_data()
            elif reddit_data_type=='Memes':
                self.reddit_meme_data()
        elif data_source == d[1]:
            self.twitter_data()

    def twitter_data(self) -> None:
        '''
        Controls the user flow on the Streamlit UI for downloading Tweets
        as a csv file.  Uses st.session_state to persist the scraped frame
        across Streamlit reruns.
        '''
        if 'twitter_data' not in st.session_state:
            input_for_twitter = self.twitter_input()
            if click_button('Load Tweets'):
                hashtag, num_tweets, start_twitter, end_twitter = input_for_twitter
                df = self.scrape_tweets(hashtag, num_tweets, start_twitter, end_twitter)
                st.session_state['twitter_data'] = df
                st.experimental_rerun()
        elif 'download' not in st.session_state:
            df = st.session_state['twitter_data']
            st.write(df)
            if click_button('Download csv'):
                # Serve the csv inline as a base64 data-URI download link.
                csv = df.to_csv(index=False)
                b64 = base64.b64encode(csv.encode()).decode() # some strings
                linko= f'<a href="data:file/csv;base64,{b64}" download="twitter_data.csv">Click to download </a>'
                st.markdown(linko, unsafe_allow_html=True)
            if click_button('Finished'):
                # Reset the whole flow for the next download.
                for key in st.session_state.keys():
                    del st.session_state[key]
                st.experimental_rerun()

    def reddit_post_data(self) -> None:
        '''
        Controls the user flow on the Streamlit UI for downloading Reddit
        post comments as a csv file.
        '''
        if 'reddit_posts_data' not in st.session_state:
            input_for_reddit = self.reddit_user_input_posts()
            if click_button('Load Posts'):
                df_comments = self.scrape_reddit_data(input_for_reddit)
                st.session_state['reddit_posts_data'] = df_comments
                st.experimental_rerun()
        elif 'download' not in st.session_state:
            df_comments = st.session_state['reddit_posts_data']
            st.write(df_comments)
            if click_button('Download csv'):
                csv = df_comments.to_csv(index=False)
                b64 = base64.b64encode(csv.encode()).decode()
                linko= f'<a href="data:file/csv;base64,{b64}" download="reddit_comments.csv">Click to download </a>'
                st.markdown(linko, unsafe_allow_html=True)
            if click_button('Finished'):
                for key in st.session_state.keys():
                    del st.session_state[key]
                st.experimental_rerun()

    def reddit_meme_data(self) -> None:
        '''
        Controls the user flow on the Streamlit UI for downloading Reddit
        memes as a csv file.
        '''
        if 'reddit_memes_data' not in st.session_state:
            input_for_reddit = self.reddit_user_input_memes()
            if click_button('Load Memes'):
                subreddit, num_memes, start_reddit, end_reddit = input_for_reddit
                df_memes = self.scrape_reddit_memes(subreddit, num_memes, start_reddit, end_reddit)
                st.session_state['reddit_memes_data'] = df_memes
                st.experimental_rerun()
        elif 'download' not in st.session_state:
            df_memes = st.session_state['reddit_memes_data']
            st.write(df_memes)
            if click_button('Download csv'):
                csv = df_memes.to_csv(index=False)
                b64 = base64.b64encode(csv.encode()).decode()
                linko= f'<a href="data:file/csv;base64,{b64}" download="reddit_memes.csv">Click to download </a>'
                st.markdown(linko, unsafe_allow_html=True)
            if click_button('Finished'):
                for key in st.session_state.keys():
                    del st.session_state[key]
                st.experimental_rerun()

    def reddit_user_input_posts(self) -> tuple:
        '''
        Takes user input for downloading reddit posts data.

        Returns a 6-tuple (subreddit, start, end, num_subs, num_comments,
        max_level); on incomplete input returns a 6-tuple of empty strings.
        '''
        # select subreddit
        options = ['r/wallstreetbets', 'r/stocks', 'r/pennystocks', 'r/robinhood', 'r/GME', 'other']
        subreddit = st.selectbox('Select your favourite subreddit',options, index = 0)
        if subreddit == 'other':
            subreddit = st.text_input('Choose subreddit (e.g r/subRedditName, subRedditName)')
            subreddit = subreddit.strip() # remove trailing spaces
        # select start-end dates
        start_date = datetime.date.today() - datetime.timedelta(days=30)
        end_date = start_date + datetime.timedelta(days=29)
        start_reddit = pd.to_datetime(st.text_input('Select start date for reddit post', f'{start_date}'))
        txt = f'Select end date for reddit post.'
        end_reddit = pd.to_datetime(st.text_input(txt, f'{end_date}'))
        # select number of submissions, comments, level
        num_subs,num_comments,max_level = '','',''
        num_subs = st.number_input('Select number of hottest posts to download (recommended 10-100)', value = 20, step=1)
        if num_subs != '':
            num_subs = int(num_subs)
        num_comments = st.number_input("Select number of comments with most upvotes to download (recommended 50-500)", value = 100, step=1)
        if num_comments != '':
            num_comments = int(num_comments)
        txt = 'Each reddit post has comments and replies to comments.\
        Replies can have their own replies and so on. This defines a tree of comments and replies.\
        We will use Breadth First Traversal to scrape all comments and replies. Define level of tree such that\
        level 0 considers only comments to the post, then\
        level 1 considers replies to comments, etc.'
        markdown_css(txt,12,self.white)
        max_level = st.number_input("Select maximum level of comments tree (recommended 1-10)", value = 5, step = 1)
        if max_level != '':
            max_level = int(max_level)
        # BUGFIX: error paths previously returned 5-tuples while
        # scrape_reddit_data unpacks 6 values — return 6 empties instead.
        if num_subs == '':
            st.write('Please choose number of hottest posts.')
            return '','','','','',''
        elif num_comments == '':
            st.write('Please choose number of comments.')
            return '','','','','',''
        elif max_level == '':
            st.write('Please choose level.')
            return '','','','','',''
        return subreddit, start_reddit, end_reddit, num_subs, num_comments, max_level

    def reddit_user_input_memes(self) -> tuple:
        '''
        Takes user input for downloading Reddit memes.

        Returns (subreddit, num_memes, start_reddit, end_reddit) with the
        'r/' prefix already stripped from the subreddit name.
        '''
        options = ['r/wallstreetbets', 'r/stocks', 'r/pennystocks', 'r/robinhood', 'r/GME', 'other']
        subreddit = st.selectbox('Select your favourite subreddit',options, index = 0)
        if subreddit == 'other':
            subreddit = st.text_input('Choose subreddit (e.g r/subRedditName, subRedditName)')
            subreddit = subreddit.strip() # remove trailing spaces
        # praw expects the bare name, without the 'r/' prefix.
        if subreddit[:2]=='r/':
            subreddit = subreddit[2:]
        num_memes = st.number_input('Select number of memes you want to consider', value = 50)
        # select start-end dates
        start_date = datetime.date.today() - datetime.timedelta(days=30)
        end_date = start_date + datetime.timedelta(days=29)
        start_reddit = pd.to_datetime(st.text_input('Select start date for reddit post', f'{start_date}'))
        txt = f'Select end date for reddit post.'
        end_reddit = pd.to_datetime(st.text_input(txt, f'{end_date}'))
        return subreddit, num_memes, start_reddit, end_reddit

    def scrape_reddit_data(self, user_input:tuple) -> pd.DataFrame:
        '''
        Scrapes reddit posts and returns them in the form of a dataframe.
        Gives date, author, text and upvotes of hottest posts, comments and replies.
        Comments have replies which on their own have replies. This defines a
        comments/replies tree; BFS is used to traverse all comments up to
        max_level and store them in the dataframe.
        '''
        subreddit, start, end, num_subs, num_comments, max_level = user_input
        # reddit API client (credentials from Streamlit secrets)
        reddit = praw.Reddit(
            client_id=st.secrets["client_id"],
            client_secret=st.secrets["client_secret"],
            user_agent=st.secrets["user_agent"],
            username=st.secrets["username"],
            password=st.secrets["password"],
        )
        if subreddit[:2]=='r/':
            subreddit = subreddit[2:]
        r_subreddit = reddit.subreddit(subreddit)
        sub_placeholder = st.empty()
        com_placeholder = st.empty()
        bar = st.progress(0)
        i = 0
        comments = []
        denom = num_subs+1
        for r_submission in r_subreddit.hot(limit=None):
            if not self._within_time_interval(r_submission,start,end):
                continue
            i += 1
            bar.progress(i/denom)
            if num_subs == 0:
                break
            num_subs -= 1
            sub_placeholder.text(f'Scraping post: "{r_submission.title}"')
            # handle "More Comments" tabs so the comment forest is complete
            r_submission.comments.replace_more(limit=0)
            # seed level for BFS: top-level comments at level 0
            seed = []
            for comment in r_submission.comments[:]:
                if not self._within_time_interval(comment,start,end):
                    continue
                seed.append((comment,0))
            if len(seed)==0:
                continue
            # sort comments in submission by score
            seed.sort(key = lambda x: x[0].score, reverse = True)
            # show the top comment of the current submission
            com_placeholder.text(f'Top comment in post: {seed[0][0].body}')
            # BFS through comments and replies, keeping only levels <= max_level
            queue = deque(seed[:num_comments]) # take only top comments
            while queue:
                comment,level = queue.popleft()
                if not self._within_time_interval(comment,start,end):
                    continue
                if level<=max_level:
                    comments.append(comment)
                    queue.extend([(com,level+1) for com in comment.replies])
        # NOTE(review): the very first scraped comment is dropped here —
        # looks intentional (kept from the original) but confirm.
        comments = comments[1:]
        comments_formatted =[]
        for com in comments:
            date = datetime.datetime.utcfromtimestamp(com.created_utc).strftime('%Y-%m-%d %H:%M:%S')
            # deleted accounts have author == None
            if com.author:
                comments_formatted.append((date,com.author.name,com.body,com.score))
            else:
                comments_formatted.append((date,None,com.body,com.score))
        bar.progress(1.00)
        sub_placeholder.empty()
        com_placeholder.empty()
        df_comments = pd.DataFrame(comments_formatted)
        df_comments.columns = ['Date','Author','Text','Upvotes']
        return df_comments

    def scrape_reddit_memes(self, subreddit:str, num_memes:int, start_reddit:str, end_reddit:str) -> pd.DataFrame:
        '''
        Scrapes reddit memes and returns them in a dataframe. Memes themselves
        are stored as urls; direct image download (e.g. .png files) is avoided
        due to memory constraints. Text in memes is extracted with tesseract.
        '''
        # reddit API client (credentials from Streamlit secrets)
        reddit = praw.Reddit(
            client_id=st.secrets["client_id"],
            client_secret=st.secrets["client_secret"],
            user_agent=st.secrets["user_agent"],
            username=st.secrets["username"],
            password=st.secrets["password"],
        )
        subreddit = reddit.subreddit(subreddit)
        allowed_image_extensions = ['.jpg', '.jpeg', '.png']
        passed = 0
        memes = []
        post_placeholder = st.empty()
        bar = st.progress(0)
        image_placeholder = st.empty()
        start = pd.to_datetime(start_reddit)
        end = pd.to_datetime(end_reddit)
        s = time.time()
        for post in subreddit.hot(limit=None):
            url = post.url
            _,ext = os.path.splitext(url)
            if ext in allowed_image_extensions and self._within_time_interval(post,start,end):
                response = requests.get(url)
                img = Image.open(BytesIO(response.content))
                self._show_image(url)
                # OCR the meme text out of the image
                meme_txt = pytesseract.image_to_string(img)
                title = post.title
                date = datetime.datetime.utcfromtimestamp(post.created_utc).strftime('%Y-%m-%d %H:%M:%S')
                memes.append((date,meme_txt,title,url))
                passed+=1
                post_placeholder.text(f'Loading meme at {url}')
                bar.progress(passed/num_memes)
                image_placeholder.image(f'{url}',use_column_width = 'auto')
                if passed>=num_memes:
                    break
        e = time.time()
        bar.progress(1.0)
        if passed<num_memes:
            txt = f'In this subreddit we found only {passed} memes/pictures for the selected scraping period.'
            # BUGFIX: previously called undefined self._markdown_css with
            # undefined self.text_size; use the imported helper as elsewhere.
            markdown_css(txt,12,self.white)
        post_placeholder.text(f'Memes/pictures scraped in {round(e-s,4)} seconds. They are hot out of the oven and ready to download!')
        df_memes = pd.DataFrame(memes)
        df_memes.columns = ['Date','Meme Text', 'Post Title', 'URL']
        return df_memes

    def get_data_source(self) -> str:
        '''
        Input for selected data source.
        '''
        st.subheader('Select Data Source')
        options = ['🚀 Reddit', '🐦 Twitter', '']
        data_source = st.selectbox('Select source',options, index = len(options)-1)
        return data_source

    def twitter_input(self) -> tuple:
        '''
        User input for getting twitter data.

        Returns (hashtag, num_tweets, start, end) with dates formatted as
        yyyymmddHHMM strings, as the Twitter premium search API expects.
        '''
        options = ['#GME','#trump','#StockMarket','#bitcoin','#crypto', 'other']
        hashtag = st.selectbox('Select popular hashtag',options, index = 0)
        if hashtag == 'other':
            hashtag = st.text_input('Choose hashtag (e.g #hashtag)')
            hashtag = hashtag.strip() # remove trailing spaces
        num_tweets = st.number_input('Select number of tweets you want to dowload', value = 100)
        # select start-end dates
        start_date = datetime.date.today() - datetime.timedelta(days=5)
        end_date = start_date + datetime.timedelta(days=1)
        txt = 'Select start date for first tweet'
        start_tweet = st.text_input(txt, f'{start_date}')
        txt = f'Select end date for last tweet post.'
        end_tweet = st.text_input(txt, f'{end_date}')
        # 'yyyy-mm-dd' -> 'yyyymmdd' + 'HHMM' (midnight)
        start_tweet = ''.join(start_tweet.split('-'))+'0000'
        end_tweet = ''.join(end_tweet.split('-'))+'0000'
        return hashtag, num_tweets, start_tweet, end_tweet

    def scrape_tweets(self, hashtag:str, num_tweets:int, start_twitter:str, end_twitter:str) -> pd.DataFrame:
        '''
        Scrape Tweets for selected hashtag.
        Note there is a user limit for fetches - 10000 tweets per month.
        '''
        text, user_name, media, date, tags = [],[],[],[],[]
        auth = tweepy.OAuthHandler(st.secrets["consumer_key"],st.secrets['consumer_secret'])
        auth.set_access_token(st.secrets['access_token_key'], st.secrets['access_token_secret'])
        api = tweepy.API(auth,wait_on_rate_limit=True)
        tweet_placeholder = st.empty()
        bar = st.progress(0)
        passed = 0
        for status in tweepy.Cursor(api.search_full_archive,'prod', hashtag,
                                    fromDate=start_twitter, toDate=end_twitter).items(num_tweets):
            dt = status.created_at.strftime('%Y-%m-%d %H:%M:%S')
            date.append(dt)
            # truncated tweets carry the full body in extended_tweet
            if status.truncated:
                txt = status.extended_tweet['full_text']
                text.append(txt)
            else:
                txt = status.text
                text.append(txt)
            user_name.append(status.user.screen_name)
            ls_tags = [d['text'] for d in status.entities['hashtags']]
            tags.append(', '.join(ls_tags))
            if status.entities.get('media'):
                media.append(status.entities.get('media')[0]['media_url'])
            else:
                media.append('NA')
            passed += 1
            tweet_placeholder.text(f'Loading Tweet: {txt}')
            bar.progress(passed/num_tweets)
        df = pd.DataFrame()
        df['date'] = date
        df['hashtags'] = tags
        df['user_name'] = user_name
        df['text'] = text
        df['media'] = media
        return df

    def _within_time_interval(self, reddit_obj: praw.models, start:datetime, end: datetime) -> boolean:
        '''
        Check whether a reddit object such as a submission, comment or reply
        was created between the start and end timestamps (day granularity).
        '''
        utc_time = datetime.datetime.utcfromtimestamp(reddit_obj.created_utc).strftime('%Y-%m-%d')
        datetime_time = pd.to_datetime(utc_time)
        if datetime_time < start or datetime_time > end:
            return False
        return True

    def _show_image(self, png_url:str, placeholder:bool = False) -> None:
        '''
        Display an image on the page (used for showing memes); when
        placeholder=True the image is rendered into a reusable placeholder.
        '''
        page_bg_img = f'<style>body {{background-image: url("{png_url}");background-size: 12px;}}</style>'
        self.placeholder = st.empty()
        if placeholder:
            self.placeholder.markdown(page_bg_img, unsafe_allow_html=True)
        else:
            st.markdown(page_bg_img, unsafe_allow_html=True)
287d866f9124af9905e3876a7fc982e255ffcb59 | 157 | py | Python | npt/pipelines/__init__.py | chbrandt/npt | 7d58db9987c8f4d93c4e61e1fc98cce38733d06e | [
"MIT"
] | null | null | null | npt/pipelines/__init__.py | chbrandt/npt | 7d58db9987c8f4d93c4e61e1fc98cce38733d06e | [
"MIT"
] | 2 | 2022-02-18T16:38:13.000Z | 2022-02-18T16:56:33.000Z | npt/pipelines/__init__.py | chbrandt/npt | 7d58db9987c8f4d93c4e61e1fc98cce38733d06e | [
"MIT"
] | 1 | 2022-03-15T09:03:51.000Z | 2022-03-15T09:03:51.000Z | from npt import log
from . import search as Search
from . import download as Download
from . import processing as Processing
from . import mosaic as Mosaic
| 22.428571 | 38 | 0.789809 |
287e24f0cb26f8666ac36c1761cfbf6bd7afe7ac | 252 | py | Python | PythonFiles/classes2.py | IamVaibhavsar/Python_Files | 283d73929a3e11955c71499407c4f8bff56e4273 | [
"MIT"
] | null | null | null | PythonFiles/classes2.py | IamVaibhavsar/Python_Files | 283d73929a3e11955c71499407c4f8bff56e4273 | [
"MIT"
] | null | null | null | PythonFiles/classes2.py | IamVaibhavsar/Python_Files | 283d73929a3e11955c71499407c4f8bff56e4273 | [
"MIT"
] | 1 | 2019-07-26T15:25:21.000Z | 2019-07-26T15:25:21.000Z | class Student:
def __init__(self, name: str, major: str, gpa: float):
    """Store the student's display name, major and grade-point average."""
    self.name=name
    self.major=major
    self.gpa=gpa
def on_honor_roll(self):
    """Return True when the student's GPA meets the 8.5 honor-roll cutoff."""
    # The comparison already yields a bool; no need for an if/else that
    # returns True/False literals.
    return self.gpa >= 8.5
287f3fa6bbdc5def4722251d903f7d2865df6fbb | 324 | py | Python | config.py | Shutey/ShuteyBot2.0 | 16f0baf6e8725bb452cac06fa60d6db023212f6c | [
"MIT"
] | 2 | 2020-04-23T00:52:06.000Z | 2020-04-23T00:56:24.000Z | config.py | Shutey/ShuteyBot2.0 | 16f0baf6e8725bb452cac06fa60d6db023212f6c | [
"MIT"
] | null | null | null | config.py | Shutey/ShuteyBot2.0 | 16f0baf6e8725bb452cac06fa60d6db023212f6c | [
"MIT"
] | null | null | null | token = 'Mzc5NTc3MDc1NTU3NzI4MjU2.DXixQA.DLLB8b81nSyB1IGNJ6WeEeukAQU' #Put Your bots token here
# NOTE(review): this config hard-codes live Discord credentials (the bot token
# above and the owner id below). Any token committed to version control should
# be revoked and loaded from an environment variable instead.
prefix = '^^' # command prefix the bot listens for
link = 'https://discordapp.com/oauth2/authorize?client_id=379577075557728256&scope=bot&permissions=134659080' # bot invite link
ownerid = '227860415709708288' # Discord user id of the bot owner
| 36 | 136 | 0.774691 |
28815fa52327ea951a0284d87cd467c55924b39e | 761 | py | Python | 540. Single Element in a Sorted Array.py | Into-Y0u/Github-Baby | 5e4e6b02f49c2c99533289be9d49911006cad919 | [
"MIT"
] | 2 | 2022-01-25T04:30:26.000Z | 2022-01-25T10:36:15.000Z | 540. Single Element in a Sorted Array.py | Into-Y0u/Leetcode-Baby | 681ad4df01ee908f76d888aa4ccc10f04c03c56f | [
"MIT"
] | null | null | null | 540. Single Element in a Sorted Array.py | Into-Y0u/Leetcode-Baby | 681ad4df01ee908f76d888aa4ccc10f04c03c56f | [
"MIT"
] | null | null | null | class Solution:
def singleNonDuplicate(self, nums: List[int]) -> int:
    """Return the element that appears exactly once in *nums*.

    *nums* is sorted and every other element appears exactly twice.
    Binary search on pair alignment: left of the single element, each
    pair starts at an even index; right of it, pairs start at odd
    indexes. O(log n) time, O(1) space.

    The previous implementation reached the same result with a more
    convoluted search (redundant mid-window guards and a duplicate-skip
    branch); this is the canonical simplification.
    """
    lo, hi = 0, len(nums) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if mid % 2 == 1:
            mid -= 1                      # align mid to the start of a pair
        if nums[mid] == nums[mid + 1]:
            lo = mid + 2                  # pairs intact up to mid; go right
        else:
            hi = mid                      # pairing broken; single is at mid or left
    return nums[lo]
| 34.590909 | 90 | 0.442838 |
2881d51b1365029af80a4c7b248cb3bb598a7958 | 3,466 | py | Python | frispy/throw_data.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | frispy/throw_data.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | frispy/throw_data.py | carrino/FrisPy | db9e59f465ee25d1c037d580c37da8f35b930b50 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 John Carrino
import struct
from dataclasses import dataclass
import numpy as np
from scipy.spatial.transform import Rotation
@dataclass
class ThrowData:
    """In-memory representation of one recorded disc throw.

    The on-disk format (see :meth:`readFromFile`) is a 4-word header,
    a ring buffer of ``NUM_POINTS`` IMU samples (durations plus four
    sensor-vector blocks), and a trailing orientation quaternion and
    temperature reading.  All on-disk values are little-endian; int16
    sensor counts are converted to SI units on load.
    """

    # Number of samples in the capture ring buffer.
    NUM_POINTS = 2000
    # Unit-conversion constants (int16 counts -> SI units).
    SENSORS_GRAVITY_STANDARD = 9.80665
    SENSORS_DPS_TO_RADS = 0.017453293
    OUTPUT_SCALE_FACTOR_400G = (SENSORS_GRAVITY_STANDARD * 400 / ((1 << 15) - 1))
    OUTPUT_SCALE_FACTOR_32G = (SENSORS_GRAVITY_STANDARD * 32 / ((1 << 15) - 1))
    OUTPUT_SCALE_FACTOR_4000DPS = (SENSORS_DPS_TO_RADS * 4000 / ((1 << 15) - 1))

    version: int                  # file-format version word from the header
    durationMicros: list[int]     # per-sample duration in microseconds
    accel0: list[np.ndarray]      # accelerometer 3-vectors, +/-32g range, m/s^2
    gyros: list[np.ndarray]       # gyroscope 3-vectors, rad/s
    accel1: list[np.ndarray]      # accelerometer 3-vectors, +/-400g range, m/s^2
    accel2: list[np.ndarray]      # accelerometer 3-vectors, +/-400g range, m/s^2
    endQ: Rotation                # orientation at the end of the capture
    temperature: float            # trailing temperature reading
    type: int                     # throw-type tag from the header

    def getStartingRotation(self) -> Rotation:
        """Integrate the gyro samples backwards from ``endQ`` to recover
        the orientation at the start of the capture."""
        q: Rotation = self.endQ
        for i in reversed(range(ThrowData.NUM_POINTS)):
            duration: float = self.durationMicros[i] / 1_000_000.0
            # Angle rotated about each body axis during this sample.
            delta = self.gyros[i] * duration
            rot: Rotation = Rotation.from_euler('XYZ', [delta[0], delta[1], delta[2]])
            q = q * rot.inv()
        return q

    @staticmethod
    def readUnsignedShort(f) -> int:
        """Read one little-endian uint16 from *f*."""
        return struct.unpack('<H', f.read(2))[0]

    @staticmethod
    def readShort(f) -> int:
        """Read one little-endian int16 from *f*."""
        return struct.unpack('<h', f.read(2))[0]

    @staticmethod
    def readFloat(f) -> float:
        """Read one little-endian float32 from *f*."""
        return struct.unpack('<f', f.read(4))[0]

    @staticmethod
    def readVector(f, scaleFactor: float) -> np.ndarray:
        """Read three little-endian int16s and scale them into a 3-vector."""
        x, y, z = struct.unpack('<hhh', f.read(6))
        return np.array([x * scaleFactor, y * scaleFactor, z * scaleFactor])

    @staticmethod
    def readFile(fileName: str):
        """Open *fileName* and parse it as one ThrowData record."""
        with open(fileName, "rb") as f:
            return ThrowData.readFromFile(f)

    @staticmethod
    def _read_ring(f, startIndex: int, read_one):
        """Read NUM_POINTS items sequentially with *read_one* and un-rotate
        the ring buffer so element 0 is the sample stored at *startIndex*."""
        n = ThrowData.NUM_POINTS
        out = [None] * n
        for i in range(n):
            out[(i - startIndex) % n] = read_one(f)
        return out

    @staticmethod
    def readFromFile(f):
        """Parse one ThrowData record from the binary stream *f*.

        Layout: four uint16 header words (version, ring start index, ring
        last index, throw type), then NUM_POINTS uint16 durations, then
        four blocks of NUM_POINTS int16 3-vectors (accel0, gyro, accel1,
        accel2), then the end quaternion (w, x, y, z as float32) and a
        float32 temperature.
        """
        version = ThrowData.readUnsignedShort(f)
        startIndex = ThrowData.readUnsignedShort(f)
        lastIndex = ThrowData.readUnsignedShort(f)  # consumed but unused
        type = ThrowData.readUnsignedShort(f)

        def vector_reader(scale):
            # Bind the scale factor for _read_ring's one-argument reader.
            return lambda fh: ThrowData.readVector(fh, scale)

        durationMicros = ThrowData._read_ring(f, startIndex, ThrowData.readUnsignedShort)
        accel0 = ThrowData._read_ring(f, startIndex, vector_reader(ThrowData.OUTPUT_SCALE_FACTOR_32G))
        gyros = ThrowData._read_ring(f, startIndex, vector_reader(ThrowData.OUTPUT_SCALE_FACTOR_4000DPS))
        accel1 = ThrowData._read_ring(f, startIndex, vector_reader(ThrowData.OUTPUT_SCALE_FACTOR_400G))
        accel2 = ThrowData._read_ring(f, startIndex, vector_reader(ThrowData.OUTPUT_SCALE_FACTOR_400G))
        qw = ThrowData.readFloat(f)
        qx = ThrowData.readFloat(f)
        qy = ThrowData.readFloat(f)
        qz = ThrowData.readFloat(f)
        # scipy expects scalar-last (x, y, z, w) order.
        rotation: Rotation = Rotation.from_quat([qx, qy, qz, qw])
        temp = ThrowData.readFloat(f)
        return ThrowData(version, durationMicros, accel0, gyros, accel1, accel2, rotation, temp, type)
| 35.731959 | 124 | 0.638777 |
288275f551eb96263ac0a6a9893bc8305effff9d | 7,754 | py | Python | test/test_hankelutils.py | hlatkydavid/vnmrjpy | 48707a1000dc87e646e37c8bd686e695bd31a61e | [
"MIT"
] | null | null | null | test/test_hankelutils.py | hlatkydavid/vnmrjpy | 48707a1000dc87e646e37c8bd686e695bd31a61e | [
"MIT"
] | null | null | null | test/test_hankelutils.py | hlatkydavid/vnmrjpy | 48707a1000dc87e646e37c8bd686e695bd31a61e | [
"MIT"
] | null | null | null | import unittest
import vnmrjpy as vj
import numpy as np
import matplotlib.pyplot as plt
import time
#import cupy as cp
# Module-level defaults (NOTE(review): the tests below each build their own
# local ``rp`` dicts, so RP appears unused in this chunk — confirm before use).
RP={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False}
# Toggle to display matplotlib figures while the tests run.
PLOTTING = False
class Test_hankelutils(unittest.TestCase):
    """Unit tests for the vnmrjpy.aloha Hankel-matrix utilities.

    Each test builds random fiber/k-space arrays, runs the construct /
    deconstruct / weighting helpers, and asserts the expected shapes.
    Wall-clock timings are printed for the small (k-t) and large (kx-ky)
    problem sizes.
    """
    def test_lvl2_hankel_average(self):
        """Compare the old deconstruct+construct averaging path against
        lvl2_hankel_average on noisy random data.

        NOTE(review): this test only prints timings and shows plots — it
        has no assertions, so it cannot fail on a numerical regression.
        """
        #rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
        #    'recontype':'k-t','fiber_shape':(4,128,21)}
        rp={'rcvrs':4,'filter_size':(21,21),'virtualcoilboost':False,\
                'recontype':'kx-ky','fiber_shape':(4,192,192)}
        stage = 0
        (p,q) = rp['filter_size']
        (rcvrs,m,n) = rp['fiber_shape']
        fiber = np.random.rand(*rp['fiber_shape'])
        hankel = vj.aloha.construct_hankel(fiber,rp)
        noise = np.random.rand(*hankel.shape)
        hankel_noisy = hankel+noise
        # average merthod old:
        start_old = time.time()
        fiber_avg = vj.aloha.deconstruct_hankel(hankel_noisy,stage,rp)
        hankel_avg1 = vj.aloha.construct_hankel(fiber_avg,rp)
        end_old = time.time()
        # new average method
        start_new = time.time()
        hankel_avg2 = vj.aloha.lvl2_hankel_average(hankel_noisy\
                ,rp['filter_size'],fiber.shape)
        end_new = time.time()
        print('old avg time: {}'.format(end_old-start_old))
        print('new avg time: {}'.format(end_new-start_new))
        plt.subplot(1,4,1)
        plt.imshow(np.real(hankel))
        plt.title('orig')
        plt.subplot(1,4,2)
        plt.imshow(np.real(hankel_avg1))
        plt.title('old avg')
        plt.subplot(1,4,3)
        plt.imshow(np.real(hankel_avg2))
        plt.title('new avg')
        plt.subplot(1,4,4)
        plt.imshow(np.real(hankel_avg2-hankel_avg1))
        plt.title('difference')
        plt.show()
    # this wont be implemented probably
    """
    def test_lvl2_hankel_weights(self):
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'k-t','fiber_shape':(4,32,21)}
        filter_shape = (11,7)
        fiber_shape = (4,32,21)
        a = vj.aloha.lvl2_hankel_weights(filter_shape,fiber_shape)
    """
    # this works, but is really slow
    """
    def test_average_hankel(self):
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'k-t','fiber_shape':(4,128,21),'stages':3}
        stage = 0
        hankel = np.random.rand(1770,308)
        start = time.time()
        hankel_avg = vj.aloha.average_hankel(hankel,stage,rp)
        print('hankel avg small : {}'.format(time.time()-start))
        rp={'rcvrs':4,'filter_size':(21,21),'virtualcoilboost':False,
                'recontype':'kx-ky','fiber_shape':(4,192,192),'stages':3}
        hankel = np.random.rand(29584,1764)
        start = time.time()
        hankel = vj.aloha.average_hankel(hankel,stage,rp)
        end = time.time()
        self.assertEqual(hankel.shape,(29584,1764))
        print('average big hankel time : {}'.format(end-start))
    """
    #--------------------------STANDARD TESTS----------------------------------
    def test_construct_hankel_2d(self):
        """Shape-check the older construct_hankel_2d on small and big inputs."""
        # this is the old one for time comparison
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False}
        indata = np.random.rand(4,128,21)
        start = time.time()
        hankel = vj.aloha.construct_hankel_2d(indata,rp)
        end = time.time()
        self.assertEqual(hankel.shape,(1770,308))
        print('Construct_2d small hankel time : {}'.format(end-start))
        # test bigger kx ky
        rp={'rcvrs':4,'filter_size':(21,21),'virtualcoilboost':False}
        indata = np.random.rand(4,192,192)
        start = time.time()
        hankel = vj.aloha.construct_hankel_2d(indata,rp)
        end = time.time()
        self.assertEqual(hankel.shape,(29584,1764))
        print('Construct_2d big hankel time : {}'.format(end-start))
    def test_construct_hankel(self):
        """Shape-check construct_hankel on small (k-t) and big (kx-ky) inputs."""
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False}
        indata = np.random.rand(4,128,21)
        start = time.time()
        hankel = vj.aloha.construct_hankel(indata,rp)
        end = time.time()
        self.assertEqual(hankel.shape,(1770,308))
        print('Construct small hankel time : {}'.format(end-start))
        # test bigger kx ky
        rp={'rcvrs':4,'filter_size':(21,21),'virtualcoilboost':False}
        indata = np.random.rand(4,192,192)
        start = time.time()
        hankel = vj.aloha.construct_hankel(indata,rp)
        end = time.time()
        self.assertEqual(hankel.shape,(29584,1764))
        print('Construct big hankel time : {}'.format(end-start))
    def test_deconstruct_hankel(self):
        """deconstruct_hankel should invert the Hankel layout back to fiber_shape."""
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'k-t','fiber_shape':(4,128,21)}
        hankel = np.random.rand(1770,308)
        stage = 0
        start = time.time()
        nd_data = vj.aloha.deconstruct_hankel(hankel, stage, rp)
        end = time.time()
        self.assertEqual(nd_data.shape,(4,128,21))
        print('Deconstruct small hankel time : {}'.format(end-start))
        rp={'rcvrs':4,'filter_size':(21,21),'virtualcoilboost':False,\
            'recontype':'k-t','fiber_shape':(4,192,192)}
        hankel = np.random.rand(29584,1764)
        stage = 0
        start = time.time()
        nd_data = vj.aloha.deconstruct_hankel(hankel, stage, rp)
        end = time.time()
        self.assertEqual(nd_data.shape,(4,192,192))
        print('Deconstruct big hankel time : {}'.format(end-start))
    def test_make_kspace_weights(self):
        """Weights per stage halve the phase dimension; virtualcoilboost doubles rcvrs."""
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'k-t','fiber_shape':(4,128,21),'stages':3}
        weights = vj.aloha.make_kspace_weights(rp)
        self.assertEqual(weights[1].shape,(4,64,21))
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':True,\
            'recontype':'kx-ky','fiber_shape':(4,128,128),'stages':3}
        weights = vj.aloha.make_kspace_weights(rp)
        self.assertEqual(weights[3].shape,(8,64,64))
        if PLOTTING == True:
            plt.imshow(np.absolute(weights[0][0,...]))
            plt.show()
    def test_init_kspace_stage(self):
        """Stage 1 initialization halves the phase-encode dimensions."""
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'kx-ky','fiber_shape':(4,128,128),'stages':3}
        kspace = np.random.rand(4,128,128)
        stage = 1
        kspace_init = vj.aloha.init_kspace_stage(kspace,stage,rp)
        self.assertEqual(kspace_init.shape,(4,64,64))
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':True,\
            'recontype':'k-t','fiber_shape':(4,128,21),'stages':3}
        kspace = np.random.rand(8,128,21)
        stage = 1
        kspace_init = vj.aloha.init_kspace_stage(kspace,stage,rp)
        self.assertEqual(kspace_init.shape,(8,64,21))
    def test_finalize_kspace_stage(self):
        """Stage data is pasted into the center of the full k-space.

        NOTE(review): the test name says 'finalize' but the helper under
        test is finish_kspace_stage.
        """
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'kx-ky','fiber_shape':(4,128,128),'stages':3}
        fullk = np.zeros((4,128,128))
        stagek = np.ones((4,64,64))
        kspace_full = vj.aloha.finish_kspace_stage(stagek,fullk,0,rp)
        self.assertEqual(kspace_full.shape,(4,128,128))
        self.assertEqual(kspace_full[1,64,64],0)
        self.assertEqual(kspace_full[1,60,60],1)
        rp={'rcvrs':4,'filter_size':(11,7),'virtualcoilboost':False,\
            'recontype':'k-t','fiber_shape':(4,128,21),'stages':3}
        fullk = np.zeros((4,128,21))
        stagek = np.ones((4,64,21))
        kspace_full = vj.aloha.finish_kspace_stage(stagek,fullk,0,rp)
        self.assertEqual(kspace_full.shape,(4,128,21))
        self.assertEqual(kspace_full[1,60,10],1)
| 39.969072 | 79 | 0.596595 |
2883fadc5c01ff1f187f68ba84f5e3ae0a52978b | 1,057 | py | Python | src/archive/test3.py | felipearcaro/indexing-repository-python | 4fa504d3535495b30db443cc753ebc56e7e329c2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/archive/test3.py | felipearcaro/indexing-repository-python | 4fa504d3535495b30db443cc753ebc56e7e329c2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/archive/test3.py | felipearcaro/indexing-repository-python | 4fa504d3535495b30db443cc753ebc56e7e329c2 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | #setup
import docx2txt
# extract text from the three sample .docx documents
text = docx2txt.process("../documents/ambev.docx")
text2 = docx2txt.process("../documents/ambev2.docx")
text3 = docx2txt.process("../documents/ambev3.docx")
from whoosh.fields import Schema, TEXT
# Index schema: a document title plus its searchable full-text content.
schema = Schema(title=TEXT, content=TEXT)
import os
import os.path
from whoosh import index
# Create the index directory on first run.
if not os.path.exists("indexdir"):
    os.mkdir("indexdir")
ix = index.create_in("indexdir", schema)
# indexando arquivos (index the files)
ix = index.open_dir("indexdir")
writer = ix.writer()
writer.add_document(title=u"ambev", content=text)
writer.add_document(title=u"gleidson", content=text2)
writer.add_document(title=u"gleidson", content=text3)
writer.add_document(title=u"sato", content=u"thiago")
writer.commit()
# buscando (search the indexed content)
from whoosh.qparser import QueryParser
# Fix: the original built ``And([Term("content", u"chamorro"))`` with
# unbalanced brackets (SyntaxError) and then parsed an undefined
# ``querystring`` (NameError).  Define the query string and parse it once.
querystring = u"chamorro"
with ix.searcher() as searcher:
    # Fix: dropped the redundant second ``ix.searcher()`` call, which
    # leaked an unclosed searcher inside this ``with`` block.
    parser = QueryParser("content", ix.schema)
    myquery = parser.parse(querystring)
    results = searcher.search(myquery)
    print(len(results))
| 24.581395 | 53 | 0.746452 |
28846fa1c1e9c7ab3ae95eddc73455be7f366a02 | 196 | py | Python | Source Code/web/backend/files_app/fileapi/urls.py | creosB/Virtual-pdf-library | edb334b16dfd0d3c616683f6fbb370e54f489560 | [
"CC0-1.0"
] | 11 | 2021-12-20T01:51:56.000Z | 2022-01-01T10:17:47.000Z | Source Code/web/backend/files_app/fileapi/urls.py | creosB/Virtual-pdf-library | edb334b16dfd0d3c616683f6fbb370e54f489560 | [
"CC0-1.0"
] | null | null | null | Source Code/web/backend/files_app/fileapi/urls.py | creosB/Virtual-pdf-library | edb334b16dfd0d3c616683f6fbb370e54f489560 | [
"CC0-1.0"
] | 1 | 2021-12-21T08:47:56.000Z | 2021-12-21T08:47:56.000Z | from django.urls import path
from .views import FileList, FileDetail
urlpatterns = [
path('',FileList.as_view()),
path('<int:pk>/',FileDetail.as_view()), # individual items from django
] | 24.5 | 74 | 0.704082 |
288538cbd5c881814ddd71394b3b7fcabde021bf | 4,427 | py | Python | barcodes/generateBarcodes.py | sbarton272/AcousticBarcodes-Explorations | 73f019228988727575af7d67d1b7c7119f6c49a6 | [
"MIT"
] | null | null | null | barcodes/generateBarcodes.py | sbarton272/AcousticBarcodes-Explorations | 73f019228988727575af7d67d1b7c7119f6c49a6 | [
"MIT"
] | null | null | null | barcodes/generateBarcodes.py | sbarton272/AcousticBarcodes-Explorations | 73f019228988727575af7d67d1b7c7119f6c49a6 | [
"MIT"
] | null | null | null | """
Generate barcode images with various encodings
TODO
- Include text at bottom
- DXF instead
"""
#========================================================
# Imports
#========================================================
from dxfwrite import DXFEngine as dxf
#========================================================
# Constants
#========================================================
# Framing digit bands for a barcode.  NOTE(review): BarcodeDrawer duplicates
# these values as its parameter defaults instead of referencing the constants.
START_BAND = [1,1]
STOP_BAND = [0,1]
#========================================================
# Hamming Codes
#========================================================
class Encoding(object):
    """Pairing of a raw bit code with its encoded form.

    Fix: the original ``__init__`` never assigned ``encode`` even though
    ``__str__`` reads it, so ``str(Encoding(...))`` always raised
    AttributeError.  The encoded form is now an optional constructor
    argument that defaults to the raw code itself (TODO: wire in a real
    Hamming encoder, per the section header above).
    """
    def __init__(s, code, encode=None):
        # code: list of bit ints, e.g. [1, 0, 1]
        s.code = code
        # Default the encoded form to the raw code until encoding exists.
        s.encode = code if encode is None else encode
    def __str__(s):
        strCode = "".join(map(lambda x: str(x), s.code))
        strEncode = "".join(map(lambda x: str(x), s.encode))
        return "Encoding(" + strCode + " -> " + strEncode + ")"
#========================================================
# Barcode drawer
#========================================================
class BarcodeDrawer(object):
    """Draws an acoustic barcode as a DXF file of rectangular notches.

    The barcode is a vertical stack of notches; the gap between two
    consecutive notches encodes one digit (0 -> ZERO_WIDTH units,
    1 -> ONE_WIDTH units).  Drawings are written under OUTFILE.
    """
    OUTFILE = 'drawings/'
    # Encoding: 0 = 2 unit, 1 = 1 unit
    ZERO_WIDTH = 2
    ONE_WIDTH = 1
    def __init__(s, code, width=200, height=None, unitWidth=None, barWidth=None,
                 notchWidth=2, startBand=None, stopBand=None, includeText=False):
        """Precompute the barcode geometry.

        code: list of 0/1 digits to encode.
        width, height: drawing window size.  height is required when
            unitWidth is None (it is used to derive unitWidth).
        unitWidth: length of one encoding unit; derived from height if None.
        barWidth: horizontal extent of each notch; defaults to full width.
        notchWidth: thickness of each notch.
        startBand, stopBand: framing digits; default to [1, 1] and [0, 1].
        includeText: also render the digits as a text label.
        Raises ValueError when the barcode does not fit in the window.
        """
        # Fix: the original declared mutable default arguments
        # (startBand=[1,1], stopBand=[0,1]); None sentinels keep the same
        # effective defaults without sharing list objects across calls.
        if startBand is None:
            startBand = [1, 1]
        if stopBand is None:
            stopBand = [0, 1]
        s.digits = startBand + code + stopBand
        # Bar width by default assumed to be the full window width.
        if barWidth is None:
            barWidth = width
        s.barWidth = barWidth
        s.size = (width, height)
        s.notchWidth = notchWidth
        s.includeText = includeText
        s.startBand = startBand
        s.stopBand = stopBand
        s.code = code
        # Total code length in encoding units (0 counts double).
        unitLen = sum(map(lambda x: s.ZERO_WIDTH if x == 0 else s.ONE_WIDTH, s.digits))
        if unitWidth is None:
            # Sub one notchWidth for first notch, add 2 unit len for buffer.
            codeWidth = (height - notchWidth) / (unitLen + 2)
            unitWidth = codeWidth - notchWidth
        s.unitWidth = unitWidth
        # Check that the resulting barcode fits inside the window.
        s.barcodeLen = (unitWidth + notchWidth) * unitLen + notchWidth
        if s.barcodeLen > height:
            raise ValueError('Too many digits to fit window')
    def draw(s, outfile):
        """Render the barcode (and optional label) to OUTFILE + outfile."""
        # Create the DXF drawing with a single layer.
        drawing = dxf.drawing(s.OUTFILE + outfile)
        drawing.add_layer('layer', color=1)
        # Optional human-readable label: start-code-stop.
        if s.includeText:
            text = ''
            text += ''.join(map(str, s.startBand))
            text += '-'
            text += ''.join(map(str, s.code))
            text += '-'
            text += ''.join(map(str, s.stopBand))
            drawing.add(dxf.text(text, height=s.unitWidth))
        # Center the barcode in the drawing window.
        xOffset = (s.size[0] - s.barWidth) / 2
        yOffset = (s.size[1] - s.barcodeLen) / 2
        # One notch per digit; the gap after each notch encodes the digit.
        for d in s.digits:
            newOffset = s.drawBar(drawing, d, xOffset, yOffset)
            yOffset = newOffset[1]
        # Closing notch terminates the final gap.
        s.drawNotch(drawing, xOffset, yOffset)
        drawing.save()
    def drawBar(s, drawing, digit, xOffset, yOffset):
        """Draw one notch; return the (x, y) origin for the next notch."""
        (x0, y0, x1, y1) = s.drawNotch(drawing, xOffset, yOffset)
        x0 = xOffset
        # The distance to the next notch encodes the digit.
        if digit == 0:
            y0 = y1 + s.ZERO_WIDTH * s.unitWidth
        elif digit == 1:
            y0 = y1 + s.ONE_WIDTH * s.unitWidth
        return (x0, y0)
    def drawNotch(s, drawing, xOffset, yOffset):
        """Draw one rectangular notch outline; return its corner coords."""
        x0 = xOffset
        y0 = yOffset
        x1 = x0 + s.barWidth
        y1 = y0 + s.notchWidth
        drawing.add(dxf.polyline(points=[(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]))
        return (x0, y0, x1, y1)
#========================================================
# Parse options
#========================================================
if __name__ == '__main__':
    # Generate a sample DXF barcode for every code / notch-width combination.
    codes = [[1,1,1],[0,0,0],[0,1,0],[1,0,1]]
    width = 15
    height = 20
    unit = 1
    notchWidth = [.3,.5]
    for code in codes:
        codeStr = ''.join(map(str,code))
        for n in notchWidth:
            # e.g. 'code111-notch0.3-len1.dxf', written under BarcodeDrawer.OUTFILE
            filename = 'code' + codeStr + '-notch' + str(n) + '-len' + str(unit) + '.dxf'
            drawer = BarcodeDrawer(code, width=width, height=height, unitWidth=unit, notchWidth=n,
                includeText=True)
            drawer.draw(filename)
| 29.125 | 98 | 0.483171 |
2887834c88b90ae4d29891c6021442f87fb025c0 | 110 | py | Python | example/cheeseshop/apps/catalog/admin.py | sflems/django-constance | e177292c74cbf158c7218d8818d5d6c34b17eee1 | [
"BSD-3-Clause"
] | 899 | 2015-12-17T09:24:11.000Z | 2022-03-31T15:57:53.000Z | example/cheeseshop/apps/catalog/admin.py | sflems/django-constance | e177292c74cbf158c7218d8818d5d6c34b17eee1 | [
"BSD-3-Clause"
] | 342 | 2015-12-27T11:07:44.000Z | 2022-03-24T13:34:46.000Z | example/cheeseshop/apps/catalog/admin.py | sflems/django-constance | e177292c74cbf158c7218d8818d5d6c34b17eee1 | [
"BSD-3-Clause"
] | 213 | 2015-12-23T00:32:34.000Z | 2022-03-17T17:04:57.000Z | from django.contrib import admin
from cheeseshop.apps.catalog.models import Brand
# Expose the Brand model in the Django admin with the default ModelAdmin.
admin.site.register(Brand)
| 22 | 48 | 0.836364 |
28878c846aed485a7bc9a73365300409e1defb8b | 672 | py | Python | leetcode/editor/cn/FindKPairsWithSmallestSums.py | huangge1199/leet-code-python | 5d01bbb6f12a495ea7ea0a90b5b3b4aa92bcc2f7 | [
"Apache-2.0"
] | 1 | 2022-02-12T13:55:41.000Z | 2022-02-12T13:55:41.000Z | leetcode/editor/cn/FindKPairsWithSmallestSums.py | huangge1199/leet-code-python | 5d01bbb6f12a495ea7ea0a90b5b3b4aa92bcc2f7 | [
"Apache-2.0"
] | null | null | null | leetcode/editor/cn/FindKPairsWithSmallestSums.py | huangge1199/leet-code-python | 5d01bbb6f12a495ea7ea0a90b5b3b4aa92bcc2f7 | [
"Apache-2.0"
] | null | null | null | # 373:查找和最小的 K 对数字
# leetcode submit region begin(Prohibit modification and deletion)
from heapq import heappop, heappush
from typing import List
class Solution:
    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:
        """Return the k pairs (u, v), u from nums1 and v from nums2, with
        the smallest sums, in non-decreasing order of sum.

        A min-heap is seeded with (nums1[i] + nums2[0], i, 0) for the
        first min(k, len(nums1)) rows; popping the smallest pair pushes
        that row's next column.  O(k log k) time, O(min(k, m)) heap.
        """
        # Fix: guard empty inputs — the original indexed nums2[0] in the
        # heap seed and raised IndexError when nums2 was empty.
        if k <= 0 or not nums1 or not nums2:
            return []
        m, n = len(nums1), len(nums2)
        ans: List[List[int]] = []
        # Heap entries: (pair sum, index into nums1, index into nums2).
        pq = [(nums1[i] + nums2[0], i, 0) for i in range(min(k, m))]
        while pq and len(ans) < k:
            _, i, j = heappop(pq)
            ans.append([nums1[i], nums2[j]])
            if j + 1 < n:
                heappush(pq, (nums1[i] + nums2[j + 1], i, j + 1))
        return ans
# leetcode submit region end(Prohibit modification and deletion)
| 33.6 | 92 | 0.578869 |
288bed12c190fb35b526a110b53eefe990c1f7a5 | 2,505 | py | Python | urls.py | Shakil-1501/Quizdjango | 5e201d0f05ce2a49d36484009ff6032821365bc6 | [
"MIT"
] | null | null | null | urls.py | Shakil-1501/Quizdjango | 5e201d0f05ce2a49d36484009ff6032821365bc6 | [
"MIT"
] | null | null | null | urls.py | Shakil-1501/Quizdjango | 5e201d0f05ce2a49d36484009ff6032821365bc6 | [
"MIT"
] | null | null | null | # Core Django imports.
from django.urls import path
from django.shortcuts import redirect, render
# LMS app imports
from lms.views.course.course_views import (
CourseListView,fetch_questions,compute_stats,display_lp,Edit_quiz,preview_quiz,fetch_questions_oneatatime,compute_html,enter_comment,quiz_lp
)
from lms.views.dashboard.student.dashboard_views import (
DashboardHomeView,
)
from lms.views.account.register_view import \
(
ActivateView,
AccountActivationSentView,
UserRegisterView,
)
from lms.views.account.logout_view import UserLogoutView
from lms.views.account.login_view import UserLoginView
# Specifies the app name for URL namespacing (reverse as 'lms:<name>').
app_name = "lms"
# lms/urls.py
urlpatterns = [
    # LMS URLS #
    # /home/
    path(
        route='',
        view=CourseListView.as_view(),
        name='home'
    ),
    path('lms/course',compute_stats,name="compute_stats"),
    path('lms/quiz',fetch_questions_oneatatime,name="fetch_questions_oneatatime"),
    path('lms/quiz3',fetch_questions,name="fetch_questions"),
    path('lms/quizlp',quiz_lp,name="quiz_lp"),
    path('lms/quiz2',display_lp,name="display_lp"),
    path('admin/',Edit_quiz,name="Edit_quiz"),
    path('lms/quizs',preview_quiz,name="preview_quiz"),
    path('lms/file',compute_html,name="compute_html"),
    path('lms/enter_comment',enter_comment,name="enter_comment"),
    #path('quiz2', lambda request: render(request, 'templates/lms/quiz2.html')),
    # ACCOUNT URLS #
    # /account/login/
    path(
        route='account/login/',
        view=UserLoginView.as_view(),
        name='login'
    ),
    # /account/register/  (comment fixed: previously mislabeled as login)
    path(
        route='account/register/',
        view=UserRegisterView.as_view(),
        name='register'
    ),
    # /account/logout/
    path(
        route='account/logout/',
        view=UserLogoutView.as_view(),
        name='logout'
    ),
    path(route='account_activation_sent/',
         view=AccountActivationSentView.as_view(),
         name='account_activation_sent'
         ),
    path(route='activate/<uidb64>/<token>/',
         view=ActivateView.as_view(),
         name='activate'
         ),
    # DASHBOARD URLS #
    # /student/dashboard/home/  (comment fixed: route is student, not author)
    path(
        route="student/dashboard/home/",
        view=DashboardHomeView.as_view(),
        name="dashboard_home"
    ),
]
| 22.567568 | 145 | 0.621956 |
288fb0e62147ed4c6a19e3faeb3476a5404525aa | 270 | py | Python | rasterio/errors.py | clembou/rasterio | 57169c31dae04e1319b4c4b607345475a7122910 | [
"BSD-3-Clause"
] | null | null | null | rasterio/errors.py | clembou/rasterio | 57169c31dae04e1319b4c4b607345475a7122910 | [
"BSD-3-Clause"
] | null | null | null | rasterio/errors.py | clembou/rasterio | 57169c31dae04e1319b4c4b607345475a7122910 | [
"BSD-3-Clause"
] | null | null | null | """A module of errors."""
class RasterioIOError(IOError):
    """Raised when none of the presently registered drivers can open a dataset."""
class RasterioDriverRegistrationError(ValueError):
    """Raised when a GDAL driver lookup (e.g. _gdal.GDALGetDriverByName("MEM")) yields NULL."""
| 27 | 78 | 0.733333 |
28935cc03b2db600110d382016ba629d850712b4 | 236 | py | Python | cloud_test_app/cloud_test/utils.py | gabrielamelian/django-docker-code-test | 0500980c519140460f66d3812da29e651a0d0ac6 | [
"Apache-2.0"
] | null | null | null | cloud_test_app/cloud_test/utils.py | gabrielamelian/django-docker-code-test | 0500980c519140460f66d3812da29e651a0d0ac6 | [
"Apache-2.0"
] | null | null | null | cloud_test_app/cloud_test/utils.py | gabrielamelian/django-docker-code-test | 0500980c519140460f66d3812da29e651a0d0ac6 | [
"Apache-2.0"
] | null | null | null |
def parse_boolean(value):
    """Coerce *value* to a bool.

    Real booleans pass through unchanged; the strings 'True', 'true',
    'T', 't' and '1' parse as True.  Everything else — including the
    explicit false spellings and non-string values — yields False.
    """
    # isinstance instead of ``type(value) == bool``; bool cannot be
    # subclassed, so the behavior is identical.
    if isinstance(value, bool):
        return value
    # A tuple (not a set) so unhashable inputs such as lists compare as
    # False instead of raising TypeError.  The original's separate
    # false-spelling branch was redundant: every non-truthy input fell
    # through to ``return False`` anyway.
    return value in ('True', 'true', 'T', 't', '1')
| 19.666667 | 50 | 0.512712 |