content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import asyncio
from itertools import cycle
import pytest
from gino import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from .factory import Factory
from .models import db, PG_URL, UserType
@pytest.fixture(scope="session")
@pytest.fixture(scope="session")
@pytest.fixture(scope='function', autouse=True)
@pytest.fixture
@pytest.fixture
| [
11748,
30351,
952,
198,
6738,
340,
861,
10141,
1330,
6772,
198,
198,
11748,
12972,
9288,
198,
6738,
308,
2879,
1330,
2251,
62,
18392,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
629,
19458,
62,
29891,
11,
6246,
10297,
198,
198,
6738,
... | 3.108333 | 120 |
import argparse
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from utils.model import get_model
from utils.tools import get_configs_of, to_device, get_mask_from_lengths
from dataset import Dataset
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--restore_step", type=int, required=True)
parser.add_argument("--path_tag", type=str, default="")
parser.add_argument(
"--model",
type=str,
default="aux",
)
parser.add_argument(
"--dataset",
type=str,
required=True,
help="name of dataset",
)
args = parser.parse_args()
# Read Config
args.model = "aux"
preprocess_config, model_config, train_config = get_configs_of(args.dataset)
configs = (preprocess_config, model_config, train_config)
path_tag = "_{}".format(args.path_tag) if args.path_tag != "" else args.path_tag
train_config["path"]["ckpt_path"] = train_config["path"]["ckpt_path"]+"_{}{}".format("shallow", path_tag)
train_config["path"]["log_path"] = train_config["path"]["log_path"]+"_{}{}".format("shallow", path_tag)
train_config["path"]["result_path"] = train_config["path"]["result_path"]+"_{}{}".format("aux", path_tag)
if preprocess_config["preprocessing"]["pitch"]["pitch_type"] == "cwt":
import numpy as np
from utils.pitch_tools import get_lf0_cwt
preprocess_config["preprocessing"]["pitch"]["cwt_scales"] = get_lf0_cwt(np.ones(10))[1]
# Log Configuration
print("\n==================================== Prediction Configuration ====================================")
print(" ---> Total Batch Size:", int(train_config["optimizer"]["batch_size"]))
print(" ---> Path of ckpt:", train_config["path"]["ckpt_path"])
print("================================================================================================")
# Get model
model = get_model(args, configs, device, train=False)
# Get dataset
dataset = Dataset(
"val.txt", preprocess_config, train_config, sort=False, drop_last=False
)
batch_size = train_config["optimizer"]["batch_size"]
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=dataset.collate_fn,
)
predict(model, args.restore_step, configs, loader, len(dataset))
| [
11748,
1822,
29572,
201,
198,
201,
198,
11748,
28034,
201,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
201,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
201,
198,
201,
198,
6738,
3384,
4487,
13,
19849,
1330,
651,
62,... | 2.489655 | 1,015 |
# $Id$
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
| [
2,
720,
7390,
3,
198,
198,
6738,
8265,
62,
8692,
1330,
19937,
14881,
198,
6738,
8265,
62,
19816,
1040,
1330,
12327,
276,
16934,
26796,
35608,
259,
198,
11748,
8265,
62,
26791,
198,
11748,
410,
30488,
198,
220,
220,
220,
220,
220,
220,... | 2.263889 | 72 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 03:40:59 2020
@author: hp
"""
import cv2 # pip install opencv-python
import dlib
import numpy as np
# Use a file on your computer:
videoCapture = cv2.VideoCapture('video/clinton.mp4')
# Or use a web cam:
# videoCapture = cv2.VideoCapture(0)
# Initialise three separate models
# dlib
detector2 = dlib.get_frontal_face_detector()
# caffe (DNN)
modelFile = "models/res10_300x300_ssd_iter_140000.caffemodel"
configFile = "models/deploy.prototxt.txt"
net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
# Haar cascade
classifier2 = cv2.CascadeClassifier('models/haarcascade_frontalface2.xml')
font = cv2.FONT_HERSHEY_SIMPLEX
# Each iteration of the while loop captures a single frame from the capture device (file or webcam)
while(True):
# Get the next frame
ret, img = videoCapture.read()
# If a frame was successfully captured
if ret == True:
# Resize image
img = cv2.resize(img, None, fx=0.5, fy=0.5)
height, width = img.shape[:2]
img2 = img.copy()
img3 = img.copy()
img4 = img.copy()
# Convert to greyscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Detect faces using dlib and draw bounding boxes
faces2 = detector2(gray, 1)
for result in faces2:
x = result.left()
y = result.top()
x1 = result.right()
y1 = result.bottom()
cv2.rectangle(img2, (x, y), (x1, y1), (0, 0, 255), 2)
cv2.putText(img2, 'dlib', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
# Detect faces using caffe (DNN) and draw bounding boxes
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)),
1.0, (300, 300), (104.0, 117.0, 123.0))
net.setInput(blob)
faces3 = net.forward()
for i in range(faces3.shape[2]):
confidence = faces3[0, 0, i, 2]
if confidence > 0.5:
box = faces3[0, 0, i, 3:7] * np.array([width, height, width, height])
(x, y, x1, y1) = box.astype("int")
# cv2.rectangle(img3, (x, y), (x1, y1), (0, 0, 255), 2)
cv2.rectangle(img3, (x, y), (x1, y1), (0, 0, 255), -1) # -1 fills the rectangle
cv2.putText(img3, 'dnn', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
# Detect faces using Haar cascades and draw bounding boxes
faces4 = classifier2.detectMultiScale(img)
for result in faces4:
x, y, w, h = result
x1, y1 = x + w, y + h
cv2.rectangle(img4, (x, y), (x1, y1), (0, 0, 255), 2)
cv2.putText(img4, 'haar', (30, 30), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
# Show on the screen
cv2.imshow("dlib", img2)
cv2.imshow("dnn", img3)
cv2.imshow("haar", img4)
# Exit the loop with the escape key (with one of the video windows active)
if cv2.waitKey(1) & 0xFF == 27: # esc
break
else:
break
# Release resources
videoCapture.release()
cv2.destroyAllWindows()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
26223,
5979,
220,
362,
7643,
25,
1821,
25,
3270,
12131,
198,
198,
31,
9800,
25,
27673,
198,
37811,
198,
11748,
269,
85,
17,
220,
1303,
7347,
27... | 2.035948 | 1,530 |
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import struct
| [
2,
38093,
10052,
28,
198,
2,
15069,
2813,
3009,
11,
3457,
13,
198,
2,
16529,
3880,
438,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
670,
2845,
287,
... | 5.319767 | 172 |
#!/usr/bin/python3
import os
import uuid
import mne
import numpy as np
import pandas as pd
import pyedflib
import scipy.io as sio
from mne.time_frequency import *
import matplotlib.pyplot as plt
def get_recorder_time(data):
'''
:param data: raw data
:return: 这个文件记录的时间长度
'''
time = data.times[-1]
return time
def re_sampling(data, fz):
'''
:param data: mne 模块读取的数据
:param fz: 重采样的频率
:return: 返回的是重采样的频率
'''
data.resample(fz, npad="auto")
return data
def rewrite(raw, include_names, save_path): # 对数据进行重写,主要是包含某些特殊的信道分离重写
'''
:param raw: 读取的原始数据
:param include_names: 包含信道的名称
:param save_path: 保存的路径
:return: 返回只包含对应信道的数据
'''
want_meg = True
want_eeg = False
want_stim = False
picks = mne.pick_types(raw.info, meg=want_meg, eeg=want_eeg, stim=want_stim,
include=include_names, exclude='bads')
print("include channel names:{}".format(include_names))
raw.save(save_path, picks=picks, overwrite=True)
# raw.save("SEEG.fif", picks=picks_seeg, overwrite=True)
print("successfully written!")
return True
def get_common_channels(ch_names1, ch_names2): # 寻找两个数据的公共信道
'''
:param ch_names1: raw1 ch_names list
:param ch_names2: raw2 ch_names list
:return: common ch_names list
'''
common_channels = [x for x in ch_names1 if x in ch_names2]
return common_channels
def data_connection(raw1, raw2): # 数据的拼接
'''
:param raw1: raw data1
:param raw2: raw data2
:return: data connection raw1:raw2
'''
raw1.append(raw2)
return raw1
def select_channel_data(raw, select_channel_names): # 根据某些信道的名称进行数据选择,直接选择这个信道的数据
'''
:param raw: raw data
:return: channel data
'''
ch_names = get_channels_names(raw)
pick_channel_No = mne.pick_channels(ch_names=ch_names, include=select_channel_names)
data, time = raw[pick_channel_No, :]
return data
def data_split(raw, time_step): # 数据的切片处理
'''
:param raw: 读取的原始数据
:param time_step: 窗口的大小
:return:
'''
data_split = []
end = max(raw.times)
epoch = int(end // time_step)
fz = int(len(raw) / end) # 采样频率
for index in range(epoch - 1):
start = index * fz * time_step
stop = (index + 1) * fz * time_step
data, time = raw[:, start:stop]
data_split.append(data)
return data_split
def get_duration_raw_data(raw, start, stop):
'''
:param raw: 原始数据
:param start: 开始的时间点
:param stop: 终止的时间点
:return:
'''
end = max(raw.times)
if stop > end:
print("over range!!!")
return None
else:
duration_data = raw.crop(start, stop)
return duration_data
def save_split_data(data_split, path, flag): # 切片数据的保存
'''
:param data_split: 被切片的数据
:param path: 所存储的文件夹,也就是存储文件的上一级文件夹
:param flag: 对应数据的标识
:return:
'''
if not os.path.exists(path):
os.makedirs(path)
for d in data_split:
name = str(uuid.uuid1()) + "-" + str(flag)
path_all = os.path.join(path, name)
save_numpy_info(d, path_all)
print("File save successfully {}".format(path))
return True
def seeg_preprocess(fin, fout, seeg_chan_name):
'''
SEEG滤波
:param fin: 源数据文件名
:param fout: 输出文件名(***以_raw.fif结尾***)
:param seeg_chan_name: 需要滤波的信道名列表
:return:
'''
raw = mne.io.read_raw_edf(fin, preload=True)
specific_chans = raw.pick_channels(seeg_chan_name)
del raw
if len(specific_chans.info['ch_names']) != len(seeg_chan_name):
print("channels number not matched")
return
sfreq = specific_chans.info['sfreq'] # 采样频率
nyq = sfreq / 2. # 奈奎斯特频率
specific_chans.notch_filter(np.arange(50, nyq, 50), filter_length='auto',
phase='zero')
specific_chans.filter(0.5, None, fir_design='firwin')
specific_chans.save(fout)
del specific_chans
def eeg_preprocess(fin, fout, seeg_chan_name):
'''
EEG滤波
:param fin: 源数据文件名
:param fout: 输出文件名(***以_raw.fif结尾***)
:param seeg_chan_name: 需要滤波的信道名列表
:return:
'''
raw = mne.io.read_raw_edf(fin, preload=True)
specific_chans = raw.copy().pick_channels(seeg_chan_name)
del raw
if len(specific_chans.info['ch_names']) != len(seeg_chan_name):
print("channels number not matched")
return
sfreq = specific_chans.info['sfreq'] # 采样频率
nyq = sfreq / 2. # 奈奎斯特频率
specific_chans.notch_filter(np.arange(50, nyq, 50), filter_length='auto',
phase='zero')
specific_chans.filter(1., None, fir_design='firwin')
specific_chans.save(fout)
del specific_chans
def seeg_npy_plot(data, channels, save_path, save_path_npy=None):
'''
:param data: numpy 格式的数据
:param cahnnels: 所选择的信道list
:return:
'''
k = len(channels)
k = 1 # 只选取一个信道
plt.figure(0)
plt.subplots_adjust(hspace=0.6, wspace=0.6)
if save_path_npy is not None:
data_p = data[channels[0]]
np.save(save_path_npy, data_p)
for i in range(k):
try:
plt.subplot(k, 1, i + 1)
plt.title("channel:{}".format(channels[i]))
plt.plot(data[channels[i]])
except IndexError:
print("IndexError")
plt.savefig(save_path)
plt.close(0)
# plt.show()
return True
def split_edf(filename, NEpochs=1): # 把太大的edf文件分成NEpochs个小edf文件
'''
:param filename: 源文件名称
:param NEpochs: 要划分的数量
:return:
'''
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
oridir = os.getcwd()
if dirname != "": # pyedflib只能读取当前工作目录的文件
os.chdir(dirname)
f = pyedflib.EdfReader(basename)
os.chdir(oridir) # 路径换回去
NSamples = int(f.getNSamples()[0] / NEpochs)
NChannels = f.signals_in_file
fileOutPrefix = basename + '_'
channels_info = list()
for ch in range(NChannels):
ch_dict = dict()
ch_dict['label'] = f.getLabel(ch)
ch_dict['dimension'] = f.getPhysicalDimension(ch)
ch_dict['sample_rate'] = f.getSampleFrequency(ch)
ch_dict['physical_max'] = f.getPhysicalMaximum(ch)
ch_dict['physical_min'] = f.getPhysicalMinimum(ch)
ch_dict['digital_max'] = f.getDigitalMaximum(ch)
ch_dict['digital_min'] = f.getDigitalMinimum(ch)
ch_dict['transducer'] = f.getTransducer(ch)
ch_dict['prefilter'] = f.getPrefilter(ch)
channels_info.append(ch_dict)
for i in range(NEpochs):
print("File %d starts" % i)
fileOut = os.path.join('.', fileOutPrefix + str(i) + '.edf')
fout = pyedflib.EdfWriter(fileOut, NChannels, file_type=pyedflib.FILETYPE_EDFPLUS)
data_list = list()
for ch in range(NChannels):
if ch == NChannels - 1:
data_list.append(f.readSignal(ch)[i * NSamples:])
else:
data_list.append(f.readSignal(ch)[i * NSamples: (i + 1) * NSamples - 1])
fout.setSignalHeaders(channels_info)
fout.writeSamples(data_list)
fout.close()
del fout
del data_list
print("File %d done" % i)
def save_raw_as_edf(raw, fout_name): # 把raw数据存为edf格式
'''
:param raw: raw格式数据
:param fout_name: 输出的文件名
:return:
'''
NChannels = raw.info['nchan']
channels_info = list()
for i in range(NChannels):
'''默认参数来自edfwriter.py'''
ch_dict = dict()
ch_dict['label'] = raw.info['chs'][i]['ch_name']
ch_dict['dimension'] = 'mV'
ch_dict['sample_rate'] = raw.info['sfreq']
ch_dict['physical_max'] = 1.0
ch_dict['physical_min'] = -1.0
ch_dict['digital_max'] = 32767
ch_dict['digital_min'] = -32767
ch_dict['transducer'] = 'trans1'
ch_dict['prefilter'] = "pre1"
channels_info.append(ch_dict)
fileOut = os.path.join('.', fout_name + '.edf')
fout = pyedflib.EdfWriter(fileOut, NChannels, file_type=pyedflib.FILETYPE_EDFPLUS)
data_list, _ = raw[:, :]
print(data_list)
fout.setSignalHeaders(channels_info)
fout.writeSamples(data_list)
fout.close()
print("Done!")
del fout
del data_list
def make_whole_as_epoch(raw, e_id=666):
'''
将一整个raw作为一个epoch返回
:param raw: raw类型对象
:param e_id: 整数类型,指定event的id,不能与已有id重复
:return: Epochs对象
'''
data, _ = raw[:, :]
event_id = {'Added': e_id} # 人为增加一个event
event = [[0, 0, e_id]] # 在第一个样本处标记event为id
epoch = mne.EpochsArray([data], raw.info, event, 0, event_id)
return epoch
def tfr_analyze(epochs, freqs, resample=None, decim=1):
'''
freqs:type为ndarray,指定一个离散的频率数组
:param epochs: 待分析的Epochs对象
:param freqs: ndarray类型,包含感兴趣的所有频率,例np.arange(80,100,0.5)
:param resample: 整数类型,指明重采样频率,通过对数据重采样减轻内存压力
:param decim: 整数类型,只抽取时频变换后的部分结果,减轻内存压力
:return: AverageTFR对象,包含时频变换后的数据和信息
'''
if resample is not None:
epochs.resample(resample, npad='auto') # 重采样,减少内存消耗
n_cycles = freqs / 2.
# 使用小波变换进行时频变换
# decim参数指定对转换过的结果后再次重采样的频率,例如若指定为5,则频率变为原来的5分之一
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim=decim)
power.info['sfreq'] /= decim
return power
def tfr_extract(power, tmin=0, tmax=None):
'''
提取tfr_analyze返回的数据中感兴趣的时间段
:param power: AverageTFR对象,时频变换的输出
:param tmin: 时间起点(包含在区间内)
:param tmax: 时间终点(不包含在区间内)
:return: ndarray, shape(n_channels, n_freqs, n_times)
'''
sfreq = power.info['sfreq']
start = int(tmin * sfreq)
if tmax is None:
return np.array([[[k for k in power.data[i][j][start:]] for j in range(len(power.data[i]))] for i in
range(len(power.data))])
else:
end = int(tmax * sfreq)
return np.array([[[k for k in power.data[i][j][start: end]] for j in range(len(power.data[i]))] for i in
range(len(power.data))])
def get_cost_matrix(elec_pos):
'''
获取代价矩阵(不同电极之间的距离)
:param elec_pos: 含有信道名以及坐标的字典
:return: cost_matrix: 代价矩阵
'''
n = len(elec_pos)
cost_matrix = [[0 for _ in range(n)] for _ in range(n)]
i = 0
while i < n:
j = i + 1
while j < n:
cost_matrix[i][j] = np.linalg.norm(elec_pos[i]['pos'] - elec_pos[j]['pos'])
cost_matrix[j][i] = cost_matrix[i][j]
j += 1
i += 1
return cost_matrix
def least_traversal(elec_pos):
'''
枚举所有起点计算出最小代价的遍历路径
:param elec_pos: 含有信道名以及坐标的字典
:return: min_cost: 最小代价
:return: min_path: 对应路径
'''
cost_matrix = get_cost_matrix(elec_pos)
n = len(elec_pos)
maximum = 9999999
min_cost = maximum
min_path = None
for start in range(n):
visited = [False for _ in range(n)]
n_visited = 0
cur = start
cost = 0
path = [elec_pos[cur]['name']]
while n_visited < n - 1:
visited[cur] = True
n_visited += 1
min_d = maximum
min_i = 0
for i in range(n):
d = cost_matrix[cur][i]
if d < min_d and not visited[i]:
min_d = d
min_i = i
cost += min_d
path.append(elec_pos[min_i]['name'])
cur = min_i
if cost < min_cost:
min_cost = cost
min_path = path
return min_cost, min_path
def retrieve_chs_from_mat(patient_name):
'''
提取.mat文件中的信道名和坐标信息
:param patient_name: 目标病人名(须保证文件名为patient_name.mat)
:return: elec_pos: 含有信道名以及坐标的字典
'''
pos_info = sio.loadmat(patient_name + ".mat")
elec_pos = list()
for i in range(pos_info['elec_Info_Final'][0][0][1][0].size): # name为字符串,pos为ndarray格式
elec_pos.append({'name': pos_info['elec_Info_Final'][0][0][0][0][i][0],
'pos': pos_info['elec_Info_Final'][0][0][1][0][i][0]})
return elec_pos
def get_path(patient_name):
'''
获取当前病人的信道排列并保存在.csv文件中
:param patient_name: 目标病人名
'''
_, path = least_traversal(retrieve_chs_from_mat(patient_name))
print(path)
path_len = len(path)
print(path_len)
to_csv = [[i for i in range(path_len)], path]
to_csv = [[row[i] for row in to_csv] for i in range(path_len)]
col = ['ID', 'chan_name']
csv_frame = pd.DataFrame(columns=col, data=to_csv)
csv_frame.to_csv('./' + patient_name + '_seq.csv', encoding='utf-8')
def draw_seeg_picture(data, sampling=500, x_axis='Time(s)', y_axis='Channel'):
'''
:param data: SEEG读取的信号, 进行可视化的读取
:return:
'''
width = data.shape[1]
height = data.shape[0]
dpi = 50
plt.figure(figsize=(width // (dpi * 5), height // dpi), dpi=200)
# my_x_ticks = np.arange(0, width // sampling, 1.0 / sampling) # 原始数据有width个数据,故此处为设置从0开始,间隔为1/sampling
# plt.xticks(my_x_ticks)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
# plt.axis('off')
plt.imshow(data, aspect='auto')
plt.show()
plt.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
28686,
198,
11748,
334,
27112,
198,
198,
11748,
285,
710,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
12972,
276,
2704,
571,
198,
117... | 1.67758 | 7,819 |
from django.contrib import admin
from .models import IntegerValue
admin.site.register(
IntegerValue,
list_display=["id", "name", "value"],
)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
34142,
11395,
628,
198,
28482,
13,
15654,
13,
30238,
7,
198,
220,
220,
220,
34142,
11395,
11,
198,
220,
220,
220,
1351,
62,
13812,
28,
14692,
312,
1600,
366,... | 3.02 | 50 |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 14 21:35:15 2017
@author: austin
V1.2 use SoupStrainer for lower RAM usage. But looks a little bit slow, then skip the sleep time
"""
#主要程序
import requests
import re
from bs4 import BeautifulSoup,SoupStrainer
import pandas#pandas大法好
from fake_useragent import UserAgent
import time,random,sys
import gc,psutil,os #To get the system memory information
import linecache
import tracemalloc
#Get system empty memory
proc = psutil.Process(os.getpid())
gc.collect()
mem0 = proc.memory_info().rss/1048576
#tracemalloc.start()
ua=UserAgent()#使用随机header,模拟人类
headers1={'User-Agent': 'ua.random'}#使用随机header,模拟人类
TotalPrice=[] #Total price
PricePerArea=[] #price per meter
HouseArea=[]
HouseHeight=[]
HouseConfig=[]
HouseCommunit=[]
HouseLocMajor=[]
HouseLocMinor=[]
HouseBuildYear=[]
LinkUrl=[]
domain='http://sh.lianjia.com'#为了之后拼接子域名爬取详细信息
#'http://sh.lianjia.com/ershoufang/pudong/a1p21d2',#爬取拼接域名
DistrictList=['pudong','minhang']
SizeLevelList=['a'] #总共a1~a7
PriceLevelList=['p2'] #总共p21~p27
# Use SoupStrainer to minimize the memory
StrainerPrice = SoupStrainer('span',attrs={'class':'total-price strong-num'})
StrainerPriceper = SoupStrainer('span',attrs={'class':'info-col price-item minor'})
StrainerHouseInfo = SoupStrainer('span',attrs={'class':'info-col row1-text'})
StrainerHouseAddr = SoupStrainer('span',attrs={'class':'info-col row2-text'})
StrainerHouseWeb = SoupStrainer('div',attrs={'class':'prop-title'})
for SizeLevel in range(1,7):
totalpage=100
i=1
for i in range(1,100):#爬取2页,想爬多少页直接修改替换掉400,不要超过总页数就好
if i>totalpage:
break
begin = time.time()
mem0 = proc.memory_info().rss/1048576
res=requests.get('http://sh.lianjia.com/ershoufang/'+DistrictList[0]+'/a'+str(SizeLevel)+'d'+str(i),headers=headers1)#爬取拼接域名
soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
if i==1:
#soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
results_totalpage=soup.find('a',attrs={'gahref':'results_totalpage'})
totalpage=int(results_totalpage.string)
results_totalpage=None
print(totalpage,DistrictList[0]+'/a'+str(SizeLevel))
#else:
# soup = BeautifulSoup(res.text,'html.parser',parse_only=strainer)#使用html筛选器
#links = SoupStrainer('a')
#price=soup.find_all('span',attrs={'class':'total-price strong-num'})
price=BeautifulSoup(res.text,'html.parser',parse_only=StrainerPrice).contents
#price[0].string # 323
priceper=BeautifulSoup(res.text,'html.parser',parse_only=StrainerPriceper).contents
#priceper=soup.find_all('span',attrs={'class':'info-col price-item minor'})
#re.findall(r'\d{5}',priceper[0].string) # ['66123']
houseInfo=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseInfo).contents
#houseInfo=soup.find_all('span',attrs={'class':'info-col row1-text'})
#houseInfo[0].get_text() #'\n\n\t\t\t\t\t\t\t1室1厅 | 40.53平\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t| 中区/5层\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t'
#text=re.sub(r'\n\t*| |','',houseInfo[0].get_text()) #'1室1厅|40.53平|中区/5层'
#re.split(r'\|', text) #['1室1厅', '40.53平', '中区/5层']
houseAddr=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseAddr).contents
#houseAddr=soup.find_all('span',attrs={'class':'info-col row2-text'})
#houseAddr2=houseAddr[0].find_all('a')
#houseAddr2[0].string #'虹延小区'
#houseAddr2[1].string #'长宁'
#houseAddr2[2].string #'西郊'
#re.findall(r'\d{4}',houseAddr[0].get_text()) # ['1995']
houseWeb=BeautifulSoup(res.text,'html.parser',parse_only=StrainerHouseWeb)
j=0
for j in range(0,(len(price))): #并非所有页都是30项
try:
LinkUrl.append(houseWeb.select('.prop-title a')[j]['href'])
TotalPrice.append(price[j].string)
# 323
UnitPrice=re.findall(r'\d{5}',priceper[j].string)
#['66123']
if UnitPrice:
PricePerArea.append(int(UnitPrice[0])) # '66123'
else:
PricePerArea.append('unknow') # '1995'
HouseInfo1=re.split(r'\|',re.sub(r'\n\t*| |平','',houseInfo[j].get_text()))
#['1室1厅', '40.53平', '中区/5层']
HouseArea.append(float(HouseInfo1[1]))
HouseHeight.append(HouseInfo1[2])
HouseConfig.append(HouseInfo1[0])
houseAddr2=houseAddr[j].find_all('a')
HouseCommunit.append(houseAddr2[0].string) #'虹延小区'
HouseLocMajor.append(houseAddr2[1].string) #'长宁'
HouseLocMinor.append(houseAddr2[2].string) #'西郊'
BuildYear=re.findall(r'\d{4}',houseAddr[j].get_text())
if BuildYear:
HouseBuildYear.append(int(BuildYear[0])) # '1995'
else:
HouseBuildYear.append('unknow') # '1995'
except:
info=sys.exc_info()
print(info[0],":",info[1])
#soup.decompose()
#gc.collect()
end = time.time()
#sleeptime=random.randint(1, 5)/10
sleeptime=0
mem1 = proc.memory_info().rss/1048576
#print("Allocation: %0.1f" % (mem1-mem0))
#print(str(i),round(end - begin,2),sleeptime)
print("#%s-%s/%s-process:%.2fs wait:%.2fs Mem:%.1fMB"
% (str(SizeLevel),str(i),str(totalpage), end - begin, sleeptime, mem1-mem0))
#time.sleep(sleeptime)
#When every new page request, empty the memory
# del res,soup,price,priceper,houseInfo,houseAddr
# mem2 = proc.memory_info().rss
# gc.collect()
# mem3 = proc.memory_info().rss
# pd = lambda x2, x1: 100.0 * (x2 - x1) / mem0
# print("Allocation: %0.2f%%" % pd(mem1, mem0),
# "Unreference: %0.2f%%" % pd(mem2, mem1),
# "Collect: %0.2f%%" % pd(mem3, mem2),
# "Overall: %0.2f%%" % pd(mem3, mem0))
#snapshot = tracemalloc.take_snapshot()
#display_top(snapshot)
df=pandas.DataFrame({'总价':TotalPrice,'单价':PricePerArea,'房型':HouseConfig,
'层':HouseHeight,'面积':HouseArea,'小区':HouseCommunit,
'区':HouseLocMajor,'板块':HouseLocMinor,'房龄':HouseBuildYear,
'网址':LinkUrl})
datetimestr=time.strftime('%Y-%m-%d',time.localtime(time.time()))
df.to_csv(datetimestr+'-'+DistrictList[0]+'-LianJia.csv')
#def gethousedetail1(url,soup,j):#定义函数,目标获得子域名里的房屋详细信息
# info={}#构造字典,作为之后的返回内容
# s=soup.select('.info-col a')[1+3*j]#通过传入的j获取所在区的内容
# pat='<a.*?>(.c)</a>'#构造提取正则
# info['所在区']=''.join(list(re.compile(pat).findall(str(s))))#使用join将提取的列表转为字符串
# s1=soup.select('.info-col a')[0+3*j]#[0].text.strip()
# pat1='<span.*?>(.*?)</span>'
# info['具体地点']=''.join(list(re.compile(pat1).findall(str(s1))))
# s2=soup.select('.info-col a')[2+3*j]#[0].text.strip()
# pat2='<a.*?>(.*?)</a>'
# info['位置']=''.join(list(re.compile(pat2).findall(str(s2))))
# q=requests.get(url)#使用子域名
# soup=BeautifulSoup(q.text,'html.parser')#提取子域名内容,即页面详细信息
# for dd in soup.select('.content li'):#提取class=content标签下的li标签房屋信息
# a=dd.get_text(strip=True)#推荐的去空格方法,比strip()好用
# if ':' in a:#要有冒号的,用中文的冒号,因为网页中是中文
# key,value=a.split(':')#根据冒号切分出键和值
# info[key]=value
# info['总价']=soup.select('.bold')[0].text.strip()#提取总价信息
# return info#传回这一个页面的详细信息
#for i in range(1,5):#爬取399页,想爬多少页直接修改替换掉400,不要超过总页数就好
# res=requests.get('http://sh.lianjia.com/ershoufang/d'+str(i),headers=headers1)#爬取拼接域名
# soup = BeautifulSoup(res.text,'html.parser')#使用html筛选器
##print(soup)
#for j in range(0,29):#网站每页呈现30条数据,循环爬取
# url1=soup.select('.prop-title a')[j]['href']#选中class=prop-title下的a标签里的第j个元素的href子域名内容
# url=domain+url1#构造子域名
# print(soup)
# houseary.append(gethousedetail1(url,soup,j))#传入自编函数需要的参数
#
#df=pandas.DataFrame(houseary)
#df
#df.to_excel('house_lianjia.xlsx')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
7031,
2556,
1478,
2310,
25,
2327,
25,
1314,
2177,
201,
198,
201,
198,
31,
9800,
25,
38132,
259,
201,
198,
53,
16,
13,
17,
779,
3401... | 1.64018 | 5,097 |
try:
raise AssertionError('this is a test')
except:
print('test passed')
| [
28311,
25,
198,
220,
220,
220,
5298,
2195,
861,
295,
12331,
10786,
5661,
318,
257,
1332,
11537,
198,
16341,
25,
198,
220,
220,
220,
3601,
10786,
9288,
3804,
11537,
198
] | 2.7 | 30 |
# !/user/bin/python
# coding=utf-8
from __future__ import print_function
import urllib
import time
import os
import re
try:
from my_net import net
except ImportError:
raise ImportError('Sorry, can not find \'my_net\' .\nPlease view https://github.com/WuJunkai2004/Pyself/blob/master/my_net/my_net.py to download .')
__author__ ='Wu Junkai(wujunkai20041123@outlook.com)'
__version__ ='1.10.0'
__run_environment__ ='python 2.6 and above'
__edit_environment__='python 2.7.14 by IDLE'
if(__name__=='__main__'):
m=user()
m._login()
| [
2,
5145,
14,
7220,
14,
8800,
14,
29412,
198,
2,
19617,
28,
40477,
12,
23,
198,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
2956,
297,
571,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
302,
198,
198,
28311... | 2.429185 | 233 |
# Import packages
import argparse
import json
import multiprocessing
import os
from pathlib import Path
import numpy as np
import scipy as scipy
from scipy.io import wavfile
from config_path import get_paths
from parallel_proc import process
from utils import (ProgressBar, is_clipped, read_source_images,
wav_format_to_float)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Creates all the configuration files")
parser.add_argument("config", type=Path, help="Path to configuration file")
parser.add_argument(
"original_dataset_paths",
type=Path,
help="Path to folders containing original datasets",
)
parser.add_argument(
"output_path", type=Path, help="Path to destination folder for the output"
)
args = parser.parse_args()
with open(args.config, "r") as f:
config = json.load(f)
# get all the paths
config_path = get_paths(config, args.original_dataset_paths, args.output_path)
check_mix(config, config_path)
| [
2,
17267,
10392,
198,
11748,
1822,
29572,
198,
11748,
33918,
198,
11748,
18540,
305,
919,
278,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
355,
629,
541,
88,
... | 2.75853 | 381 |
"""
写教学评价简直是在浪费我的时间
Author: Aber Sheeran
Time: 2017-12-16
"""
import re
import json
from .Base import log
def fuck_the_teaching_evaluation(session):
"""教学评价"""
for each in re.findall(r"<a[\s\S]*?href='([\s\S]+?)'", (session.get_page("jxpj/xsjxpj.shtml"))):
log.debug(f"处理{each}中...")
_deal_teaching_evaluation_page(session, each)
def _deal_teaching_evaluation_page(session, page_url):
"""处理单个教学评价页面"""
post_data = {} # 将发送的信息
page = session.get_page(page_url)
# 这里不能改,教务系统写死的
for hidden_input in re.findall(r'input.+?type="hidden".*?>', page):
temproray = re.search(r'name="(?P<key>.*?)".*?value="(?P<value>.*?)"', hidden_input)
post_data[temproray.group("key")] = temproray.group("value")
# 打分部分,可以自行下调
for key, max_num in re.findall(r'input name="(?P<name>.+?)".+?max="(?P<max>\d+)".+?class="number', page):
post_data[key] = max_num
# 评语部分,随便改
post_data["PJXX"] = "上课生动有趣,深入浅出!"
log.debug(post_data)
message = session.post_data(
"/jxpj/xsjxpj/saveinfo?action=ok",
data=post_data,
)
try:
assert message["success"] == "success", message["msg"]
except AssertionError as e:
log.error(e)
| [
37811,
201,
198,
37863,
247,
46763,
247,
27764,
99,
46237,
226,
20015,
115,
163,
106,
222,
33566,
112,
42468,
28839,
101,
38184,
103,
164,
112,
117,
22755,
239,
21410,
33768,
114,
29785,
112,
201,
198,
13838,
25,
27700,
1375,
263,
272,
... | 1.675926 | 756 |
import sys
import os
# Make sure that the application source directory (this directory's parent) is
# on sys.path.
import pytest
here = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, here)
print(sys.path)
@pytest.fixture
| [
11748,
25064,
198,
11748,
28686,
198,
198,
2,
6889,
1654,
326,
262,
3586,
2723,
8619,
357,
5661,
8619,
338,
2560,
8,
318,
198,
2,
319,
25064,
13,
6978,
13,
198,
11748,
12972,
9288,
198,
198,
1456,
796,
28686,
13,
6978,
13,
15908,
36... | 2.835165 | 91 |
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/92/A
# 之前是模拟解; 现在尝试数学解..
import math
n,m = list(map(int,input().split())) #50,1e4
cc = (n*(n+1))//2
m = m%cc
x = int((math.sqrt((m<<3)+1)-1)/2)
print(m-(x*(x+1))//2)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
2,
3740,
1378,
19815,
891,
273,
728,
13,
785,
14,
1676,
22143,
316,
14,
45573,
14,
5892,
14,
32,
198,
2,
220,
45298,
30298,
235,
42468,
162,
101,
94,
162,
233,
253,
164,
... | 1.592105 | 152 |
# Copyright 2021 Palo Alto Networks, Inc
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Adapted from:
# https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/action/wait_for_connection.py
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import json
import time
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
import xmltodict
from ansible.module_utils._text import to_text
from ansible.plugins.action import ActionBase
from ansible.utils.display import Display
from ansible_collections.mrichardson03.panos.plugins.httpapi.panos import (
TimedOutException,
)
display = Display()
| [
2,
15069,
33448,
44878,
34317,
27862,
11,
3457,
198,
2,
198,
2,
2448,
3411,
284,
779,
11,
4866,
11,
13096,
11,
290,
14,
273,
14983,
428,
3788,
329,
597,
198,
2,
4007,
351,
393,
1231,
6838,
318,
29376,
7520,
11,
2810,
326,
262,
202... | 3.414758 | 393 |
from xCore import xCore
CSS811_REG_STATUS = 0x00
CSS811_REG_MEAS_MODE = 0x01
CSS811_REG_ALG_RST_DATA = 0x02
CSS811_REG_RAW_DATA = 0x03
CSS811_REG_ENV_DATA = 0x05
CSS811_REG_THRESHOLDS = 0x10
CSS811_REG_BASELINE = 0x11
CSS811_REG_HW_VERSION = 0x21
CSS811_REG_FW_BOOT_V = 0x23
CSS811_REG_FW_APP_V = 0x24
CSS811_REG_FW_ERROR_ID = 0xE0
CSS811_REG_SW_RESET = 0xFF
CSS811_DATA_READY = 0x08
CSS811_REG_HW_ID = 0x20
CSS811_HW_CODE = 0x81
CCS811_BOOTLOADER_APP_ERASE = 0xF1
CCS811_BOOTLOADER_APP_DATA = 0xF2
CCS811_BOOTLOADER_APP_VERIFY = 0xF3
CCS811_BOOTLOADER_APP_START = 0xF4
CCS811_DRIVE_MODE_IDLE = 0x00
CCS811_DRIVE_MODE_1SEC = 0x10
CCS811_DRIVE_MODE_10SEC = 0x20
CCS811_DRIVE_MODE_60SEC = 0x30
CCS811_DRIVE_MODE_250MS = 0x40
| [
6738,
2124,
14055,
1330,
2124,
14055,
198,
198,
49155,
23,
1157,
62,
31553,
62,
35744,
2937,
796,
657,
87,
405,
198,
49155,
23,
1157,
62,
31553,
62,
11682,
1921,
62,
49058,
796,
657,
87,
486,
198,
49155,
23,
1157,
62,
31553,
62,
184... | 1.799505 | 404 |
from factory import Sequence, SubFactory
from factory.django import DjangoModelFactory
from lessons.models import Lesson, Resource, Activity
| [
6738,
8860,
1330,
45835,
11,
3834,
22810,
198,
6738,
8860,
13,
28241,
14208,
1330,
37770,
17633,
22810,
198,
198,
6738,
11658,
13,
27530,
1330,
12892,
261,
11,
20857,
11,
24641,
198
] | 4.580645 | 31 |
rainbow = [ 'красный', 'оранжевый', 'жёлтый', 'зелёный', 'голубой', 'синий', 'фиолетовый']
i = 0
for i in range(0, 7):
print(rainbow[i])
| [
3201,
8176,
796,
685,
705,
31583,
21169,
16142,
21727,
22177,
45035,
140,
117,
3256,
705,
15166,
21169,
16142,
22177,
140,
114,
16843,
38857,
45035,
140,
117,
3256,
705,
140,
114,
141,
239,
30143,
20375,
45035,
140,
117,
3256,
705,
140,
... | 1.293578 | 109 |
#-------------------------------------------------------------------------------
# bob: environment.py
#
# Environment object.
#
# Eli Bendersky (eliben@gmail.com)
# This code is in the public domain
#-------------------------------------------------------------------------------
class Environment(object):
""" An environment in which variables are bound to values. Variable names
must be hashable, values are arbitrary objects.
Environment objects are linked via parent references. When bindings are
queried or assigned and the variable name isn't bound in the
environment, the parent environment is recursively searched.
All environment chains ultimately terminate in a "top-level" environment
which has None in its parent link.
"""
def __init__(self, binding, parent=None):
""" Create a new environment with the given binding (dict var -> value)
and a reference to a parent environment.
"""
self.binding = binding
self.parent = parent
def lookup_var(self, var):
""" Looks up the bound value for the given variable, climbing up the
parent reference if required.
"""
if var in self.binding:
return self.binding[var]
elif self.parent is not None:
return self.parent.lookup_var(var)
else:
raise Environment.Unbound('unbound variable "%s"' % var)
def define_var(self, var, value):
""" Add a binding of var -> value to this environment. If a binding for
the given var exists, it is replaced.
"""
self.binding[var] = value
def set_var_value(self, var, value):
""" Sets the value of var. If var is unbound in this environment, climbs
up the parent reference.
"""
if var in self.binding:
self.binding[var] = value
elif self.parent is not None:
self.parent.set_var_value(var, value)
else:
raise Environment.Unbound('unbound variable "%s"' % var)
| [
2,
10097,
24305,
198,
2,
29202,
25,
2858,
13,
9078,
198,
2,
198,
2,
9344,
2134,
13,
220,
198,
2,
198,
2,
25204,
347,
7338,
2584,
357,
417,
571,
268,
31,
14816,
13,
785,
8,
198,
2,
770,
2438,
318,
287,
262,
1171,
7386,
198,
2,
... | 2.731017 | 777 |
# *****************************************************************************
#
# Copyright (c) 2021, the temporal-cache authors.
#
# This file is part of the temporal-cache library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
| [
2,
41906,
17174,
4557,
35625,
198,
2,
198,
2,
15069,
357,
66,
8,
33448,
11,
262,
21964,
12,
23870,
7035,
13,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
262,
21964,
12,
23870,
5888,
11,
9387,
739,
262,
2846,
286,
198,
2,
262,
2484... | 4.484848 | 66 |
import json
import boto3
import logging
import decimal
from boto3.dynamodb.conditions import Key, Attr
dynamodb = boto3.resource('dynamodb')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Helper class to convert a DynamoDB item to JSON.
| [
11748,
33918,
198,
11748,
275,
2069,
18,
198,
11748,
18931,
198,
11748,
32465,
198,
6738,
275,
2069,
18,
13,
67,
4989,
375,
65,
13,
17561,
1756,
1330,
7383,
11,
3460,
81,
198,
198,
67,
4989,
375,
65,
796,
275,
2069,
18,
13,
31092,
... | 2.908046 | 87 |
from django import forms
from .models import About, Info | [
6738,
42625,
14208,
1330,
5107,
198,
6738,
764,
27530,
1330,
7994,
11,
14151
] | 4.307692 | 13 |
from gui import *
from tkinter import *
if __name__ == "__main__":
main()
| [
6738,
11774,
1330,
1635,
198,
198,
6738,
256,
74,
3849,
1330,
1635,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1388,
3419,
198
] | 2.59375 | 32 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-blockstack-auth
------------
Tests for `django-blockstack-auth` views module.
"""
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.test import Client
from django_blockstack_auth.views import LoginView, CallbackView, LogoutView
# TODO: Find a way to test the full login/logout flow using the
# Blockstack portal
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
9288,
62,
28241,
14208,
12,
9967,
25558,
12,
18439,
198,
10541,
198,
198,
51,
3558,
329,
4600,
28241,
... | 3.043478 | 138 |
from .text_to_docs import text_to_docs
| [
198,
198,
6738,
764,
5239,
62,
1462,
62,
31628,
1330,
2420,
62,
1462,
62,
31628,
198
] | 2.5625 | 16 |
from .OBJET import OBJET
import numpy as np
class Objet(object):
"""OBJET"""
| [
6738,
764,
9864,
41,
2767,
1330,
25334,
41,
2767,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
4871,
1835,
31173,
7,
15252,
2599,
198,
220,
220,
220,
37227,
9864,
41,
2767,
37811,
198
] | 2.441176 | 34 |
from unittest import TestCase
from fpipe.file import File
from fpipe.meta import Size, MD5
from fpipe.gen import Meta, Program
from fpipe.exceptions import FileDataException
from fpipe.meta.stream import Stream
from fpipe.utils.const import PIPE_BUFFER_SIZE
from test_utils.test_file import TestStream
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
277,
34360,
13,
7753,
1330,
9220,
198,
6738,
277,
34360,
13,
28961,
1330,
12849,
11,
10670,
20,
198,
6738,
277,
34360,
13,
5235,
1330,
30277,
11,
6118,
198,
6738,
277,
34360,
13,... | 3.454545 | 88 |
import tempfile
from io import BytesIO
import pandas as pd
import arrow
import hanshu
from reportlab.lib import colors
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.pdfmetrics import registerFontFamily
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Paragraph, SimpleDocTemplate, Table, LongTable, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.enums import TA_CENTER
story=[]
#获取年月日
time=arrow.now()
year=time.year
month=time.month-1
day=(time.shift(days=-time.day)).day
month0=str(int(month)-5)
month1=time.month-2
month2=time.month-3
month3=time.month-4
month4=time.month-5
month5=time.month-6
#input('起始月份:')
#累计检测应用数
appacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计检测应用数')
appnum_acc=appacc['检测应用数'].sum()
appnum_now=appacc.values.tolist()
#print(appacc.检测应用数.tolist()[-1])
#累计检测次数
numacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计检测次数')
num_acc=numacc['检测次数'].sum()
num_now=numacc.values.tolist()
#累计检测代码行数
codeacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计代码行数')
codenum_acc=codeacc['代码行数'].sum()
code_now=codeacc.values.tolist()
#累计缺陷数
defectacc=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'累计缺陷类型及占比')
defectnum_acc=defectacc['数量'].sum()
#语言占比
language=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月语言占比')
language0=language.values.tolist()
#当月缺陷占比
defectnow=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月安全缺陷')
defectnownum=defectnow['爆发数'].sum()
defectnow0=defectnow.values.tolist()
#计算省份个数
pronumnow=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月省公司检测次数')
pronumnow0=hanshu.zyzh(pronumnow)
pronumnow1=hanshu.btcs(pronumnow0['org_id'])
#统计当月未检测省公司
pronan=hanshu.diff(pronumnow)
#查找检测次数排名前五的省公司
pronumtop5=hanshu.top5(pronumnow)
#print(pronumtop5.org_id)
#统计检测次数排名靠前的应用,省公司、应用名、检测次数
appnum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月应用检测次数')
appnum0=hanshu.zyzh(appnum)
appnumtop5=hanshu.top52(appnum0)
apptop5pro=appnumtop5.org_id.tolist()
apptop5=appnumtop5.app_name.tolist()
apptop5num=appnumtop5.次数.tolist()
#潮汐分析
datenum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月检测潮汐分析')
datetop1=hanshu.top1(datenum)
dateno1=pd.to_datetime(datetop1.datetime.tolist()[0])
cxyear=dateno1.year
cxmonth=dateno1.month
cxday=dateno1.day
#缺陷类型及爆发频率
defectype=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月缺陷类型及占比')
defectype0=defectype.sort_values(by='爆发频率', axis=0, ascending=False)
defectype1=defectype0.head(5)
#省公司缺陷密度
prodef=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月各省公司缺陷密度')
prodef00=hanshu.zyzh(prodef)
prodef0=prodef00.sort_values(by='midu', axis=0, ascending=False).head(3)
prodef1=prodef00.sort_values(by='midu', axis=0, ascending=True).head(5)
#当月应用缺陷密度
appdef=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'当月应用缺陷密度')
appdef00=hanshu.zyzh(appdef)
appdef0=appdef00.sort_values(by='rat', axis=0, ascending=False).head(5)
#筛选检测超过1次的应用
appnum2=appnum.loc[appnum["次数"] > 1]
#携带审计情况
xdsj=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'携带审计情况')
#计算携带审计利用率
shenji2=xdsj["app_name"].value_counts().reset_index()
jiance2=appnum[appnum.app_name.isin(xdsj['app_name'])]
shenji2.columns=['app_name','携带审计次数']
hebing=pd.merge(shenji2, jiance2, on = 'app_name')
hebing['携带审计利用率']=list(map(lambda x,y: (x/y), hebing['携带审计次数'], hebing['次数']))
xdsjtop3=hebing.sort_values(by='携带审计利用率', axis=0, ascending=False).head(3)
#table1
proapp=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司-月份-检测应用数')
proapp1=hanshu.suanzzeng(hanshu.zyzh((proapp)))
#TABLE2
data=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司-月份-应用平均缺陷密度')
data1=hanshu.suanzzeng(hanshu.zyzh((data)))
#TABLE3
tab3=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'本月安全缺陷')
tab3['缺陷数0']=round(tab3['爆发数']/10000,1)
tab3['缺陷数'] = [str(i) + '万' for i in tab3['缺陷数0']]
tab3['平均缺陷数/应用']=round(tab3['爆发数']/appnum_now[-1][-1],2)
tab30=tab3.T.values.tolist()
#table4
dataa0=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'应用第一次检测最后一次检测')
dataa=hanshu.zyzh(dataa0)
appno11=pd.DataFrame()
appno11['app_name']=dataa['app_name']
appno11['org_id']=dataa['org_id']
appno11['总检测数']=dataa['检测次数']
appno11['第1次检测']=dataa['第一次密度']
appno11['最后1次检测']=dataa['最后一次密度']
appno11['变动']=round((dataa['最后一次密度']-dataa['第一次密度'])/dataa['第一次密度']*100,2)
appno11['变动率']=[str(i)+'%' for i in appno11['变动']]
appno111 = appno11.sort_values(by='变动', axis=0, ascending=True)#按从小到大排序
appno112=appno111.values.tolist()
appaccnum=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测应用数')
appaccnum0=hanshu.zyzh(appaccnum)
pro_acc=hanshu.btcs(appaccnum0['org_id'])
appaccnum1 = appaccnum0.groupby(by=['org_id'],as_index = False)['appCount'].sum()
appaccnum2=appaccnum1.sort_values(by = 'appCount',axis = 0,ascending = False)#按大小顺序排名
appaccnumt=appaccnum2.head(5).values.tolist()
pdefect0=pd.read_excel("C:\\Users\\eric\\Desktop\\月报数据\\月报数据.xlsx",'省公司每月检测缺陷密度')
pdefect1=hanshu.zyzh(pdefect0)
#print(appaccnum2.head(5))
pdfmetrics.registerFont(TTFont('SimSun', './SimSun.ttf')) # 默认不支持中文,需要注册字体
pdfmetrics.registerFont(TTFont('SimSunBd', './simhei.ttf'))
pdfmetrics.registerFont(TTFont('Arial', './Arial.ttf'))
# registerFontFamily('SimSun', normal='SimSun', bold='SimSunBd', italic='VeraIt', boldItalic='VeraBI')
stylesheet = getSampleStyleSheet() # 获取样式集
stylesheet1 = getSampleStyleSheet()
# 获取reportlab自带样式
Normal = stylesheet['Normal']
BodyText = stylesheet['BodyText']
Italic = stylesheet['Italic']
Title = stylesheet['Title']
Heading1 = stylesheet['Heading1']
Heading2 = stylesheet['Heading2']
Heading3 = stylesheet['Heading3']
Heading4 = stylesheet['Heading4']
Heading5 = stylesheet['Heading5']
Heading6 = stylesheet['Heading6']
Bullet = stylesheet['Bullet']
Definition = stylesheet['Definition']
Code = stylesheet['Code']
# 自带样式不支持中文,需要设置中文字体,但有些样式会丢失,如斜体Italic。有待后续发现完全兼容的中文字体
Normal.fontName = 'SimSun'
Italic.fontName = 'SimSun'
BodyText.fontName = 'SimSun'
Title.fontName = 'SimSunBd'
Heading1.fontName = 'SimSunBd'
Heading2.fontName = 'SimSunBd'
Heading3.fontName = 'SimSunBd'
Heading4.fontName = 'SimSunBd'
Heading5.fontName = 'SimSun'
Heading6.fontName = 'SimSun'
Bullet.fontName = 'SimSun'
Definition.fontName = 'SimSun'
Code.fontName = 'SimSun'
# 添加自定义样式
stylesheet.add(
ParagraphStyle(name='body',
fontName="SimSun",
fontSize=12,
textColor='black',
leading=20, # 行间距
spaceBefore=10, # 段前间距
spaceAfter=10, # 段后间距
leftIndent=0, # 左缩进
rightIndent=0, # 右缩进
firstLineIndent=20, # 首行缩进,每个汉字为10
alignment=TA_LEFT, # 对齐方式
bulletFontSize=15, #bullet为项目符号相关的设置
bulletIndent=-50,
bulletAnchor='start',
bulletFontName='Symbol'
)
)
# 添加自定义样式
stylesheet1.add(
ParagraphStyle(name='body',
fontName="SimSun",
fontSize=10,
textColor='black',
leading=10, # 行间距
spaceBefore=10, # 段前间距
spaceAfter=0, # 段后间距
leftIndent=0, # 左缩进
rightIndent=0, # 右缩进
firstLineIndent=0, # 首行缩进,每个汉字为10
alignment=TA_CENTER, # 对齐方式
bulletFontSize=15, #bullet为项目符号相关的设置
bulletIndent=-50,
bulletAnchor='start',
bulletFontName='Symbol'
)
)
body = stylesheet['body']
body1=stylesheet1['body']
# 段落
content1="<font fontsize=12> 代码安全审计主要是通过找出代码中存在的潜在安全风险并修复,以提高应用系统代码质量,降低系统安全风险。自2020年9月份上线以来,代码安全子系统在云道平台安全审计中心稳定运行。<br/>    本报告基于云道平台安全审计中心"\
"代码安全检测子系统的检测数据进行统计分析,内容分为两大部分:第一部分介绍了2021年"+str(month0)+"-"+str(month)+"月每个月代码安全检测的情况以及趋势。第二部分介绍了"+str(month)+"月份的总体检测情况、安全缺陷情况。</font>"
content2 = " <font fontsize=12>2021年"+str(month0)+"-"+str(month)+"月,云道安全审计中心代码安全检测引擎共检测了</font><font name=SimSunBd fontsize=12>"+str(appnum_acc)+"</font><font fontsize=12>个"\
"应用系统,检测任务数<font name=SimSunBd fontsize=12>"+str(num_acc)+"</font>次,共计<font name=SimSunBd fontsize=12>"+str(round((codenum_acc/100000000),2))+"亿</font>行代码,"\
"共检测出缺陷<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,其中严重缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][0]*100),2))+"%</font>,"\
"高危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][1]*100),2))+"%</font>,中危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][2]*100),2))+"%</font>。"\
"低危和警告占比<font name=SimSunBd fontsize=12>"+str(round((defectacc['占比'][3]*100+defectacc['占比'][4]*100),2))+"%。</font><br/>    检测次数趋势如图2.1所示:</font>"
content3 = " <font fontsize=12>从图中可以看出,1月份检测次数达到峰值,2月份急剧下降,3月份开始检测次数有所回升。</font>"
content4 = " <font fontsize=12>"+str(year)+"年"+str(month0)+"-"+str(month)+"月,<font name=SimSunBd fontsize=12>"+str(pro_acc)+"</font>个省公司累计检测应用数为:"\
"<font name=SimSunBd fontsize=12>"+str(appnum_acc)+"</font>个。每个月的检测应用数及其变化如表2.1所示。可以看出,<font name=SimSunBd fontsize=12>"+str(appaccnumt[0][0])+"、"+str(appaccnumt[1][0])+"、"\
""+str(appaccnumt[2][0])+"、"+str(appaccnumt[3][0])+"、"+str(appaccnumt[4][0])+"</font>月均检测应用数排前五,<font name=SimSunBd fontsize=12>江西、山东、山西、四川</font>等省公司自2月份以来检测应用数呈上升趋势。</font>"
content5 = " <font fontsize=12>"+str(year)+"年"+str(month0)+"-"+str(month)+"月,云道平台安全审计中心对来自<font name=SimSunBd fontsize=12>"+str(pro_acc)+"</font>个省公司的<font name=SimSunBd fontsize=12>"\
+str(appnum_acc)+"</font>个应用,累计检测次数:<font name=SimSunBd fontsize=12>"+str(num_acc)+"次</font>,总发现缺陷数<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,"\
"平均千行代码缺陷密度为<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc*1000/codenum_acc),2))+"</font>。省公司应用平均千行代码缺陷密度变化情况,如表2.2,可以看出,<font name=SimSunBd fontsize=12>安徽、北京、四川</font>三个"\
"省公司的应用平均千行代码缺陷密度总体呈下降趋势。</font>"
content51=" <font fontsize=12>截至"+str(year)+"年"+str(month)+"月"+str(day)+"日,代码安全检测引擎"+str(month)+"月份共检测<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用系统,检测任务数<font name=SimSunBd fontsize=12>"\
+str(num_now[-1][-1])+"</font>次,共计<font name=SimSunBd fontsize=12>"+str(round((code_now[-1][-1]/10000000),2))+"千万</font>行代码。<br/>    检测的应用系统中,使用数量最多的两种编程语言为<font name=SimSunBd fontsize=12>"\
+str(language0[0][0])+"、"+str(language0[1][0])+"</font>,对应的应用数量分别为<font name=SimSunBd fontsize=12>"+str(language0[0][1])+"</font>个和<font name=SimSunBd fontsize=12>"+str(language0[1][1])+"</font>个。可以看出,"\
"各公司在进行应用开发时的首选语言是<font name=SimSunBd fontsize=12>"+str(language0[0][0])+"</font>语言,占比高达<font name=SimSunBd fontsize=12>"+str(round(language0[0][1]*100/(language['存在应用数'].sum()),2))+"%</font>。编程语言的总体分布情况如图3.1所示。</font>"
content6 = " <font fontsize=12>共检测出缺陷<font name=SimSunBd fontsize=12>"+str(round((defectnownum/10000),1))+"万</font>个,其中严重缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[0][1]/defectnownum*100),2))+"%</font>,"\
"高危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[1][1]/defectnownum*100),2))+"%</font>,中危缺陷占比<font name=SimSunBd fontsize=12>"+str(round((defectnow0[2][1]/defectnownum*100),2))+"%</font>,"\
"低危和警告占比<font name=SimSunBd fontsize=12>"+str(round(((defectnow0[3][1]+defectnow0[4][1])/defectnownum*100),2))+"%</font>。具体详情将从应用检测情况、应用安全缺陷情况、缺陷改善情况以及缺陷审计情况四个角度展开。</font>"
content7 = " <font fontsize=12>截至"+str(month)+"月"+str(day)+"日,共有来自<font name=SimSunBd fontsize=12>"+str(pronumnow1)+"</font>个省公司(不包括"+str(pronan)+")的<font name=SimSunBd fontsize=12>"+str(appacc.检测应用数.tolist()[-1])+"</font>个应用进行代码安全检测<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>,"\
"各省公司应用检测总数如图3.2所示,颜色越深表示检测次数越多,可以看出,排在前面的省份是<font name=SimSunBd fontsize=12>"+str('、'.join(pronumtop5.org_id.tolist()))+"</font>,均超过了<font name=SimSunBd fontsize=12>"+str(min(pronumtop5.total.tolist())-1)+"</font>次。</font>"
content8 = " <font fontsize=12>各应用检测次数排名如图3.3所示。可以看出,排在前5的应用分别是:"\
"来自"+str(apptop5pro[0])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[0])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[0])+"次</font>;"\
"来自"+str(apptop5pro[1])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[1])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[1])+"次</font>;"\
"来自"+str(apptop5pro[2])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[2])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[2])+"次</font>;"\
"来自"+str(apptop5pro[3])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[3])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[3])+"次</font>;"\
"来自"+str(apptop5pro[4])+"省公司的<font name=SimSunBd fontsize=12>"+str(apptop5[4])+"</font>检测了<font name=SimSunBd fontsize=12>"+str(apptop5num[4])+"次</font>。</font>"
content9 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月,云道安全审计中心代码安全检测引擎总共检测了<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>,平均每天检测<font name=SimSunBd fontsize=12>"+str(round(numacc.检测次数.tolist()[-1]/int(day),2))+"次</font>。每天检测次数如图3.4所示。"\
"可以看出,<font name=SimSunBd fontsize=12>"+str(cxyear)+"年"+str(cxmonth)+"月"+str(cxday)+"日</font>应用检测最为密集,且各应用相对集中在<font name=SimSunBd fontsize=12>4月6日-4月14日</font>提交检测。</font>"
content10 = " <font fontsize=12>据统计,<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用总共检测出代码安全缺陷总数为:<font name=SimSunBd fontsize=12>"+str(round((defectnum_acc/10000),1))+"万</font>个,平均每个应用存在<font name=SimSunBd fontsize=12>"+str(int(defectnum_acc/appnum_now[-1][-1]))+"</font>个安全缺陷问题,"\
"各类安全缺陷出现次数及平均在每应用中的出现次数如表3.3内容所示。</font>"
content11 = " <font fontsize=12><font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个检测的应用中,安全缺陷类型覆盖了<font name=SimSunBd fontsize=12>"+str(len(defectype))+"种</font>,如图3.5所示。可以看出,排名前五的安全缺陷类型占总缺陷爆发数的<font name=SimSunBd fontsize=12>"\
+str(round((defectype1['爆发频率'].sum()/defectype0['爆发频率'].sum())*100,2))+"%</font>,这六种缺陷类型的爆发频率均超过<font name=SimSunBd fontsize=12>"+str(round((defectype1.爆发频率.tolist()[4]-1)/10000,2))+"万</font>,它们分别为:<font name=SimSunBd fontsize=12>"\
+str(defectype1.defect_cname.tolist()[0])+"、"+str(defectype1.defect_cname.tolist()[1])+"、"+str(defectype1.defect_cname.tolist()[2])+"、"+str(defectype1.defect_cname.tolist()[3])+"、"+str(defectype1.defect_cname.tolist()[4])+"</font>。</font>"
content12 = " <font fontsize=12>云道平台安全审计中心对来自<font name=SimSunBd fontsize=12>"+str(pronumnow1)+"</font>个省公司的<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用源代码进行检测,平均每个省公司"\
"存在<font name=SimSunBd fontsize=12>"+str(round((defectnownum/10000/pronumnow1),2))+"万</font>个代码缺陷问题,平均千行代码缺陷密度为:<font name=SimSunBd fontsize=12>"+str(round((defectnownum*1000/code_now[-1][-1]),2))+"</font>。"\
"其中,<font name=SimSunBd fontsize=12>"+str(prodef0.org_id.tolist()[0])+"、"+str(prodef0.org_id.tolist()[1])+"、"+str(prodef0.org_id.tolist()[2])+"</font>是千行代码缺陷密度最高的三家省公司,均超过了"+str(round((prodef0.midu.tolist()[2]-1),2))+";"\
"<font name=SimSunBd fontsize=12>"+str(prodef1.org_id.tolist()[0])+"、"+str(prodef1.org_id.tolist()[1])+"、"+str(prodef1.org_id.tolist()[2])+"、"+str(prodef1.org_id.tolist()[3])+"、"+str(prodef1.org_id.tolist()[4])+"</font>是千行代码缺陷密度最低的五家省公司,说明"\
"这五家省公司应用的安全性较高。各省公司缺陷密度分布情况如图3.6所示,颜色越深表示千行代码缺陷密度越大。</font>"
content13 = " <font fontsize=12>应用千行代码缺陷密度分布情况如图3.7所示,排在前五名的应用情况具体为:"\
"来自"+str(appdef0.org_id.tolist()[0])+"省公司的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[0])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[0],2))+";来自"+str(appdef0.org_id.tolist()[1])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[1])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[1],2))+";"\
"来自"+str(appdef0.org_id.tolist()[2])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[2])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[2],2))+";来自"+str(appdef0.org_id.tolist()[3])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[3])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[3],2))+";"\
"来自"+str(appdef0.org_id.tolist()[4])+"的<font name=SimSunBd fontsize=12>"+str(appdef0.app_name.tolist()[4])+"</font>千行代码缺陷密度为"+str(round(appdef0.rat.tolist()[4],2))+"。</font>"
content14 = " <font fontsize=12>"+str(month)+"月份检测次数多于1次的应用有<font name=SimSunBd fontsize=12>"+str(len(appnum2))+"</font>个,占总应用数的<font name=SimSunBd fontsize=12>"+str(round((len(appnum2)/appnum_now[-1][-1]*100),2))+"%</font>。分析应用在4月份第1次检测和最后1次检测的千行代码缺陷密度如表3.2,"\
"变动幅度为负数表示应用千行代码缺陷密度降低、安全性提高,而大部分应用的源代码安全缺陷情况都存在明显的改善趋势。</font>"
content15 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月,<font name=SimSunBd fontsize=12>"+str(appnum_now[-1][-1])+"</font>个应用发起了<font name=SimSunBd fontsize=12>"+str(numacc.检测次数.tolist()[-1])+"次</font>检测请求,携带审计<font name=SimSunBd fontsize=12>"+str(len(xdsj))+"</font>次,"\
"审计功能利用率(发起审计次数/总检测次数)为:<font name=SimSunBd fontsize=12>"+str(round(len(xdsj)/numacc.检测次数.tolist()[-1]*100,2))+"%</font>,对应用进行分析,如图3.8所示。可以看出,<font name=SimSunBd fontsize=12>"+str(xdsjtop3.app_name.tolist()[0])+"、"+str(xdsjtop3.app_name.tolist()[1])+"、"+str(xdsjtop3.app_name.tolist()[2])+"</font>的审计功能利用率较高。</font>"
content16 = " <font fontsize=12>"+str(year)+"年"+str(month)+"月发起人工审计的应用有<font name=SimSunBd fontsize=12>"+str()+"个</font>,分别为:<font name=SimSunBd fontsize=12>"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"、"+str()+"</font>,"\
"只占参与检测应用总数的<font name=SimSunBd fontsize=12>2.45%</font>,说明目前人工审计的使用率并不高。</font>"
content17 = " <font fontsize=12>针对"+str(month)+"月份的检测及审计数据进行分析后,提出以下建议:<br/>    ①"+str(defectype1.defect_cname.tolist()[0])+"、"+str(defectype1.defect_cname.tolist()[1])+"是最频繁爆发的缺陷,建议各省公司在应用维护时注意防范这两类安全问题。<br/>    ②分析表明,静态检测存在一定的误报,目前审计功能的使用率较低,"\
"建议各个省公司对缺陷进行审计,提高代码的安全性。</font>"
# Table 表格
image = Image('./1.jpg')
image.drawWidth = 160
image.drawHeight = 100
body = stylesheet['body']
table_data0 = [['省份', str(month5)+'月', str(month4)+'月',str(month3)+'月',str(month2)+'月',str(month1)+'月',str(month)+'月'],
]
table_data40=[['应用名','省公司','总检测数','第1次检测','最后1次检测','变动'],
]
table_data=table_data0
table_data1=table_data0
table_data4=table_data40
for i in range(0,len(data1)-1):
tabledata=[[Paragraph(str(data1[i][0]),body1), str(data1[i][1]), str(data1[i][2])+str(data1[i][-5]), str(data1[i][3])+str(data1[i][-4]), str(data1[i][4])+str(data1[i][-3]), str(data1[i][5])+str(data1[i][-2]), str(data1[i][6])+str(data1[i][-1])],
]
table_data=table_data+tabledata
i=i+1
for j in range(0,len(proapp1)-1):
tabledata1=[[Paragraph(str(proapp1[j][0]),body1), str(proapp1[j][1]), str(proapp1[j][2])+str(proapp1[j][-5]), str(proapp1[j][3])+str(proapp1[j][-4]), str(proapp1[j][4])+str(proapp1[j][-3]), str(proapp1[j][5])+str(proapp1[j][-2]), str(proapp1[j][6])+str(proapp1[j][-1])],
]
table_data1=table_data1+tabledata1
j=j+1
table_data2 =[['缺陷类型','严重','高危','中等','低风险','警告'],
[Paragraph('缺陷数',body1),str(tab30[3][0]),str(tab30[3][1]),str(tab30[3][2]),str(tab30[3][3]),str(tab30[3][4])],
[Paragraph('平均缺陷数/应用',body1),str(tab30[4][0]),str(tab30[4][1]),str(tab30[4][2]),str(tab30[4][3]),str(tab30[4][4])],
]
for m in range(0,len(appno112)-1):
tabledata4=[[Paragraph(str(appno112[m][0]),body1), str(appno112[m][1]), str(appno112[m][2]), str(appno112[m][3]), str(appno112[m][4]), str(appno112[m][6])],
]
table_data4=table_data4+tabledata4
m=m+1
table_style = [
('FONTNAME', (1, 0), (-1, -1), 'SimSun'), # 字体
('FONTNAME', (0, 0), (-1, 0), 'SimSunBd'), # 字体
('FONTSIZE', (0, 0), (-1, 0), 11), # 第一行的字体大小
('FONTSIZE', (0, 1), (-1, -1), 10), # 第二行到最后一行的字体大小
('ALIGN', (1, 0), (-1, -1), 'RIGHT'), # 所有表格左右中间对齐
('ALIGN', (0, 0), (-1, 0), 'CENTER'), # 所有表格左右中间对齐
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), # 所有表格上下居中对齐
# ('SPAN', (-2, -2), (-1, -1)), # 合并
# ('SPAN', (0, 4), (0, 5)), # 合并
# ('SPAN', (2, 4), (2, 5)), # 合并
('BACKGROUND', (0, 0), (-1, 0), colors.darkblue), # 设置第一行背景颜色
('TEXTCOLOR', (0, 0), (-1, 0), colors.white), # 设置表格内文字颜色
('GRID', (0, 0), (-1, -1), 0.75, colors.black), # 设置表格框线为灰色,线宽为0.1
]
table_style1 = [
('FONTNAME', (1, 0), (-1, -1), 'SimSun'), # 字体
('FONTNAME', (0, 0), (-1, 0), 'SimSunBd'), # 字体
('FONTSIZE', (0, 0), (-1, 0), 11), # 第一行的字体大小
('FONTSIZE', (0, 1), (-1, -1), 10), # 第二行到最后一行的字体大小
('ALIGN', (0, 0), (-1, -1), 'CENTER'), # 所有表格左右中间对齐
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'), # 所有表格上下居中对齐
('BACKGROUND', (0, 0), (-1, 0), colors.darkblue), # 设置第一行背景颜色
('TEXTCOLOR', (0, 0), (-1, 0), colors.white), # 设置表格内文字颜色
('GRID', (0, 0), (-1, -1), 0.75, colors.black), # 设置表格框线为灰色,线宽为0.1
]
table = Table(data=table_data,style=table_style, colWidths=75)
table1= Table(data=table_data1,style=table_style, colWidths=75)
table2= Table(data=table_data2,style=table_style1, colWidths=75)
table3=Table(data=table_data4,style=table_style1, colWidths=75)
#story.append(Paragraph("区块链", Title))
story.append(Paragraph("一、报告背景", Heading1))
story.append(Paragraph(content1, body))
story.append(Paragraph("二、"+str(month0)+"-"+str(month)+"月份检测情况", Heading1))
story.append(Paragraph(content2, body))
story.append(Paragraph(content3, body))
story.append(Paragraph("2.1 应用检测情况", Heading2))
story.append(Paragraph(content4, body))
story.append(table1)
story.append(Paragraph("2.2 缺陷密度分布情况", Heading2))
story.append(Paragraph(content5, body))
story.append(table)
story.append(Paragraph("三、"+str(month)+"月份检测情况", Heading1))
story.append(Paragraph(content51, body))
story.append(Paragraph(content6, body))
story.append(Paragraph("3.1 应用检测情况", Heading2))
story.append(Paragraph("3.1.1 应用检测次数排序", Heading3))
story.append(Paragraph(content7, body))
story.append(Paragraph(content8, body))
story.append(Paragraph("3.1.2 检测潮汐分析", Heading3))
story.append(Paragraph(content9, body))
story.append(Paragraph("3.2 缺陷密度分布情况", Heading2))
story.append(Paragraph(content10, body))
story.append(Paragraph("3.2.1 总体缺陷类型分布情况", Heading3))
story.append(Paragraph(content11, body))
story.append(table2)
story.append(Paragraph("3.2.2 应用缺陷密度排序", Heading3))
story.append(Paragraph(content12, body))
story.append(Paragraph(content13, body))
story.append(Paragraph("3.3 缺陷改善情况", Heading2))
story.append(Paragraph(content14, body))
story.append(table3)
story.append(Paragraph("3.4 审计情况", Heading2))
story.append(Paragraph("3.4.1 携带审计使用情况", Heading3))
story.append(Paragraph(content15, body))
story.append(Paragraph("3.4.2 人工审计使用情况", Heading3))
story.append(Paragraph(content16, body))
story.append(Paragraph("四、建议", Heading1))
story.append(Paragraph(content17, body))
# bytes
# buf = BytesIO()
# doc = SimpleDocTemplate(buf, encoding='UTF-8')
# doc.build(story)
# print(buf.getvalue().decode())
# file
doc = SimpleDocTemplate('C:\\Users\\eric\\Desktop\\hello.pdf')
doc.build(story) | [
11748,
20218,
7753,
201,
198,
6738,
33245,
1330,
2750,
4879,
9399,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
15452,
201,
198,
11748,
289,
504,
13415,
201,
198,
6738,
989,
23912,
13,
8019,
1330,
7577,
201,
198,
6738,
... | 1.407755 | 17,047 |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 29 20:45:28 2019
@author: satishsaini
"""
import json
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
30030,
2556,
2808,
1160,
25,
2231,
25,
2078,
13130,
201,
198,
201,
198,
31,
9800,
25,
3332,
680,
82,
391,
72,
201,
198,
37811,
201,
... | 2.18 | 50 |
"""
Loads the dbs for interactive sessions
"""
from regolith.runcontrol import DEFAULT_RC, load_rcfile, filter_databases, \
connect_db
rc = DEFAULT_RC
rc._update(load_rcfile("regolithrc.json"))
filter_databases(rc)
chained_db, dbs = connect_db(rc)
| [
37811,
198,
8912,
82,
262,
288,
1443,
329,
14333,
10991,
198,
37811,
198,
6738,
842,
21446,
13,
5143,
13716,
1330,
5550,
38865,
62,
7397,
11,
3440,
62,
6015,
7753,
11,
8106,
62,
19608,
18826,
11,
3467,
198,
220,
220,
220,
2018,
62,
... | 2.76087 | 92 |
#!/usr/bin/python3.9
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 LinYulong. All Rights Reserved
#
# @Time : 2021/10/31
# @Author : LinYulong
import numpy
import pandas
import xgboost
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier
from src.alg import cross_verify
from src.train import train_cfg
from src.train.train import column_split
from src.train.train_result import TrainResult
from sklearn import preprocessing, linear_model
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
24,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
5164,
56,
377,
506,
13,
1439,
6923,
33876,
220,
198,
2,
198,
2,
2488,
... | 3.220588 | 204 |
#coding:utf-8
#
# id: bugs.core_426
# title: Wrong sort order when using es_ES collate
# decription: Check if sort order for collate ES_ES is the one of DRAE , the oficial organization for standarization of spanish
# tracker_id: CORE-426
# min_versions: []
# versions: 2.1
# qmid: bugs.core_426
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.1
# resources: None
substitutions_1 = []
init_script_1 = """SET NAMES ISO8859_1;
CREATE TABLE TAB (A CHAR(3) CHARACTER SET ISO8859_1);
COMMIT;
INSERT INTO TAB VALUES ('zo');
INSERT INTO TAB VALUES ('ze');
INSERT INTO TAB VALUES ('yo');
INSERT INTO TAB VALUES ('ye');
INSERT INTO TAB VALUES ('xo');
INSERT INTO TAB VALUES ('xe');
INSERT INTO TAB VALUES ('vo');
INSERT INTO TAB VALUES ('ve');
INSERT INTO TAB VALUES ('uo');
INSERT INTO TAB VALUES ('ue');
INSERT INTO TAB VALUES ('to');
INSERT INTO TAB VALUES ('te');
INSERT INTO TAB VALUES ('so');
INSERT INTO TAB VALUES ('se');
INSERT INTO TAB VALUES ('ro');
INSERT INTO TAB VALUES ('re');
INSERT INTO TAB VALUES ('qo');
INSERT INTO TAB VALUES ('qe');
INSERT INTO TAB VALUES ('po');
INSERT INTO TAB VALUES ('pe');
INSERT INTO TAB VALUES ('oo');
INSERT INTO TAB VALUES ('oe');
INSERT INTO TAB VALUES ('no');
INSERT INTO TAB VALUES ('ne');
INSERT INTO TAB VALUES ('mo');
INSERT INTO TAB VALUES ('me');
INSERT INTO TAB VALUES ('llo');
INSERT INTO TAB VALUES ('lle');
INSERT INTO TAB VALUES ('lo');
INSERT INTO TAB VALUES ('le');
INSERT INTO TAB VALUES ('ko');
INSERT INTO TAB VALUES ('ke');
INSERT INTO TAB VALUES ('jo');
INSERT INTO TAB VALUES ('je');
INSERT INTO TAB VALUES ('io');
INSERT INTO TAB VALUES ('ie');
INSERT INTO TAB VALUES ('ho');
INSERT INTO TAB VALUES ('he');
INSERT INTO TAB VALUES ('go');
INSERT INTO TAB VALUES ('fe');
INSERT INTO TAB VALUES ('fo');
INSERT INTO TAB VALUES ('fe');
INSERT INTO TAB VALUES ('eo');
INSERT INTO TAB VALUES ('ee');
INSERT INTO TAB VALUES ('do');
INSERT INTO TAB VALUES ('de');
INSERT INTO TAB VALUES ('cho');
INSERT INTO TAB VALUES ('cha');
INSERT INTO TAB VALUES ('co');
INSERT INTO TAB VALUES ('ce');
INSERT INTO TAB VALUES ('bo');
INSERT INTO TAB VALUES ('be');
INSERT INTO TAB VALUES ('ao');
INSERT INTO TAB VALUES ('ae');"""
db_1 = db_factory(charset='ISO8859_1', sql_dialect=3, init=init_script_1)
test_script_1 = """SET HEADING OFF;
SELECT A FROM TAB ORDER BY A COLLATE ES_ES;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """ae
ao
be
bo
ce
cha
cho
co
de
do
ee
eo
fe
fe
fo
go
he
ho
ie
io
je
jo
ke
ko
le
lle
llo
lo
me
mo
ne
no
oe
oo
pe
po
qe
qo
re
ro
se
so
te
to
ue
uo
ve
vo
xe
xo
ye
yo
ze
zo
"""
@pytest.mark.version('>=2.1')
| [
2,
66,
7656,
25,
40477,
12,
23,
198,
2,
198,
2,
4686,
25,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
11316,
13,
7295,
62,
42780,
198,
2,
3670,
25,
220,
220,
220,
220,
220,
220,
220,
28843,
3297,
1502,
618,
1262,
1658,
6... | 2.532895 | 1,064 |
from gym_tak.read_only.read_only_enum import read_only_enum
from gym_tak.read_only.read_only_properties import read_only_properties
| [
6738,
11550,
62,
83,
461,
13,
961,
62,
8807,
13,
961,
62,
8807,
62,
44709,
1330,
1100,
62,
8807,
62,
44709,
198,
6738,
11550,
62,
83,
461,
13,
961,
62,
8807,
13,
961,
62,
8807,
62,
48310,
1330,
1100,
62,
8807,
62,
48310,
198
] | 3 | 44 |
from __future__ import division, unicode_literals, print_function, absolute_import
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import traitlets as tl
import podpac
from podpac import Coordinates, clinspace, crange
from podpac.algorithm import Arange
from podpac.data import Array
from podpac.core.algorithm.signal import Convolution
| [
6738,
11593,
37443,
834,
1330,
7297,
11,
28000,
1098,
62,
17201,
874,
11,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
198,
11748,
12972,
9288,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
40... | 3.53211 | 109 |
import logging
from pokebattle import celery_app
from pokemon.helpers import save_pokemon
from services.api import get_pokemon_list
logger = logging.getLogger(__name__)
@celery_app.task
| [
11748,
18931,
198,
198,
6738,
22620,
38471,
1330,
18725,
1924,
62,
1324,
198,
6738,
43962,
13,
16794,
364,
1330,
3613,
62,
79,
12717,
198,
6738,
2594,
13,
15042,
1330,
651,
62,
79,
12717,
62,
4868,
628,
198,
6404,
1362,
796,
18931,
13... | 3.183333 | 60 |
# Severe Weather Power Disruption Index 15 Day
#
# - https://weather.com/swagger-docs/ui/sun/v2/SUNv2SevereWeatherPowerDisruptionIndex.json
#
# The Power Disruption index provides indices indicating the potential for power
# disruptions due to weather.
#
# Base URL: api.weather.com/v2
# Endpoint: /indices/powerDisruption/daypart/15day
__name__ = 'severeweatherpowerdisruptionindex'
from lib.apiutil import host, default_params
| [
2,
1001,
4119,
15615,
4333,
3167,
6417,
12901,
1315,
3596,
198,
2,
198,
2,
532,
3740,
1378,
23563,
13,
785,
14,
2032,
7928,
12,
31628,
14,
9019,
14,
19155,
14,
85,
17,
14,
50,
4944,
85,
17,
4653,
4119,
41865,
13434,
7279,
6417,
15... | 3.315385 | 130 |
r"""Functions for $\tau\to V\ell$."""
import flavio
from flavio.physics.taudecays import common
from math import sqrt, pi
import numpy as np
# names of LFV sectors in WCxf
wcxf_sector_names = {('tau', 'mu'): 'mutau',
('tau', 'e'): 'taue',
('mu', 'e'): 'mue', }
def br_tauvl(wc_obj, par, V, lep):
r"""Branching ratio of $\tau^+\to V^0\ell^+$."""
scale = flavio.config['renormalization scale']['taudecays']
sec = wcxf_sector_names['tau', lep]
wc = wc_obj.get_wc(sec, scale, par, nf_out=4)
alpha = flavio.physics.running.running.get_alpha_e(par, scale, nf_out=3)
e = sqrt(4 * pi * alpha)
mtau = par['m_tau']
ml = par['m_' + lep]
mV = par['m_' + V]
fV = par['f_' + V]
fTV = flavio.physics.running.running.get_f_perp(par, V, scale)
Cgamma_taul = wc['Cgamma_tau{}'.format(lep)]
Cgamma_ltau = wc['Cgamma_{}tau'.format(lep)]
if V == 'rho0':
g_u = get_wcs(wc, 'u', lep)
g_d = get_wcs(wc, 'd', lep)
g = (g_u-g_d)/sqrt(2)
KV = -1/sqrt(2)*e
if V == 'phi':
g = get_wcs(wc, 's', lep)
KV = 1/3*e
gL = mV*fV/2 * (g[0] + g[1])
gR = mV*fV/2 * (g[2] + g[3])
gTL = +fTV * g[4].conjugate() + 2*fV*KV/mV * Cgamma_ltau.conjugate()
gtTL = -fTV * g[4].conjugate()
gTR = +fTV * g[5] + 2*fV*KV/mV * Cgamma_taul
gtTR = +fTV * g[5]
return (par['tau_tau']
* common.GammaFvf(mtau, mV, ml, gL, gR, gTL, gtTL, gTR, gtTR) )
# function returning function needed for prediction instance
# Observable and Prediction instances
_had = {'rho0': r'\rho^0', 'phi': r'\phi'}
_shortname = {'rho0': 'rho', 'phi': 'phi'}
_lep = {'e': ' e', 'mu': r'\mu',}
for V in _had:
for l in _lep:
_obs_name = "BR(tau->" + _shortname[V] + l + r")"
_obs = flavio.classes.Observable(_obs_name)
_process_tex = r"\tau^+\to " + _had[V] + _lep[l] + r"^+"
_process_taxonomy = r'Process :: $\tau$ lepton decays :: LFV decays :: $\tau\to V\ell$ :: $' + _process_tex + r"$"
_obs.add_taxonomy(_process_taxonomy)
_obs.set_description(r"Branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
flavio.classes.Prediction(_obs_name, br_tauvl_fct(V, l))
| [
81,
37811,
24629,
2733,
329,
39280,
83,
559,
59,
1462,
569,
59,
695,
3,
526,
15931,
198,
198,
11748,
10525,
952,
198,
6738,
10525,
952,
13,
746,
23154,
13,
83,
3885,
721,
592,
1330,
2219,
198,
6738,
10688,
1330,
19862,
17034,
11,
31... | 1.863785 | 1,226 |
from flask import render_template, redirect, url_for
from app import app
from Team import Team
from HReferenceParser import HReferenceParser
from Schedule import Schedule
from GameLog import GameLog
from Stats import Stats
from BeltGame import BeltGame
season = 2014
availableSeasons = {
2006 : Team('CAR', 'Carolina Hurricanes'),
2007 : Team('ANA', 'Anaheim Ducks'),
2008 : Team('DET', 'Detroit Red Wings'),
2009 : Team('PIT', 'Pittsburgh Penguins'),
2010 : Team('CHI', 'Chicago Blackhawks'),
2011 : Team('BOS', 'Boston Bruins'),
2012 : Team('LAK', 'Los Angeles Kings'),
2013 : Team('CHI', 'Chicago Blackhawks'),
2014 : Team('LAK', 'Los Angeles Kings')
}
@app.route('/<season>')
@app.route('/')
| [
6738,
42903,
1330,
8543,
62,
28243,
11,
18941,
11,
19016,
62,
1640,
198,
6738,
598,
1330,
598,
198,
6738,
4816,
1330,
4816,
198,
6738,
367,
26687,
46677,
1330,
367,
26687,
46677,
198,
6738,
19281,
1330,
19281,
198,
6738,
3776,
11187,
13... | 2.933852 | 257 |
from __future__ import print_function
from cassandra.cluster import Cluster
import os
import sys
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.streaming.kafka import KafkaUtils
csvFields = ["Year", "Month", "DayofMonth", "DayOfWeek", "UniqueCarrier", "Origin", "Dest", "CRSDepTime", "DepDelay", "ArrDelay", "Cancelled", "Diverted"]
rparam="UniqueCarrier"
# state: (rparam -> (num_flights, total_delay))
cassandraSession = None
prepared_stmt = None
c_table_name = "airport_best_airlines"
c_field_name = "airline"
if __name__ == "__main__":
ssc = StreamingContext.getOrCreate("/home/centos/spark-checkpoint",
lambda: createContext())
ssc.start()
ssc.awaitTermination()
print("await done")
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
30606,
15918,
13,
565,
5819,
1330,
38279,
198,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
279,
893,
20928,
1330,
17732,
21947,
198,
6738,
279,
893,
20... | 2.618123 | 309 |
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .list_rule import (
ListRule,
ALL_RULE_TYPES,
USER_RULE_TYPES,
SERVER_RULE_TYPES,
ROOM_RULE_TYPES,
)
from twisted.internet import defer
from synapse.module_api import run_as_background_process
logger = logging.getLogger("synapse.contrib." + __name__)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
13130,
383,
24936,
13,
2398,
5693,
327,
13,
40,
13,
34,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
... | 3.130137 | 292 |
import RPi.GPIO as GPIO
import time
pin = 15
GPIO.setmode(GPIO.BOARD)
GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
try:
state = GPIO.input(pin)
while True:
if GPIO.input(pin) != state:
state = GPIO.input(pin)
if state == 1:
print "it's open!"
else:
print "closed"
time.sleep(1)
except KeyboardInterrupt:
print(" Terminating..")
finally:
GPIO.cleanup()
| [
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
628,
198,
11635,
796,
1315,
198,
198,
16960,
9399,
13,
2617,
14171,
7,
16960,
9399,
13,
8202,
9795,
8,
198,
16960,
9399,
13,
40406,
7,
11635,
11,
50143,
13,
1268,
11,
2... | 2.280702 | 171 |
import argparse
import threading
import time
# import uuid
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Tuple, Union
import consul
import yaml
from consul.base import Check
from logger import create_info_logger
from utils.network import find_open_port, get_ip_address
logger = create_info_logger("registry", "registry.log")
config = None
current_service_id = None
def verify_connection(cfg: EndpointConfig) -> bool:
"""
Verify consul connection
Exceptions throw such as ConnectionError will be captured
"""
if cfg is None:
raise Exception("Configuration is required")
port = cfg.Port
host = cfg.Host
logger.debug('Verifying Consul connection to %s:%s', host, port)
try:
client = consul.Consul(host=host, port=port)
client.agent.self()
return True
except Exception:
pass
return False
def createClient(cfg: EndpointConfig, verify: bool = True) -> Tuple[consul.Consul, bool]:
"""
Create new consul client
"""
if cfg is None:
raise Exception("Configuration is required but got None")
try:
port = cfg.Port
host = cfg.Host
logger.info('Consul Host: %s Port: %s ', host, port)
client = consul.Consul(host=host, port=port)
online = False
if verify:
online = verify_connection(cfg)
logger.debug('Consul online : %s', online)
return client, online
except Exception:
pass
return None, False
def register(service_host, service_port, service_id=None) -> Union[None, str]:
"""
Register new service in consul
"""
logger.info('Registering ServiceHost: %s Port: %s ',
service_host, service_port)
c, online = createClient(config, True)
if not online:
logger.debug('Consul service is offline')
return None
service_name = 'traefik-system-ingress'
service_url = f'http://{service_host}:{service_port}/api'
# TODO : Service ID generation needs to be configurable
# Create new service id, otherwise we will re-register same id
if service_id is None:
# service_id = f'{service_name}@{service_port}#{uuid.uuid4()}'
host = get_ip_address()
service_id = f'{service_name}@{host}:{service_port}'
# service_id = f'{service_name}@{service_port}'
logger.info('Service url: %s', service_url)
logger.info('Service id: %s', service_id)
# TODO: De-registration needs to be configurable
c.agent.service.register(
name=service_name,
service_id=service_id,
port=service_port,
address=service_host,
# check=Check.http(service_url, '10s', deregister='10m'),
check=Check.http(service_url, '10s'),
tags=[
"traefik.enable=true",
"traefik.consulcatalog.connect=false",
"traefik.http.routers.traefik-system-ingress.entrypoints=marie",
"traefik.http.routers.traefik-system-ingress.service=traefik-system-ingress",
"traefik.http.routers.traefik-system-ingress.rule=HostRegexp(`{host:.+}`)",
"traefik.http.services.traefik-system-ingress.loadbalancer.server.scheme=http",
])
return service_id
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# parser.add_argument('--debug-server', type=bool, default=False, required=False, help='Should we start debug webserver')
# parser.add_argument('--port', type=int, default=-1, help='Port number to export (-1 dynamic)')
# parser.add_argument('--ip', type=str, default='127.0.0.1', help='Service IP to expose, blank for dynamic')
# parser.add_argument('--watchdog-interval', type=int, default=60, help='watchdog interval checkin seconds')
parser.add_argument('--config', type=str, default='./config/marie-debug.yml', help='Configuration file')
opt = parser.parse_args()
# Load config
with open(opt.config, "r") as yamlfile:
data = yaml.load(yamlfile, Loader=yaml.FullLoader)
logger.info(f"Config read successfully : {opt.config}")
print(data)
enabled = bool(data['RegistryEnabled'])
if not enabled:
logger.info("registry not enabled, exiting...")
exit()
config = EndpointConfig()
config.Host = data['ConsulEndpoint']['Host']
config.Port = int(data['ConsulEndpoint']['Port'])
config.Scheme = data['ConsulEndpoint']['Scheme']
hostName = data['ServiceEndpoint']['Host']
serverPort = int(data['ServiceEndpoint']['Port'])
watchdog_interval = int(data['WatchdogInterval'])
debug_server = bool(data['DebugWebserver'])
if hostName is None or hostName == '':
hostName = get_ip_address()
if serverPort == -1:
serverPort = find_open_port()
current_service_id = register(
service_host=hostName, service_port=serverPort, service_id=None)
logger.info('Registered service: %s', current_service_id)
watchdog_task = threading.Thread(
target=_target, daemon=debug_server).start()
if debug_server:
start_webserver(hostName, serverPort)
| [
11748,
1822,
29572,
198,
11748,
4704,
278,
198,
11748,
640,
198,
198,
2,
1330,
334,
27112,
198,
6738,
2638,
13,
15388,
1330,
7308,
40717,
18453,
25060,
11,
38288,
18497,
198,
6738,
19720,
1330,
309,
29291,
11,
4479,
198,
198,
11748,
762... | 2.549282 | 2,019 |
from django.shortcuts import render
from django.views.generic import FormView, TemplateView
from .forms import TestForm
from django.contrib import messages
from quantityfield import ureg
# Create your views here.
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
198,
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
5178,
7680,
11,
37350,
7680,
198,
198,
6738,
764,
23914,
1330,
6208,
8479,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
62... | 3.639344 | 61 |
from __future__ import absolute_import
import json
from engineauth.models import User
from engineauth.strategies.oauth import OAuthStrategy
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
11748,
33918,
198,
198,
6738,
3113,
18439,
13,
27530,
1330,
11787,
198,
6738,
3113,
18439,
13,
2536,
2397,
444,
13,
12162,
1071,
1330,
440,
30515,
13290,
4338,
628
] | 3.837838 | 37 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.mport threading
from unittest import mock
from oslo_config import cfg
import oslotest.base
from designate.notification_handler import fake
from designate.sink import service
from designate.tests import fixtures
from designate.tests import test_notification_handler
CONF = cfg.CONF
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 3.811927 | 218 |
from ._element import Element
__all__ = [
"Element",
]
| [
6738,
47540,
30854,
1330,
11703,
198,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
20180,
1600,
198,
60,
198
] | 2.727273 | 22 |
#!/usr/bin/env python
#########################################################################################
#
# Parser for PropSeg binary.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Benjamin De Leener
# Modified: 2015-03-03
#
# About the license: see the file LICENSE.TXT
#########################################################################################
# TODO: remove temp files in case rescaled is not "1"
import os
import pathlib
import sys
import logging
import numpy as np
from scipy import ndimage as ndi
from spinalcordtoolbox.image import Image, add_suffix, zeros_like, convert
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, run_proc, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import tmp_create, rmtree, extract_fname, mv, copy
from spinalcordtoolbox.centerline import optic
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.scripts import sct_image
logger = logging.getLogger(__name__)
def check_and_correct_segmentation(fname_segmentation, fname_centerline, folder_output='', threshold_distance=5.0,
remove_temp_files=1, verbose=0):
"""
This function takes the outputs of isct_propseg (centerline and segmentation) and check if the centerline of the
segmentation is coherent with the centerline provided by the isct_propseg, especially on the edges (related
to issue #1074).
Args:
fname_segmentation: filename of binary segmentation
fname_centerline: filename of binary centerline
threshold_distance: threshold, in mm, beyond which centerlines are not coherent
verbose:
Returns: None
"""
printv('\nCheck consistency of segmentation...', verbose)
# creating a temporary folder in which all temporary files will be placed and deleted afterwards
path_tmp = tmp_create(basename="propseg")
im_seg = convert(Image(fname_segmentation))
im_seg.save(os.path.join(path_tmp, "tmp.segmentation.nii.gz"), mutable=True, verbose=0)
im_centerline = convert(Image(fname_centerline))
im_centerline.save(os.path.join(path_tmp, "tmp.centerline.nii.gz"), mutable=True, verbose=0)
# go to tmp folder
curdir = os.getcwd()
os.chdir(path_tmp)
# convert input to RPI (and store original info to use when converting back at the end)
fname_seg_absolute = os.path.abspath(fname_segmentation)
image_input_orientation = im_seg.orientation
sct_image.main("-i tmp.segmentation.nii.gz -setorient RPI -o tmp.segmentation_RPI.nii.gz -v 0".split())
sct_image.main("-i tmp.centerline.nii.gz -setorient RPI -o tmp.centerline_RPI.nii.gz -v 0".split())
# go through segmentation image, and compare with centerline from propseg
im_seg = Image('tmp.segmentation_RPI.nii.gz')
im_centerline = Image('tmp.centerline_RPI.nii.gz')
# Get size of data
printv('\nGet data dimensions...', verbose)
nx, ny, nz, nt, px, py, pz, pt = im_seg.dim
# extraction of centerline provided by isct_propseg and computation of center of mass for each slice
# the centerline is defined as the center of the tubular mesh outputed by propseg.
centerline, key_centerline = {}, []
for i in range(nz):
slice = im_centerline.data[:, :, i]
if np.any(slice):
x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
centerline[str(i)] = [x_centerline, y_centerline]
key_centerline.append(i)
minz_centerline = np.min(key_centerline)
maxz_centerline = np.max(key_centerline)
mid_slice = int((maxz_centerline - minz_centerline) / 2)
# for each slice of the segmentation, check if only one object is present. If not, remove the slice from segmentation.
# If only one object (the spinal cord) is present in the slice, check if its center of mass is close to the centerline of isct_propseg.
slices_to_remove = [False] * nz # flag that decides if the slice must be removed
for i in range(minz_centerline, maxz_centerline + 1):
# extraction of slice
slice = im_seg.data[:, :, i]
distance = -1
label_objects, nb_labels = ndi.label(slice) # count binary objects in the slice
if nb_labels > 1: # if there is more that one object in the slice, the slice is removed from the segmentation
slices_to_remove[i] = True
elif nb_labels == 1: # check if the centerline is coherent with the one from isct_propseg
x_centerline, y_centerline = ndi.measurements.center_of_mass(slice)
slice_nearest_coord = min(key_centerline, key=lambda x: abs(x - i))
coord_nearest_coord = centerline[str(slice_nearest_coord)]
distance = np.sqrt(((x_centerline - coord_nearest_coord[0]) * px) ** 2 +
((y_centerline - coord_nearest_coord[1]) * py) ** 2 +
((i - slice_nearest_coord) * pz) ** 2)
if distance >= threshold_distance: # threshold must be adjusted, default is 5 mm
slices_to_remove[i] = True
# Check list of removal and keep one continuous centerline (improve this comment)
# Method:
# starting from mid-centerline (in both directions), the first True encountered is applied to all following slices
slice_to_change = False
for i in range(mid_slice, nz):
if slice_to_change:
slices_to_remove[i] = True
elif slices_to_remove[i]:
slice_to_change = True
slice_to_change = False
for i in range(mid_slice, 0, -1):
if slice_to_change:
slices_to_remove[i] = True
elif slices_to_remove[i]:
slice_to_change = True
for i in range(0, nz):
# remove the slice
if slices_to_remove[i]:
im_seg.data[:, :, i] *= 0
# saving the image
im_seg.save('tmp.segmentation_RPI_c.nii.gz')
# replacing old segmentation with the corrected one
sct_image.main('-i tmp.segmentation_RPI_c.nii.gz -setorient {} -o {} -v 0'.
format(image_input_orientation, fname_seg_absolute).split())
os.chdir(curdir)
# display information about how much of the segmentation has been corrected
# remove temporary files
if remove_temp_files:
# printv("\nRemove temporary files...", verbose)
rmtree(path_tmp)
def func_rescale_header(fname_data, rescale_factor, verbose=0):
"""
Rescale the voxel dimension by modifying the NIFTI header qform. Write the output file in a temp folder.
:param fname_data:
:param rescale_factor:
:return: fname_data_rescaled
"""
import nibabel as nib
img = nib.load(fname_data)
# get qform
qform = img.header.get_qform()
# multiply by scaling factor
qform[0:3, 0:3] *= rescale_factor
# generate a new nifti file
header_rescaled = img.header.copy()
header_rescaled.set_qform(qform)
# the data are the same-- only the header changes
img_rescaled = nib.nifti1.Nifti1Image(img.get_data(), None, header=header_rescaled)
path_tmp = tmp_create(basename="propseg")
fname_data_rescaled = os.path.join(path_tmp, os.path.basename(add_suffix(fname_data, "_rescaled")))
nib.save(img_rescaled, fname_data_rescaled)
return fname_data_rescaled
def propseg(img_input, options_dict):
"""
:param img_input: source image, to be segmented
:param options_dict: arguments as dictionary
:return: segmented Image
"""
arguments = options_dict
fname_input_data = img_input.absolutepath
fname_data = os.path.abspath(fname_input_data)
contrast_type = arguments.c
contrast_type_conversion = {'t1': 't1', 't2': 't2', 't2s': 't2', 'dwi': 't1'}
contrast_type_propseg = contrast_type_conversion[contrast_type]
# Starting building the command
cmd = ['isct_propseg', '-t', contrast_type_propseg]
if arguments.o is not None:
fname_out = arguments.o
else:
fname_out = os.path.basename(add_suffix(fname_data, "_seg"))
folder_output = str(pathlib.Path(fname_out).parent)
cmd += ['-o', folder_output]
if not os.path.isdir(folder_output) and os.path.exists(folder_output):
logger.error("output directory %s is not a valid directory" % folder_output)
if not os.path.exists(folder_output):
os.makedirs(folder_output)
if arguments.down is not None:
cmd += ["-down", str(arguments.down)]
if arguments.up is not None:
cmd += ["-up", str(arguments.up)]
remove_temp_files = arguments.r
verbose = int(arguments.v)
# Update for propseg binary
if verbose > 0:
cmd += ["-verbose"]
# Output options
if arguments.mesh is not None:
cmd += ["-mesh"]
if arguments.centerline_binary is not None:
cmd += ["-centerline-binary"]
if arguments.CSF is not None:
cmd += ["-CSF"]
if arguments.centerline_coord is not None:
cmd += ["-centerline-coord"]
if arguments.cross is not None:
cmd += ["-cross"]
if arguments.init_tube is not None:
cmd += ["-init-tube"]
if arguments.low_resolution_mesh is not None:
cmd += ["-low-resolution-mesh"]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_nii is not None:
# cmd += ["-detect-nii"]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_png is not None:
# cmd += ["-detect-png"]
# Helping options
use_viewer = None
use_optic = True # enabled by default
init_option = None
rescale_header = arguments.rescale
if arguments.init is not None:
init_option = float(arguments.init)
if init_option < 0:
printv('Command-line usage error: ' + str(init_option) + " is not a valid value for '-init'", 1, 'error')
sys.exit(1)
if arguments.init_centerline is not None:
if str(arguments.init_centerline) == "viewer":
use_viewer = "centerline"
elif str(arguments.init_centerline) == "hough":
use_optic = False
else:
if rescale_header is not 1:
fname_labels_viewer = func_rescale_header(str(arguments.init_centerline), rescale_header, verbose=verbose)
else:
fname_labels_viewer = str(arguments.init_centerline)
cmd += ["-init-centerline", fname_labels_viewer]
use_optic = False
if arguments.init_mask is not None:
if str(arguments.init_mask) == "viewer":
use_viewer = "mask"
else:
if rescale_header is not 1:
fname_labels_viewer = func_rescale_header(str(arguments.init_mask), rescale_header)
else:
fname_labels_viewer = str(arguments.init_mask)
cmd += ["-init-mask", fname_labels_viewer]
use_optic = False
if arguments.mask_correction is not None:
cmd += ["-mask-correction", str(arguments.mask_correction)]
if arguments.radius is not None:
cmd += ["-radius", str(arguments.radius)]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_n is not None:
# cmd += ["-detect-n", str(arguments.detect_n)]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.detect_gap is not None:
# cmd += ["-detect-gap", str(arguments.detect_gap)]
# TODO: Not present. Why is this here? Was this renamed?
# if arguments.init_validation is not None:
# cmd += ["-init-validation"]
if arguments.nbiter is not None:
cmd += ["-nbiter", str(arguments.nbiter)]
if arguments.max_area is not None:
cmd += ["-max-area", str(arguments.max_area)]
if arguments.max_deformation is not None:
cmd += ["-max-deformation", str(arguments.max_deformation)]
if arguments.min_contrast is not None:
cmd += ["-min-contrast", str(arguments.min_contrast)]
if arguments.d is not None:
cmd += ["-d", str(arguments["-d"])]
if arguments.distance_search is not None:
cmd += ["-dsearch", str(arguments.distance_search)]
if arguments.alpha is not None:
cmd += ["-alpha", str(arguments.alpha)]
# check if input image is in 3D. Otherwise itk image reader will cut the 4D image in 3D volumes and only take the first one.
image_input = Image(fname_data)
image_input_rpi = image_input.copy().change_orientation('RPI')
nx, ny, nz, nt, px, py, pz, pt = image_input_rpi.dim
if nt > 1:
printv('ERROR: your input image needs to be 3D in order to be segmented.', 1, 'error')
path_data, file_data, ext_data = extract_fname(fname_data)
path_tmp = tmp_create(basename="label_vertebrae")
# rescale header (see issue #1406)
if rescale_header is not 1:
fname_data_propseg = func_rescale_header(fname_data, rescale_header)
else:
fname_data_propseg = fname_data
# add to command
cmd += ['-i', fname_data_propseg]
# if centerline or mask is asked using viewer
if use_viewer:
from spinalcordtoolbox.gui.base import AnatomicalParams
from spinalcordtoolbox.gui.centerline import launch_centerline_dialog
params = AnatomicalParams()
if use_viewer == 'mask':
params.num_points = 3
params.interval_in_mm = 15 # superior-inferior interval between two consecutive labels
params.starting_slice = 'midfovminusinterval'
if use_viewer == 'centerline':
# setting maximum number of points to a reasonable value
params.num_points = 20
params.interval_in_mm = 30
params.starting_slice = 'top'
im_data = Image(fname_data_propseg)
im_mask_viewer = zeros_like(im_data)
# im_mask_viewer.absolutepath = add_suffix(fname_data_propseg, '_labels_viewer')
controller = launch_centerline_dialog(im_data, im_mask_viewer, params)
fname_labels_viewer = add_suffix(fname_data_propseg, '_labels_viewer')
if not controller.saved:
printv('The viewer has been closed before entering all manual points. Please try again.', 1, 'error')
sys.exit(1)
# save labels
controller.as_niftii(fname_labels_viewer)
# add mask filename to parameters string
if use_viewer == "centerline":
cmd += ["-init-centerline", fname_labels_viewer]
elif use_viewer == "mask":
cmd += ["-init-mask", fname_labels_viewer]
# If using OptiC
elif use_optic:
image_centerline = optic.detect_centerline(image_input, contrast_type, verbose)
fname_centerline_optic = os.path.join(path_tmp, 'centerline_optic.nii.gz')
image_centerline.save(fname_centerline_optic)
cmd += ["-init-centerline", fname_centerline_optic]
if init_option is not None:
if init_option > 1:
init_option /= (nz - 1)
cmd += ['-init', str(init_option)]
# enabling centerline extraction by default (needed by check_and_correct_segmentation() )
cmd += ['-centerline-binary']
# run propseg
status, output = run_proc(cmd, verbose, raise_exception=False, is_sct_binary=True)
# check status is not 0
if not status == 0:
printv('Automatic cord detection failed. Please initialize using -init-centerline or -init-mask (see help)',
1, 'error')
sys.exit(1)
# build output filename
fname_seg = os.path.join(folder_output, fname_out)
fname_centerline = os.path.join(folder_output, os.path.basename(add_suffix(fname_data, "_centerline")))
# in case header was rescaled, we need to update the output file names by removing the "_rescaled"
if rescale_header is not 1:
mv(os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_seg")),
fname_seg)
mv(os.path.join(folder_output, add_suffix(os.path.basename(fname_data_propseg), "_centerline")),
fname_centerline)
# if user was used, copy the labelled points to the output folder (they will then be scaled back)
if use_viewer:
fname_labels_viewer_new = os.path.join(folder_output, os.path.basename(add_suffix(fname_data,
"_labels_viewer")))
copy(fname_labels_viewer, fname_labels_viewer_new)
# update variable (used later)
fname_labels_viewer = fname_labels_viewer_new
# check consistency of segmentation
if arguments.correct_seg:
check_and_correct_segmentation(fname_seg, fname_centerline, folder_output=folder_output, threshold_distance=3.0,
remove_temp_files=remove_temp_files, verbose=verbose)
# copy header from input to segmentation to make sure qform is the same
printv("Copy header input --> output(s) to make sure qform is the same.", verbose)
list_fname = [fname_seg, fname_centerline]
if use_viewer:
list_fname.append(fname_labels_viewer)
for fname in list_fname:
im = Image(fname)
im.header = image_input.header
im.save(dtype='int8') # they are all binary masks hence fine to save as int8
return Image(fname_seg)
# Script entry point: initialize the Spinal Cord Toolbox runtime, then run
# the CLI with the user-supplied arguments (program name stripped).
if __name__ == "__main__":
    init_sct()
    main(sys.argv[1:])
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
29113,
14468,
7804,
2,
198,
2,
198,
2,
23042,
263,
329,
8772,
41030,
13934,
13,
198,
2,
198,
2,
16529,
19351,
6329,
198,
2,
15069,
357,
66,
8,
1853,
12280,
23873,
2350,
12871,... | 2.438349 | 7,218 |
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
from fudge import outputChannel as outputChannelModule
from fudge.productData.distributions import angular as angularModule

# Monkey-patch a ``toENDL`` serialization method onto each angular-distribution
# class so instances can convert themselves to the ENDL format.
# NOTE(review): ``toENDL`` is not defined in the visible lines -- presumably it
# is defined earlier in the original file; confirm before relying on this.

#
# XYs1d
#
angularModule.XYs1d.toENDL = toENDL

#
# XYs2d
#
angularModule.XYs2d.toENDL = toENDL

#
# isotropic2d
#
angularModule.isotropic2d.toENDL = toENDL

#
# recoil
#
angularModule.recoil.toENDL = toENDL

#
# twoBodyForm
#
angularModule.twoBodyForm.toENDL = toENDL

#
# form
#
angularModule.form.toENDL = toENDL
| [
2,
9959,
33,
43312,
12,
22163,
4766,
4211,
198,
2,
15069,
33448,
11,
13914,
45036,
3549,
2351,
4765,
11,
11419,
13,
198,
2,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
220,
198,
2,
30628,
55,
12,
34... | 2.582329 | 249 |
# Static game configuration: window geometry, colors, and object sizes.
game = {
    'TRAINING': True,

    'WND_WIDTH': 500,
    'WND_HEIGHT': 500,
    'SCOREBAR_HEIGHT': 30,

    # colors
    'WHITE': (255, 255, 255),
    'BLACK': (0, 0, 0),

    # game objects
    'BALL_SZ': 9,
    'PAD_H': 45,
    'PAD_W': 15
}

# Derived values, computed from the static configuration above.
game['BALL_XSPD'] = game['WND_WIDTH'] / 160
game['BALL_XSTR'] = 0.5 * game['WND_WIDTH']
# Fix: SCOREBAR_HEIGHT was referenced as a bare (undefined) name -> NameError.
game['BALL_YSTR'] = 0.5 * (game['WND_HEIGHT'] - game['SCOREBAR_HEIGHT']) + game['SCOREBAR_HEIGHT']
# Fix: the dict has no 'WINDOW_HEIGHT' key; the height key is 'WND_HEIGHT'.
game['PAD_SPEED'] = game['WND_HEIGHT'] / 105
# NOTE(review): PAD_START mixes WND_WIDTH with SCOREBAR_HEIGHT; since the
# window is square (500x500) this is numerically harmless, but the vertical
# start likely intends WND_HEIGHT -- confirm before changing.
game['PAD_START'] = (game['WND_WIDTH'] - game['SCOREBAR_HEIGHT']) / 2
# Fix: the dict has no 'PADDLE_W' key; the paddle width key is 'PAD_W'.
game['AI_PAD_X'] = game['WND_WIDTH'] - game['PAD_W'] - 10
game['PLY_PAD_X'] = 10

# DQN hyper-parameters: observation vector length and discrete action count.
DQN = {
    'STATE_SIZE': 8,
    'ACT_SIZE': 3
}
| [
6057,
796,
1391,
198,
220,
220,
220,
705,
51,
3861,
1268,
2751,
10354,
6407,
11,
628,
220,
220,
220,
705,
54,
8575,
62,
54,
2389,
4221,
10354,
5323,
11,
198,
220,
220,
220,
705,
54,
8575,
62,
13909,
9947,
10354,
5323,
11,
198,
220... | 1.918079 | 354 |
"""
Write a program that accepts 10 integers from a user into an array,
and count the number of occurrences of all present prime numbers.
"""
if __name__ == "__main__":
values = list()
for i in range(10):
x = int(input(f"Enter the list value { i + 1 }: "))
values.append(x)
count_occurrences(values)
| [
37811,
198,
220,
220,
220,
19430,
257,
1430,
326,
18178,
838,
37014,
422,
257,
2836,
656,
281,
7177,
11,
220,
198,
220,
220,
220,
290,
954,
262,
1271,
286,
40279,
286,
477,
1944,
6994,
3146,
13,
198,
37811,
198,
198,
361,
11593,
367... | 2.653846 | 130 |
"""
Language: Python
Written by: Mostofa Adib Shakib
Video Explanation: https://www.youtube.com/watch?v=CWDQJGaN1gY
Further Reading: https://www.geeksforgeeks.org/binary-indexed-tree-or-fenwick-tree-2/
https://www.topcoder.com/community/competitive-programming/tutorials/binary-indexed-trees/
Binary Index Tree or Fenwick Tree
The size of the BITree is one more than the size of the input array
Time Complexity:
Construction: O(nlogn)
Update BIT: O(logn)
Get Sum (0 to n): O(logn)
getParent:
=> Find 2's complement
=> "AND" the previous numbr with the original number
=> "Subtract" the previous number from the original number
getSum:
=> Find 2's complement
=> "AND" the previous numbr with the original number
=> "Add" the previous number from the original number
"""
# Returns sum of arr[0..index]. This function assumes
# that the array is preprocessed and partial sums of
# array elements are stored in BITree[].
# Updates a node in Binary Index Tree (BITree) at given index
# in BITree. The given value 'val' is added to BITree[i] and
# all of its ancestors in tree.
# Constructs and returns a Binary Indexed Tree for given array of size n. | [
37811,
198,
32065,
25,
11361,
198,
25354,
416,
25,
4042,
1659,
64,
1215,
571,
35274,
571,
198,
10798,
50125,
341,
25,
3740,
1378,
2503,
13,
11604,
13,
785,
14,
8340,
30,
85,
28,
34,
22332,
48,
41,
35389,
45,
16,
70,
56,
198,
13518... | 3.022333 | 403 |
import json
from collections import OrderedDict
from pathlib import Path
| [
11748,
33918,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
3108,
8019,
1330,
10644,
198
] | 4.294118 | 17 |
#!/usr/bin/python3
# Author: Kenta Ishii
# SPDX short identifier: BSD-3-Clause
# ./dmx512.py
import RPi.GPIO as gpio
import threading
# NOTE(review): the class body below contains only a docstring, yet the demo
# constructs DMX512 with GPIO pin arguments and calls start_tx()/eop_toggle();
# the method implementations appear to be missing from this view -- confirm
# against the original file.
class DMX512:
    """Dependency:RPi.GPIO, threading"""

# Demo / self-test: flush the bus, send initial values, then stream frames.
if __name__ == '__main__':
    import sys
    import time
    import signal

    version_info = "DMX512 Alpha"
    # Install Ctrl-C handler. NOTE(review): handle_sigint is not defined in
    # the visible lines; presumably defined elsewhere in the original file.
    signal.signal(signal.SIGINT, handle_sigint)
    argv = sys.argv
    # Optional first CLI argument: inter-frame delay in seconds (default 4).
    if len(argv) == 1:
        time_delay = 4
    else:
        time_delay = float(argv[1])
    print(sys.version)
    # Call Class
    dmx512 = DMX512([12,16,19,20,21,26], 6, 13)
    # Initialization of Flushing Method
    list_data = [0x1F, 0x14, 0x1B, 0x11, 0x00, 0x13]
    thread1 = dmx512.start_tx(list_data, 0, 6, time_delay)
    thread1.join()
    # Set Initial Values and Start
    list_data = [1] * 1026
    thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
    thread1.join()
    # Start DMX512 Transmission
    list_data = [0x1D, 0x1A]
    thread1 = dmx512.start_tx(list_data, 0, 2, time_delay)
    thread1.join()
    # Track the end-of-packet toggle line so we can wait for frame boundaries.
    status_gpio_eop_toggle = dmx512.eop_toggle()
    count = 2
    # Stream frames with incrementing channel values 2..0xF, waiting for the
    # EOP toggle to flip between frames.
    while True:
        list_data = [count] * 1026
        thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
        thread1.join()
        count += 1
        if count > 0xF:
            count = 0;
            break
        # Busy-wait until the end-of-packet line toggles (frame completed).
        while True:
            if status_gpio_eop_toggle != dmx512.eop_toggle():
                status_gpio_eop_toggle = dmx512.eop_toggle()
                break
            #if gpio.event_detected(num_gpio_eop_toggle) == 1:
            #    break
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
6434,
25,
8758,
64,
24303,
4178,
198,
2,
30628,
55,
1790,
27421,
25,
347,
10305,
12,
18,
12,
2601,
682,
198,
2,
24457,
36020,
87,
25836,
13,
9078,
198,
198,
11748,
25812,
72,
... | 2.030144 | 763 |
#!/usr/bin/env python
# coding: utf-8
# In[4]:
import serial
import time
import re
from datetime import datetime
import subprocess
import os
import urllib.request
# In[5]:
# AT command strings for a cellular/GNSS modem (command set matches Quectel's
# QFxxx/QGPS series -- TODO confirm exact module).
#Generic AT
CR = '\r\n'
ENABLE_AT='ATE1'

#Filesystem AT commands
UPLOAD_FILE='AT+QFUPL'
DELETE_FILE='AT+QFDEL'
LIST_FILES='AT+QFLST'
# Pre-encoded full command: list files stored in the modem's RAM area.
LIST_FILES_RAM=bytes('AT+QFLST="RAM:*"\r\n','utf-8')

#GPS AT commands
GPS_ENGINE='AT+QGPS'
XTRA='AT+QGPSXTRA'
XTRA_TIME='AT+QGPSXTRATIME'
XTRA_DATA='AT+QGPSXTRADATA'
END_SESSION='AT+QGPSEND'

# In[6]:

# In[7]:

# In[8]:

# In[9]:

# In[10]:

# In[11]:

# In[12]:

# In[13]:

# In[20]:

# In[15]:

# In[16]:

# Entry point. The '__file__' check prevents this from running when the code
# is executed inside a notebook (where __file__ is not defined).
if __name__ == "__main__" and '__file__' in globals():
    configure_xtra_gnss()
    exit(0)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
19,
5974,
628,
198,
11748,
11389,
198,
11748,
640,
198,
11748,
302,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
850,
... | 1.986911 | 382 |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Login credentials (left blank in source -- filled in locally, never commit).
email = ''
password = ''
auth = ''

# Date range to scrape daily rankings for (inclusive).
start_date = '2020-05-01'
end_date = '2020-11-30'

# Launch Chrome via a local chromedriver binary (Windows path).
driver = webdriver.Chrome("C:/workspace/python/chromedriver/chromedriver.exe")
driver.implicitly_wait(10)
driver.get("https://hd.mobileindex.com/member/login?url=https%3A%2F%2Fhd.mobileindex.com%2F")

import pandas as pd

# Build the list of dates as 'YYYY-MM-DD' strings.
dates = pd.date_range(start = start_date,end = end_date).tolist()
dates = [pd.Timestamp.strftime(x, '%Y-%m-%d') for x in dates]

# NOTE(review): log_in_mobile_index / go_to_rank / get_daily_rank are not
# defined in the visible lines -- presumably defined earlier in this file.
log_in_mobile_index(driver)
go_to_rank(driver)
for date in dates:
    get_daily_rank(driver, date)
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11321,
13,
13083,
1330,
26363,
198,
11748,
640,
198,
198,
12888,
796,
10148,
198,
28712,
796,
10148,
198,
18439,
796,
10148,
198,
9688,
62,
44... | 2.569672 | 244 |
import discord
from discord.ext import commands, tasks
import os, sys, inspect
import asyncio
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import botlib
# import wowapi
DEVMODE = os.getenv("DEVMODE") == "TRUE" # Boolean flag for devmode
ENVVERSION = os.getenv("ENV_VERSION") # Local .env or server vars
COMMAND_PREFIX = os.getenv("COMMAND_PREFIX") # Bot command prefix
class Core(commands.Cog):
"""
Core bot discord functions
"""
## On_Ready event for cog
@commands.Cog.listener()
# region HelpCommand
# HELP
# @bot.command()
# async def help(ctx):
# author = ctx.message.author
# embed = discord.Embed(color=discord.Color.orange())
# embed.set_author(name="Help")
# # embed.add_field(
# # name=".ping", value="Returns Pong to check bot latency.", inline=False
# # )
# embed.add_field(
# name=".mats or .raidmats",
# value="Current Auction House pricing on common raid mats.",
# inline=False,
# )
# embed.add_field(
# name=".lpc or .legendaries",
# value=".lpc [armorType] - Auction House pricing on legendary base armors.",
# inline=False,
# )
# embed.add_field(
# name=".tc",
# value=".tc - Shows current Twisting Corridors achievement for team.",
# inline=False,
# )
# embed.add_field(
# name=".gvault or .gv",
# value="Shows current Great Vault loot from M+ keys.",
# inline=False,
# )
# embed.add_field(
# name=".bestruns or .br",
# value="Shows best timed mythic runs for season, all members.",
# inline=False,
# )
# embed.add_field(
# name=".team or .raidteam",
# value="team [update] - List current team members data. Update is Optional.",
# inline=False,
# )
# embed.add_field(
# name=".add_member",
# value="add_member <playername> [<realm>] Add new member. Realm defaults to silver-hand.",
# inline=False,
# )
# embed.add_field(
# name=".remove_member",
# value="remove_member <playername> Remove member.",
# inline=False,
# )
# embed.add_field(
# name=".change_member_role",
# value="change_member_role <playername> Change member role.",
# inline=False,
# )
# embed.add_field(
# name=".rules", value="Guild rules to live by. Esp rule #1.", inline=False
# )
# embed.add_field(
# name=".clean",
# value="Cleans all AzsocamiBot messages and commands from channel.",
# inline=False,
# )
# embed.add_field(
# name=".cleanbot",
# value="Cleans certain bot messages and commands from channel.",
# inline=False,
# )
# embed.add_field(
# name=".changelog",
# value="AzsocamiBot change log.",
# inline=False,
# )
# embed.add_field(
# name=".version",
# value="AzsocamiBot version info.",
# inline=False,
# )
# await ctx.send(embed=embed)
# if author.name.lower() == "aaryn":
# embed2 = discord.Embed(color=discord.Color.orange())
# embed2.set_author(name="Admin Only Commands")
# # embed2.add_field(
# # name=".db_members", value="ADMIN: List members database rows.", inline=False
# # )
# embed2.add_field(
# name=".get_table_contents",
# value="ADMIN: get_table_contents <tablename> List table contents.",
# inline=False,
# )
# embed2.add_field(
# name=".get_table_structure",
# value="ADMIN: get_table_structure <tablename> List table structure.",
# inline=False,
# )
# embed2.add_field(
# name=".add_item",
# value="ADMIN: add_item <ItemID> Add itemid to raidmats.",
# inline=False,
# )
# embed2.add_field(
# name=".remove_item",
# value="ADMIN: remove_item <ItemID> Remove itemid from raidmats.",
# inline=False,
# )
# await ctx.send(embed=embed2)
# endregion
    @commands.command()
    async def rules(self, ctx):
        """ Rules to live by """
        # The message body is sent verbatim to the channel; Discord renders
        # the ** markers as bold.
        msg = """
**Rule #1: It's Ben's fault. Always.**
Rule #2: Be respectful to one another.
Rule #3: No Politics and No Religion talk.
**Rule #4: Keep voice chatter to a minimum during boss pulls.**
Rule #5: Thou shall not upset thy tank or thy healer.
        """
        await ctx.send(msg)
    @commands.command()
    async def ping(self, ctx):
        """ Generic latency test for bot """
        # client.latency is the websocket heartbeat latency in seconds.
        await ctx.send(f"🏓 Pong with {str(round(self.client.latency, 2))} seconds.")
@commands.command(name="whoami", hidden=True)
    @commands.command()
    async def clean(self, ctx, number=50):
        """ Clean <number=50> AzsocamiBot commands and responses from channel """
        mgs = []
        number = int(number)
        cleaned = 0
        # Scan the most recent `number` messages; collect this bot's own
        # messages and any message starting with the command prefix.
        async for x in ctx.message.channel.history(limit=number):
            if x.author.id == self.client.user.id:
                mgs.append(x)
                cleaned += 1
                # print(x)
            # NOTE(review): a bot message that also starts with the prefix is
            # appended twice here (two independent ifs) -- confirm
            # delete_messages tolerates duplicates; it also caps at 100
            # messages, all younger than 14 days.
            if x.content[:1] == COMMAND_PREFIX:
                mgs.append(x)
                cleaned += 1
                # print(x.content[:1])
        await ctx.message.channel.delete_messages(mgs)
        print(f"Removed {cleaned} messages and commands.")
    @commands.command()
    async def cleanbot(self, ctx, number=50):
        """ Clean <number=50> bot commands and responses from channel """
        mgs = []
        number = int(number)
        cleaned = 0
        # M+ bot, this bot,
        botsList = [378005927493763074, self.client.user.id]
        # Command prefixes of the bots whose trigger messages we also remove.
        prefixList = [".", "*", "!", ";"]
        # Scan the most recent `number` messages; collect messages authored by
        # a known bot, or (elif -- no double count) starting with any prefix.
        async for x in ctx.message.channel.history(limit=number):
            if x.author.id in botsList:
                mgs.append(x)
                cleaned += 1
                # print(x)
            elif x.content[:1] in prefixList:
                mgs.append(x)
                cleaned += 1
                # print(x.content[:1])
        # NOTE(review): delete_messages caps at 100 messages < 14 days old.
        await ctx.message.channel.delete_messages(mgs)
        print(f"Removed {cleaned} messages and commands.")
###################################################################
###################################################################
## ##
## BACKGROUND TASKS ##
## ##
###################################################################
###################################################################
@tasks.loop(minutes=15)
###################################################################
###################################################################
## ##
## NOT IMPLEMENTED YET ##
## ##
###################################################################
###################################################################
# region NotImplemented
# @commands.command()
# @commands.is_owner()
# async def status(self, ctx):
# msg = f"AzsocamiBot version {VERSION}, released {VERSIONDATE}.\n"
# # msg += "Bot running as "
# # if TESTBOT:
# # msg += "TEST BOT.\n"
# # else:
# # msg += "PRODUCTION BOT.\n"
# # msg += f"Server Timezone: {time.tzname}\n"
# # msg += f"Server Time: {datetime.datetime.now().strftime(TIMEFORMAT)}\n"
# msg += f"Bot Local Time: {botlib.localNow()}\n"
# msg += f"Bot source is at https://github.com/bryanpalmer/AzsocamiBot\n"
# msg += f"Bot running on heroku.com\n"
# await ctx.send(msg)
# endregion
## Initialize cog
| [
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
11,
8861,
198,
11748,
28686,
11,
25064,
11,
10104,
198,
11748,
30351,
952,
198,
198,
14421,
62,
15908,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
... | 2.076312 | 4,154 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
This program creates a quit
button. When we press the button,
the application terminates.
author: Jan Bodnar
website: zetcode.com
last edited: January 2015
"""
import sys
from PyQt5.QtWidgets import QWidget, QPushButton, QApplication
from PyQt5.QtCore import QCoreApplication
# Application entry point: create the Qt application, instantiate the example
# widget (``Example`` is defined earlier in the original file), and run the
# event loop until the window closes; its exit code is propagated to the OS.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
57,
316,
10669,
9485,
48,
83,
20,
11808,
220,
198,
198,
1212,
1430,
8075,
257,
11238,
198,
16539... | 2.445545 | 202 |
from storages.backends.s3boto3 import S3Boto3Storage
| [
6738,
336,
273,
1095,
13,
1891,
2412,
13,
82,
18,
65,
2069,
18,
1330,
311,
18,
33,
2069,
18,
31425,
628,
198
] | 2.5 | 22 |
import pandas as pd
import numpy as np; np.random.seed(0)
import seaborn as sns; sns.set_theme()
from matplotlib import pyplot as plt
file=pd.read_csv('/home/Documents/Acads/CompGenomics/Proj_ComparitiveGenomics/vf_merged_results.tsv', sep='\t')
VF=list(file['Virulence factor'])
Is=list(file['Isolate'])
genes=['eae', 'iss', 'tir', 'espA', 'espB', 'espJ', 'etpD', 'iha', 'ehxA', 'nleA', 'nleB', 'astA', 'espP', 'gad', 'katP', 'nleC', 'toxB', 'espF','efa1' , 'stx1A', 'stx1B','espI']
isolates=['CGT1009', 'CGT1020', 'CGT1058', 'CGT1084', 'CGT1174', 'CGT1197', 'CGT1219', 'CGT1238', 'CGT1283', 'CGT1317', 'CGT1323', 'CGT1327', 'CGT1342', 'CGT1394', 'CGT1408', 'CGT1417', 'CGT1428', 'CGT1436', 'CGT1440', 'CGT1447', 'CGT1459', 'CGT1473', 'CGT1493', 'CGT1500', 'CGT1511', 'CGT1519', 'CGT1531', 'CGT1568', 'CGT1600', 'CGT1602', 'CGT1606', 'CGT1615', 'CGT1621', 'CGT1777', 'CGT1778', 'CGT1783', 'CGT1795', 'CGT1808', 'CGT1833', 'CGT1834', 'CGT1837', 'CGT1841', 'CGT1858', 'CGT1946', 'CGT1960', 'CGT1976', 'CGT1985', 'CGT1989', 'CGT1991', 'CGT1992']
isolates_tree=['CGT1837', 'CGT1602', 'CGT1317', 'CGT1447', 'CGT1323', 'CGT1992', 'CGT1976', 'CGT1621', 'CGT1473', 'CGT1327', 'CGT1778', 'CGT1197', 'CGT1238', 'CGT1615', 'CGT1219', 'CGT1020', 'CGT1009', 'CGT1783', 'CGT1394', 'CGT1833', 'CGT1600', 'CGT1408', 'CGT1568', 'CGT1500', 'CGT1342', 'CGT1858', 'CGT1283', 'CGT1991', 'CGT1985', 'CGT1841', 'CGT1459', 'CGT1795', 'CGT1531', 'CGT1436', 'CGT1519', 'CGT1440', 'CGT1834', 'CGT1058', 'CGT1606', 'CGT1511', 'CGT1417', 'CGT1084', 'CGT1777', 'CGT1960', 'CGT1808', 'CGT1428', 'CGT1989', 'CGT1174', 'CGT1946', 'CGT1493']
# Presence/absence matrix: rows = 50 isolates (tree order), cols = 22 genes.
toplot=[[0 for j in range(22)] for i in range(50)]
for i in range(50):
    print(i)
    # Each isolate's virulence factors occupy a contiguous slice of VF/Is
    # (file order); the slice ends where the next isolate's rows begin.
    # CGT1992 is the last isolate in file order, so its slice runs to the end.
    if isolates_tree[i] !='CGT1992':
        neighbour=isolates[isolates.index(isolates_tree[i])+1]
        setofgenes=VF[Is.index(isolates_tree[i]):Is.index(neighbour)]
    else:
        setofgenes=VF[Is.index(isolates_tree[i]):]
    # Mark a 1 for every gene of interest present in this isolate's slice.
    for j in range(22):
        if genes[j] in setofgenes:
            toplot[i][j]=1

# Render the presence/absence heatmap with gene and isolate labels.
xlabels=genes
ylabels=isolates_tree
ax = sns.heatmap(toplot, xticklabels=xlabels, yticklabels=ylabels)
plt.show()
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
26,
45941,
13,
25120,
13,
28826,
7,
15,
8,
198,
11748,
384,
397,
1211,
355,
3013,
82,
26,
3013,
82,
13,
2617,
62,
43810,
3419,
198,
6738,
2603,
29487,
8019,
1330,
... | 2 | 1,048 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
Postprocessing.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '176c06ceefb5f555205e72b20c962740cc0ec183'
import os
import traceback
from qgis.PyQt.QtWidgets import QApplication
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (Qgis,
QgsProject,
QgsProcessingFeedback,
QgsProcessingUtils,
QgsMapLayerType,
QgsWkbTypes,
QgsMessageLog,
QgsProviderRegistry,
QgsExpressionContext,
QgsExpressionContextScope)
from processing.core.ProcessingConfig import ProcessingConfig
from processing.gui.RenderingStyles import RenderingStyles
def set_layer_name(layer, context_layer_details):
    """
    Assign a display name to the given layer.

    Depending on the USE_FILENAME_AS_LAYER_NAME setting, the name is taken
    from the layer's source (the layer name embedded in the URI, or the file's
    base name), falling back to the name recorded in the parameter definition.
    """
    prefer_source_name = ProcessingConfig.getSetting(ProcessingConfig.USE_FILENAME_AS_LAYER_NAME)
    if not prefer_source_name and context_layer_details.name:
        # A parameter-defined name exists and the setting does not override it.
        layer.setName(context_layer_details.name)
        return

    uri_parts = QgsProviderRegistry.instance().decodeUri(layer.dataProvider().name(), layer.source())
    source_layer_name = uri_parts.get('layerName', '')
    if source_layer_name:
        # Prefer the layer name embedded in the source URI (e.g. database layers).
        layer.setName(source_layer_name)
        return

    source_path = uri_parts.get('path', '')
    if source_path:
        # Use the file's base name without its extension.
        layer.setName(os.path.splitext(os.path.basename(source_path))[0])
    else:
        # No usable source information -- fall back to the parameter's name.
        layer.setName(context_layer_details.name)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
17174,
17174,
4557,
8162,
198,
220,
220,
220,
2947,
36948,
13,
9078,
198,
220,
220,
220,
41436,
12,
198,
220,
220,
220,
7536,
220,
220,
220,
220,
220,... | 2.276137 | 1,253 |
"""
Functions to plot Pyrado policies
"""
import numpy as np
import torch.nn as nn
from matplotlib import ticker, colorbar
from matplotlib import pyplot as plt
from typing import Any
import pyrado
from pyrado.plotting.utils import AccNorm
from pyrado.policies.adn import ADNPolicy
from pyrado.policies.base import Policy
from pyrado.policies.neural_fields import NFPolicy
from pyrado.utils.data_types import EnvSpec
from pyrado.utils.input_output import ensure_no_subscript, ensure_math_mode, print_cbt
def _annotate_img(img,
                  data: [list, np.ndarray] = None,
                  thold_lo: float = None,
                  thold_up: float = None,
                  valfmt: str = '{x:.2f}',
                  textcolors: tuple = ('white', 'black'),
                  **textkw: Any):
    """
    Annotate a given image.

    .. note::
        The text color changes based on thresholds which only make sense for symmetric color maps.

    :param img: AxesImage to be labeled.
    :param data: data used to annotate. If None, the image's data is used.
    :param thold_lo: lower threshold for changing the color
    :param thold_up: upper threshold for changing the color
    :param valfmt: format of the annotations inside the heat map. This should either use the string format method, e.g.
                   '$ {x:.2f}', or be a :class:matplotlib.ticker.Formatter.
    :param textcolors: two color specifications. The first is used for values below a threshold,
                       the second for those above.
    :param textkw: further arguments passed on to the created text labels
    """
    if not isinstance(data, (list, np.ndarray)):
        data = img.get_array()

    # Normalize the threshold to the images color range
    # NOTE(review): halving min/max narrows thresholds toward zero, which
    # assumes a roughly symmetric data range around zero -- confirm.
    if thold_lo is None:
        thold_lo = data.min()*0.5
    if thold_up is None:
        thold_up = data.max()*0.5

    # Set default alignment to center, but allow it to be overwritten by textkw
    kw = dict(horizontalalignment='center', verticalalignment='center')
    kw.update(textkw)

    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = ticker.StrMethodFormatter(valfmt)

    # Loop over the data and create a text for each 'pixel'.
    # NOTE(review): `texts` is accumulated but never returned or used.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            kw.update(color=textcolors[thold_lo < data[i, j] < thold_up])  # if true then use second color
            text = img.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
def render_policy_params(policy: Policy,
                         env_spec: EnvSpec,
                         cmap_name: str = 'RdBu',
                         ax_hm: plt.Axes = None,
                         annotate: bool = True,
                         annotation_valfmt: str = '{x:.2f}',
                         colorbar_label: str = '',
                         xlabel: str = None,
                         ylabel: str = None,
                         ) -> plt.Figure:
    """
    Plot the weights and biases as images, and a color bar.

    .. note::
        If you want to have a tight layout, it is best to pass axes of a figure with `tight_layout=True` or
        `constrained_layout=True`.

    :param policy: policy to visualize
    :param env_spec: environment specification
    :param cmap_name: name of the color map, e.g. 'inferno', 'RdBu', or 'viridis'
    :param ax_hm: axis to draw the heat map onto, if equal to None a new figure is opened
    :param annotate: select if the heat map should be annotated
    :param annotation_valfmt: format of the annotations inside the heat map, irrelevant if annotate = False
    :param colorbar_label: label for the color bar
    :param xlabel: label for the x axis
    :param ylabel: label for the y axis
    :return: handles to figures

    NOTE(review): despite its documentation, ``ax_hm`` is never drawn onto --
    the images go into a freshly created figure. It is only dereferenced at
    the end for axis labels, so passing ``xlabel``/``ylabel`` while leaving
    ``ax_hm=None`` raises AttributeError -- confirm intended usage.
    """
    if not isinstance(policy, nn.Module):
        raise pyrado.TypeErr(given=policy, expected_type=nn.Module)

    cmap = plt.get_cmap(cmap_name)

    # Create axes and subplots depending on the NN structure
    num_rows = len(list(policy.parameters()))
    fig = plt.figure(figsize=(14, 10), tight_layout=False)
    gs = fig.add_gridspec(num_rows, 2, width_ratios=[14, 1])  # right column is the color bar
    ax_cb = fig.add_subplot(gs[:, 1])

    # Accumulative norm for the colors
    norm = AccNorm()

    # One image row per named parameter tensor of the policy.
    for i, (name, param) in enumerate(policy.named_parameters()):
        # Create current axis
        ax = plt.subplot(gs[i, 0])
        ax.set_title(name.replace('_', '\_'))

        # Convert the data and plot the image with the colors proportional to the parameters
        if param.ndim == 3:
            # For example convolution layers
            param = param.flatten(0)
            print_cbt(f'Flattened the first dimension of the {name} parameter tensor.', 'y')
        data = np.atleast_2d(param.detach().numpy())
        img = plt.imshow(data, cmap=cmap, norm=norm, aspect='auto', origin='lower')

        if annotate:
            # Annotation text flips color when a cell lies inside 75% of the
            # policy's global parameter value range.
            _annotate_img(
                img,
                thold_lo=0.75*min(policy.param_values).detach().numpy(),
                thold_up=0.75*max(policy.param_values).detach().numpy(),
                valfmt=annotation_valfmt
            )

        # Prepare the ticks
        if isinstance(policy, ADNPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_no_subscript(env_spec.obs_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['obs_layer.bias', 'nonlin_layer.log_weight', 'nonlin_layer.bias']:
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'prev_act_layer.weight':
                ax.set_xticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.set_xticklabels(ensure_math_mode(env_spec.act_space.labels))
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            elif name in ['_log_tau', '_log_kappa', '_log_capacity']:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        elif isinstance(policy, NFPolicy):
            if name == 'obs_layer.weight':
                ax.set_xticks(np.arange(env_spec.obs_space.flat_dim))
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.set_xticklabels(ensure_no_subscript(env_spec.obs_space.labels))
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name in ['_log_tau', '_potentials_init', 'resting_level', 'obs_layer.bias', 'conv_layer.weight',
                          'nonlin_layer.log_weight', 'nonlin_layer.bias']:
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.yaxis.set_major_locator(ticker.NullLocator())
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.yaxis.set_minor_formatter(ticker.NullFormatter())
            elif name == 'act_layer.weight':
                ax.xaxis.set_major_locator(ticker.NullLocator())
                ax.set_yticks(np.arange(env_spec.act_space.flat_dim))
                ax.xaxis.set_minor_formatter(ticker.NullFormatter())
                ax.set_yticklabels(ensure_math_mode(env_spec.act_space.labels))
            else:
                ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))

        # Add the color bar (call this within the loop to make the AccNorm scan every image)
        colorbar.ColorbarBase(ax_cb, cmap=cmap, norm=norm, label=colorbar_label)

    # Increase the vertical white spaces between the subplots
    plt.subplots_adjust(hspace=.7, wspace=0.1)

    # Set the labels
    if xlabel is not None:
        ax_hm.set_xlabel(xlabel)
    if ylabel is not None:
        ax_hm.set_ylabel(ylabel)

    return fig
| [
37811,
198,
24629,
2733,
284,
7110,
27958,
4533,
4788,
198,
37811,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
6738,
2603,
29487,
8019,
1330,
4378,
263,
11,
3124,
5657,
198,
6738,
2603,
29487,
80... | 2.163877 | 4,003 |
from ....models.models import Topic
from ....permissions.permissions import Permissions
from ...generics.delete import DeleteAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
@register_action("topic.delete")
class TopicDelete(DeleteAction):
"""
Action to delete simple topics that can be shown in the agenda.
"""
model = Topic()
schema = DefaultSchema(Topic()).get_delete_schema()
permission = Permissions.AgendaItem.CAN_MANAGE
| [
6738,
19424,
27530,
13,
27530,
1330,
47373,
198,
6738,
19424,
525,
8481,
13,
525,
8481,
1330,
2448,
8481,
198,
6738,
2644,
8612,
873,
13,
33678,
1330,
23520,
12502,
198,
6738,
2644,
22602,
13,
12286,
62,
15952,
2611,
1330,
15161,
27054,
... | 3.538462 | 143 |
import os
import re
import tempfile
from subprocess import call
from colorama import Fore
from getgauge.api import get_step_value
# Module-level step registry. NOTE(review): ``Registry`` is not imported in
# the visible lines -- presumably ``from getgauge.registry import Registry``
# exists elsewhere in the original file; confirm.
registry = Registry()
| [
11748,
28686,
198,
11748,
302,
198,
11748,
20218,
7753,
198,
6738,
850,
14681,
1330,
869,
198,
198,
6738,
3124,
1689,
1330,
4558,
198,
198,
6738,
651,
70,
559,
469,
13,
15042,
1330,
651,
62,
9662,
62,
8367,
628,
628,
628,
198,
198,
... | 3.354167 | 48 |
# Generated by Django 2.2.24 on 2022-03-07 10:56
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
1731,
319,
33160,
12,
3070,
12,
2998,
838,
25,
3980,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.8 | 30 |
# import resource
from typing import List, Tuple
from matflow.database.ServerData import ServerData
from matflow.frontendapi import keys
from matflow.hardwareadministration.Server import Server
import requests
from requests.auth import HTTPBasicAuth
| [
2,
1330,
8271,
198,
6738,
19720,
1330,
7343,
11,
309,
29291,
198,
198,
6738,
2603,
11125,
13,
48806,
13,
10697,
6601,
1330,
9652,
6601,
198,
6738,
2603,
11125,
13,
8534,
437,
15042,
1330,
8251,
198,
6738,
2603,
11125,
13,
10424,
1574,
... | 4.2 | 60 |
"""Calculations and Fucntions."""
import setup as set
def player_statcalc(player):
    """Look up and print the probability, resource, and number token for each tile letter in *player*."""
    probabilities = [set.letter_probs[tile] for tile in player]
    resources = [set.resource_index[set.resource_position[tile]] for tile in player]
    numbers = [set.letter_num[tile] for tile in player]
    print(probabilities)
    print(resources)
    print(numbers)
def player_odds(player):
    """Return the combined dice odds for the letter groups present in *player*.

    Each group contributes its pip weight exactly once, no matter how many of
    its letters appear; the summed weight is returned as a fraction of the 36
    possible two-dice rolls.
    """
    weighted_groups = (
        (1, ("B",)),
        (2, ("D", "Q")),
        (3, ("J", "N")),
        (4, ("A", "O")),
        (5, ("C", "P")),
        (5, ("E", "K")),
        (4, ("G", "M")),
        (3, ("F", "L")),
        (2, ("I", "R")),
        (1, ("H",)),
    )
    total = sum(
        weight
        for weight, letters in weighted_groups
        if any(letter in player for letter in letters)
    )
    return total / 36
def player_resOdds(player, resNum):
    """Return (as a string) the roll odds for the player's tiles of resource *resNum*."""
    matching = [tile for tile in player if set.resource_position[tile] == resNum]
    return str(player_odds(matching))
def add_settle(player, settles):
    """Echo *settles* and append each of its letters to *player* in place."""
    print(settles)
    player.extend(settles)
def dice_roll(roll):
    """Pay out resources for a dice roll.

    Finds every board letter whose number token equals *roll* and, for each
    player with a settlement on that letter, appends the letter's resource id
    to that player's hand (module-level state in ``set``).

    Args:
        roll (int): result of the dice roll.
    """
    # The original code repeated the same loop four times, once per player
    # color; pair each settlement list with the hand it pays into instead.
    players = (
        (set.red_settle, set.red_hand),
        (set.blue_settle, set.blue_hand),
        (set.orange_settle, set.orange_hand),
        (set.white_settle, set.white_hand),
    )
    for letter in set.letter_num:
        if set.letter_num[letter] != roll:
            continue
        for settlements, hand in players:
            for ownership in settlements:
                if ownership == letter:
                    hand.append(set.resource_position[letter])
def card_remove(player, cards):
    """Echo *cards* and remove each one (first occurrence) from *player* in place."""
    print(cards)
    for picked in cards:
        player.remove(picked)
def game_odds(resNum):
    """Print every board letter of resource *resNum* and return its roll odds as a string."""
    matching = [tile for tile in set.resource_position
                if set.resource_position[tile] == resNum]
    print(matching)
    return str(player_odds(matching))
| [
37811,
9771,
3129,
602,
290,
376,
1229,
429,
507,
526,
15931,
198,
198,
11748,
9058,
355,
900,
628,
198,
4299,
2137,
62,
14269,
9948,
66,
7,
7829,
2599,
198,
220,
220,
220,
37227,
18557,
428,
526,
15931,
198,
220,
220,
220,
2137,
62... | 2.217355 | 1,210 |
# Minimal ETL pipeline: copy users from a Postgres source into a Postgres
# staging table, then append them into a MySQL data warehouse.
import petl as etl, psycopg2 as pg, pymysql as mysql
from config import dbConfig

# Connections for the three layers; settings come from config.dbConfig.
SOURCE_DB_DBO = pg.connect(dbConfig["source"])
STAGING_DB_DBO = pg.connect(dbConfig["staging"])
DW_DB_DBO = mysql.connect(
    host= dbConfig["data_warehouse"]["host"],
    user= dbConfig["data_warehouse"]["user"],
    password= dbConfig["data_warehouse"]["password"],
    database= dbConfig["data_warehouse"]["database"]
)

# Rebuild staging from scratch on every run.
STAGING_DB_DBO.cursor().execute('DROP TABLE IF EXISTS users;')
# Extract the source users.
source_users_table = etl.fromdb(SOURCE_DB_DBO, 'SELECT userid, username, email, role FROM tuser;')
#petl.io.db.todb(table, dbo, tablename, schema=None, commit=True, create=False, drop=False, constraints=True, metadata=None, dialect=None, sample=1000)
# Load into staging, creating the table from the extracted schema.
etl.todb(source_users_table, STAGING_DB_DBO, "users", create= True)
# Only username/email are carried into the warehouse.
staging_users_table = etl.fromdb(STAGING_DB_DBO, 'SELECT username, email FROM users;')
#for MySQL the statement SET SQL_MODE=ANSI_QUOTES is required to ensure MySQL uses SQL-92 standard quote characters.
DW_DB_DBO.cursor().execute('SET SQL_MODE=ANSI_QUOTES')
#table should be exists in db for append data.
etl.appenddb(staging_users_table, DW_DB_DBO, "user")
#etl.todb(staging_users_table, DW_DB_DBO, "user", create= True)
| [
11748,
4273,
75,
355,
2123,
75,
11,
17331,
22163,
70,
17,
355,
23241,
11,
279,
4948,
893,
13976,
355,
48761,
198,
6738,
4566,
1330,
20613,
16934,
220,
198,
198,
47690,
62,
11012,
62,
35,
8202,
796,
23241,
13,
8443,
7,
9945,
16934,
1... | 2.678414 | 454 |
import os
import pickle
import random
import numpy as np
np.set_printoptions(threshold=np.inf)
import torch
from torch.utils.data import Dataset, DataLoader
if __name__ == '__main__':
    # Hard-coded path to the frame-level GCC features of the SSLR test split.
    test_data_path = "/Work18/2021/fuyanjie/exp_data/exp_nnsslm/test_data_dir/test_data_frame_level_gcc"
    # NOTE(review): SSLR_Dataset is not defined in this chunk — presumably
    # defined earlier in the full file; confirm.
    test_data = DataLoader(SSLR_Dataset(test_data_path), batch_size=64, shuffle=True, num_workers=4) # train_data.shape (batch_x, batch_y)
    # Smoke check: print the shapes of every batch produced by the loader.
    for (batch_x, batch_y, batch_z) in test_data:
        print(f'batch_x.shape {batch_x.shape}', flush=True)
        print(f'batch_y.shape {batch_y.shape}', flush=True)
        print(f'batch_z.shape {batch_z.shape}', flush=True)
        print(f'batch_z {batch_z}', flush=True)
| [
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
4738,
198,
198,
11748,
299,
32152,
355,
45941,
198,
37659,
13,
2617,
62,
4798,
25811,
7,
400,
10126,
28,
37659,
13,
10745,
8,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1... | 2.360927 | 302 |
import os
import cryspy
| [
11748,
28686,
198,
11748,
3960,
2777,
88,
198
] | 3 | 8 |
from django.views.generic import TemplateView
from directory.models import Organisation
from resources.models import Resource
from .mixins import ResourcesViewMixin
| [
6738,
42625,
14208,
13,
33571,
13,
41357,
1330,
37350,
7680,
198,
198,
6738,
8619,
13,
27530,
1330,
30801,
198,
6738,
4133,
13,
27530,
1330,
20857,
198,
198,
6738,
764,
19816,
1040,
1330,
13864,
7680,
35608,
259,
628,
198
] | 4.447368 | 38 |
# GY171204
from .utils import *
from .needle import *
| [
2,
402,
56,
1558,
1065,
3023,
198,
198,
6738,
764,
26791,
1330,
1635,
198,
6738,
764,
31227,
293,
1330,
1635,
198
] | 2.619048 | 21 |
import random
from fastapi.routing import APIRouter
from fastapi.responses import PlainTextResponse
from sovereign import XDS_TEMPLATES, __versionstr__
from sovereign import discovery
from sovereign.sources import match_node, extract_node_key
from sovereign.utils.mock import mock_discovery_request
router = APIRouter()
@router.get('/healthcheck', summary='Healthcheck (Does the server respond to HTTP?)')
@router.get('/deepcheck', summary='Deepcheck (Can the server render a random template?)')
@router.get('/version', summary='Display the current version of Sovereign')
| [
11748,
4738,
198,
6738,
3049,
15042,
13,
81,
13660,
1330,
3486,
4663,
39605,
198,
6738,
3049,
15042,
13,
16733,
274,
1330,
28847,
8206,
31077,
198,
6738,
18901,
1330,
1395,
5258,
62,
51,
3620,
6489,
29462,
11,
11593,
9641,
2536,
834,
19... | 3.694268 | 157 |
from io import BytesIO
from tarfile import TarInfo
def list_tar_files(tar_ball):
    """Iterate over the members of an open tar archive as they are read.

    `getmembers()` requires scanning the entire file before returning the
    first value; looping with `next()` keeps the iteration streaming.

    Args:
        tar_ball: an open `tarfile.TarFile`.

    Yields:
        tuple: `(TarInfo, file object)` for each member. The file object is
        `None` for members that are not regular files (e.g. directories),
        matching `TarFile.extractfile`.
    """
    tar_info = tar_ball.next()
    while tar_info is not None:
        # The original code had a dead `if tar_file is not None: pass` here;
        # the pair is yielded unconditionally either way.
        yield tar_info, tar_ball.extractfile(tar_info)
        tar_info = tar_ball.next()
def read_lines_from_tar_file(tar_file):
    """Read the whole tar member, decode it as UTF-8, and return its lines."""
    content = tar_file.read().decode('utf-8')
    return content.splitlines()
def write_lines_to_tarball(tar_ball, tar_info, lines):
    """Join *lines* with newlines and add them to *tar_ball* as one member.

    Args:
        tar_ball: an open `tarfile.TarFile` in write mode.
        tar_info: `TarInfo` whose name is reused for the new member.
        lines: iterable of strings; written UTF-8 encoded, newline-separated,
            with no trailing newline.
    """
    payload = '\n'.join(lines).encode('utf-8')
    with BytesIO(payload) as buffer:
        info = TarInfo(name=tar_info.name)
        info.size = len(payload)
        tar_ball.addfile(info, fileobj=buffer)
6738,
33245,
1330,
2750,
4879,
9399,
198,
6738,
13422,
7753,
1330,
14110,
12360,
198,
198,
4299,
1351,
62,
18870,
62,
16624,
7,
18870,
62,
1894,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
4600,
1136,
30814,
3419,
63,
4433,
6... | 2.330233 | 430 |
import time
import sys
class ProgressBar(object):
    """
    Simple command-line progress bar to wait in style.

    NOTE(review): only the docstring is visible in this chunk; the callable
    implementation used by ``a()`` in the demo below appears to be truncated
    — confirm against the full file.
    """
class Timer(object):
    """
    Minimal stopwatch that accumulates elapsed time over repeated runs and
    reports simple statistics. Works standalone or as a context manager
    (python 'with statement').
    """

    total = 0   # accumulated elapsed seconds over all completed runs
    t = None    # start timestamp of the run in progress, or None
    n = 0       # number of completed runs

    def start(self):
        """
        Begin timing; raises RuntimeError if a run is already in progress.
        """
        if self.t is not None:
            raise RuntimeError("Timer already started")
        self.t = time.time()

    def stop(self, *args):
        """
        Finish the current run and fold its duration into the statistics;
        raises RuntimeError if no run is in progress. Extra positional
        arguments are accepted (and ignored) so this can serve as __exit__.
        """
        if self.t is None:
            raise RuntimeError("Timer not started")
        self.total += time.time() - self.t
        self.n += 1
        self.t = None

    __enter__ = start
    __exit__ = stop

    def mean(self):
        """
        Average duration of a completed run.
        """
        return self.total / self.n

    def reset(self):
        """
        Discard all accumulated statistics.
        """
        self.n = self.total = 0
if __name__ == "__main__":
    # Demo: advance the progress bar 100 times with a short delay so the bar
    # animates. NOTE(review): relies on ProgressBar instances being callable,
    # which is not visible in this chunk — confirm against the full file.
    a = ProgressBar()
    for i in range(100):
        time.sleep(0.02)
        a()
| [
11748,
640,
198,
11748,
25064,
628,
198,
4871,
18387,
10374,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
770,
2134,
3359,
257,
2829,
3141,
1627,
4371,
2318,
284,
4043,
287,
3918,
13,
198,
220,
220,
220,
37227,
628,
... | 2.259392 | 559 |
import forumsweats.discordbot as discordbot
from ..commandparser import Member
import discord
from forumsweats import db
# Command registration metadata read by the bot's command framework.
name = 'sendbobux'
aliases = ('sendkromer', 'transmitkromer')
args = '<member> <amount>'
async def run(message, member: Member = None, amount: int = 0):
	'Sends some of your bobux to another user.'
	# Validate the arguments produced by the command parser.
	if not member:
		return await message.channel.send('Invalid member')
	if not amount or amount <= 0:
		return await message.channel.send('Invalid amount')
	sender_bobux = await db.get_bobux(message.author.id)
	bobux_in_auctions = await db.get_bobux_in_auctions_for_user(message.author.id)
	# The command is aliased; show the currency word matching the alias used.
	currency_name = 'kromer' if 'kromer' in message.command_name else 'bobux'
	# Balance locked up in auctions cannot be transferred.
	if sender_bobux - amount < bobux_in_auctions:
		return await message.channel.send(f'You can\'t send {amount} {currency_name}, because you have {bobux_in_auctions:,} in auctions')
	if sender_bobux < amount:
		return await message.channel.send(f'You don\'t have enough {currency_name}')
	# Self-transfers are treated as a harmless no-op.
	if message.author.id == member.id:
		return await message.channel.send(f'You sent **{amount}** {currency_name} to yourself. Nothing happened.')
	# Move the funds: debit the sender, credit the receiver.
	await db.change_bobux(message.author.id, -amount)
	await db.change_bobux(member.id, amount)
	reciever_bobux = await db.get_bobux(member.id)
	await message.channel.send(
		embed=discord.Embed(
			description=f'Ok, <@{member.id}> now has **{reciever_bobux:,}** {currency_name}. You now have **{sender_bobux-amount:,}** {currency_name}.'
		)
	)
	# Balance-tied roles may change when the receiver's balance changes.
	await discordbot.check_bobux_roles(member.id, reciever_bobux)
| [
11748,
14216,
732,
1381,
13,
15410,
585,
13645,
355,
36446,
13645,
198,
6738,
11485,
21812,
48610,
1330,
10239,
198,
11748,
36446,
198,
6738,
14216,
732,
1381,
1330,
20613,
198,
198,
3672,
796,
705,
21280,
65,
672,
2821,
6,
198,
7344,
1... | 2.769928 | 552 |
from eventlet.support import six
# Guard: this module is Python-3 only. NOTE(review): `assert` statements are
# stripped under `python -O`; an explicit raise would survive optimized mode.
assert six.PY3, 'This is a Python 3 module'
| [
6738,
1785,
1616,
13,
11284,
1330,
2237,
198,
30493,
2237,
13,
47,
56,
18,
11,
705,
1212,
318,
257,
11361,
513,
8265,
6,
198
] | 3.208333 | 24 |
# -*- coding: utf-8 -*-
"""
装饰器
"""
import asyncio
from asyncio import iscoroutinefunction
from functools import wraps
from loguru import logger
import traceback
from .concurrency import run_function
from .helpers import spider_sleep
from .url import get_location_from_history
from ..request import Request
from ..response import Response
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
35318,
165,
98,
108,
161,
247,
101,
198,
37811,
198,
198,
11748,
30351,
952,
198,
6738,
30351,
952,
1330,
318,
10215,
28399,
8818,
198,
6738,
1257,
310,
1014... | 3.382353 | 102 |
from testing.postgresql import Postgresql
import pytest
from app import create_app
from Model import db as _db
from Model import Client, FeatureRequest
from configTest import SQLALCHEMY_DATABASE_URI as db_url
@pytest.yield_fixture(scope='session')
@pytest.fixture(scope='session')
@pytest.yield_fixture(scope='session')
@pytest.fixture(scope='session', autouse=False)
| [
6738,
4856,
13,
7353,
34239,
13976,
1330,
2947,
34239,
13976,
198,
11748,
12972,
9288,
198,
198,
6738,
598,
1330,
2251,
62,
1324,
198,
6738,
9104,
1330,
20613,
355,
4808,
9945,
198,
6738,
9104,
1330,
20985,
11,
27018,
18453,
198,
6738,
... | 3.007874 | 127 |
import pytest
from mvlearn.cluster.base_kmeans import BaseKMeans
| [
11748,
12972,
9288,
198,
6738,
285,
85,
35720,
13,
565,
5819,
13,
8692,
62,
74,
1326,
504,
1330,
7308,
42,
5308,
504,
628
] | 2.869565 | 23 |
"""
Raspagem de dados do Portal Investidor para encontrar detalhes sobre operações
feitas pelo Tesouro Direto.
PRECISA DE UMA LISTA DE PROTOCOLOS PARA RODAR!!!!!!!
Siga as instruções:
1. Navegue a https://portalinvestidor.tesourodireto.com.br/Consulta
2. Preencha os Filtros e clique em Aplicar
3. Vá até a transação mais antiga, no FIM da lista
4. Abaixo de todos os ítens à direita, clique e segure o mouse
5. Segurando o clique, mova o mouse para cima, até ele ficar no espaço em
branco logo acima e ligeiramente à esquerda do primeiro "Investimento".
Você deve ter todo o texto somente das operações selecionado.
7. Copie e cole em um editor de texto.
8. Cada ítem deve ser algo assim:
Investimento
03/01/2020
Nº de protocolo - XXXXXXXX
CORRETORA XXXX
Status
REALIZADO
VER DETALHES
9. Salve o arquivo e edite a varíavel logo abaixo para apontar para ele
Antes de rodar, crie a pasta "td" no local onde vai rodar o scraper.
"""
OPS_FILE = ""
import re
import os
import scrapy
from bs4 import BeautifulSoup
BASE_URL = 'https://portalinvestidor.tesourodireto.com.br'
USER = os.environ["PORTAL_INVESTIDOR_USER"]
PASS = os.environ["PORTAL_INVESTIDOR_PASS"]
REMOTE_PROTOCOLS = []
ALL_PROTOCOLS = []
with open(OPS_FILE, "r") as f:
for line in f:
line = line.split(' - ')
if len(line) > 1:
line = re.search(r'^\d+', line[1])
if line:
ALL_PROTOCOLS.append(line.group())
def authentication_failed(response):
    """Check whether the login failed.

    Currently a stub that always returns None (falsy); the intended
    implementation is kept below, commented out.
    """
    pass
    # soup = BeautifulSoup(response.body, 'html.parser')
    # if soup(text=re.compile('Valor líquido total')):
    #     return True
    # return False
class PortalInvestidorSpider(scrapy.Spider):
    """
    Spider which crawls Portal Investidor to find all Tesouro Direto \
    transactions

    NOTE(review): only the spider name and start_urls are visible in this
    chunk; the parse/login callbacks appear to be truncated — confirm
    against the full file.
    """
    name = 'portalinvestidor'
    start_urls = [BASE_URL]
| [
37811,
198,
49,
5126,
363,
368,
390,
9955,
418,
466,
25663,
7488,
312,
273,
31215,
2207,
756,
20040,
1062,
282,
956,
523,
4679,
27296,
16175,
127,
113,
274,
198,
5036,
21416,
16176,
78,
10696,
280,
305,
34177,
1462,
13,
198,
198,
47,
... | 2.259932 | 881 |
"""String module test parameters"""
from .helpers import parametrize
# Shared fixtures: the integers 1..5 and their string forms.
LIST_TEST_INT = list(range(1, 6))
LIST_TEST_STR = [str(x) for x in LIST_TEST_INT]
# The same values joined with different separators.
STR_TEST_SPACES = ' '.join(LIST_TEST_STR)
STR_TEST_COMMAS = ','.join(LIST_TEST_STR)
# Mixed separators and whitespace, for CSV-ish splitting tests.
STR_TEST_CSV = '1 2, 3\t4, \t5'
# Plain ASCII alphabet and a diacritic-laden variant for simplify tests.
ALPHA_SIMPLE = 'abcdefghijklmnopqrstuvwxyz'
ALPHA_COMPLEX = 'åbçdéfghîjklmnöpqrštùvwxyz'
def pad(val: str, spaces: int = 2) -> str:
    """Surround *val* with `spaces` whitespace characters on each side.

    Arguments:
        val {str} -- string to pad

    Keyword Arguments:
        spaces {int} -- number of spaces added on either side (default: {2})

    Returns:
        str -- padded string
    """
    margin = ' ' * spaces
    return margin + val + margin
def param_cast():
    """Parametrize `test_cast`"""
    argnames = 'val,expected'
    cases = (
        (1, '1', ),
        ('a', 'a'),
        (LIST_TEST_INT, LIST_TEST_STR),
        ('12345', LIST_TEST_STR),  # sorry!
    )
    case_ids = ('solo-int', 'solo-str', 'list-int', 'oops', )
    return parametrize(argnames, cases, case_ids)
def param_define_split_join():
    """Parametrize `test_define_split` and `test_define_join`"""
    argnames = 'sep,str_val,list_val'
    cases = (
        (' ', STR_TEST_SPACES, LIST_TEST_STR),
        (',', STR_TEST_COMMAS, LIST_TEST_STR),
    )
    case_ids = ('spaces', 'commas', )
    return parametrize(argnames, cases, case_ids)
def param_strip():
    """Parametrize `test_strip`"""
    argnames = 'val,expected'
    cases = (
        (STR_TEST_SPACES, STR_TEST_SPACES),
        (f' {STR_TEST_SPACES}', STR_TEST_SPACES),
        (f'{STR_TEST_COMMAS} ', STR_TEST_COMMAS),
        (f' {STR_TEST_CSV} ', STR_TEST_CSV),
        (LIST_TEST_STR, LIST_TEST_STR),
        ([pad(num) for num in LIST_TEST_STR], LIST_TEST_STR),
    )
    case_ids = ('1-none', '1-left', '1-right', '1-center', '[]-none', '[]-both', )
    return parametrize(argnames, cases, case_ids)
def param_filter_empty():
    """Parametrize `test_filter_empty`"""
    argnames = 'val,expected'
    cases = (
        (LIST_TEST_STR, LIST_TEST_STR),
        (('1', '', '2', '', '', '3', '4', '5'), LIST_TEST_STR),
    )
    case_ids = ('none', 'some', )
    return parametrize(argnames, cases, case_ids)
def param_split():
    """Parametrize `test_split`"""
    names = 'val,kwargs,expected'
    # Each case is (input value, kwargs passed to split, expected result);
    # they cover the defaults, a custom regex separator, and the
    # strip_text / allow_empty options individually and combined.
    vals = (
        (
            LIST_TEST_INT,
            {},
            LIST_TEST_STR,
        ),
        (
            '|'.join(LIST_TEST_STR),
            {},
            ['|'.join(LIST_TEST_STR)],
        ),
        (
            '|'.join(LIST_TEST_STR),
            {'sep': r'\|'},
            LIST_TEST_STR,
        ),
        (
            ' 1| 2 | 3 | 4 | 5 ',
            {'sep': r'\|', },
            LIST_TEST_STR,
        ),
        (
            ' 1| 2 | 3 | 4 | 5 ',
            {'sep': r'\|', 'strip_text': False, },
            [' 1', ' 2 ', ' 3 ', ' 4 ', ' 5 '],
        ),
        (
            ' 1|| 2 | | 3 | | 4 | | 5 ',
            {'sep': r'\|', },
            LIST_TEST_STR,
        ),
        (
            ' 1|| 2 | | 3 | | 4 | | 5 ',
            {'sep': r'\|', 'allow_empty': True},
            ['1', '', '2', '', '3', '', '4', '', '5'],
        ),
        (
            ' 1|| 2 | | 3 | | 4 | | 5 ',
            {'sep': r'\|', 'strip_text': False, 'allow_empty': True},
            [' 1', '', ' 2 ', ' ', ' 3 ', ' ', ' 4 ', ' ', ' 5 '],
        ),
    )
    # One id per case above, in the same order.
    ids = (
        'defaults',
        'sep_default',
        'sep_custom',
        'strip_default',
        'strip_disabled',
        'allow_default',
        'allow_enabled',
        'all_options',
    )
    return parametrize(names, vals, ids)
def param_simplify():
    """Parametrize `test_simplify`"""
    argnames = 'val,expected'
    cases = (
        (ALPHA_SIMPLE, ALPHA_SIMPLE),
        (ALPHA_COMPLEX, ALPHA_SIMPLE),
    )
    case_ids = ('simple', 'complex', )
    return parametrize(argnames, cases, case_ids)
| [
37811,
10100,
8265,
1332,
10007,
37811,
198,
6738,
764,
16794,
364,
1330,
5772,
316,
380,
2736,
198,
198,
45849,
62,
51,
6465,
62,
12394,
796,
1351,
7,
9521,
7,
16,
11,
718,
4008,
198,
45849,
62,
51,
6465,
62,
18601,
796,
685,
2536,... | 1.829178 | 2,166 |
from flask import Flask, jsonify
from . import hello, probes
app = create_app()
| [
6738,
42903,
1330,
46947,
11,
33918,
1958,
198,
198,
6738,
764,
1330,
23748,
11,
33124,
628,
628,
198,
1324,
796,
2251,
62,
1324,
3419,
198
] | 3.4 | 25 |
# Copyright 2018 Christoph Heindl.
#
# Licensed under MIT License
# ============================================================
import tensorflow as tf
import tfmpl
import numpy as np
| [
2,
15069,
2864,
1951,
2522,
679,
521,
75,
13,
198,
2,
198,
2,
49962,
739,
17168,
13789,
198,
2,
46111,
4770,
2559,
18604,
198,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
256,
38353,
489,
198,
11748,
299,
32152,
355,
45941,... | 4.348837 | 43 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-16 15:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
319,
1584,
12,
940,
12,
1433,
1315,
25,
2623,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
... | 2.964286 | 84 |
#!/usr/local/bin/python3
import brotli
import glob
import hashlib
import json
import machfs
import os
import struct
import sys
import typing
import urllib.request
import zipfile
LIBRARY_DIR = os.path.join(os.path.dirname(__file__), "..", "Library")
CACHE_DIR = os.path.join("/tmp", "infinite-mac-cache")
DEBUG = os.getenv("DEBUG", "0") == "1"

# CLI: <input disk image> <chunk output dir> <manifest output dir>
input_path = sys.argv[1]
output_dir = sys.argv[2]
manifest_dir = sys.argv[3]

DISK_SIZE = 200 * 1024 * 1024
CHUNK_SIZE = 256 * 1024
chunk_count = 0
total_size = 0
input_file_name = os.path.basename(input_path)
# NOTE(review): get_import_folders and traverse_folders are not defined in
# this chunk — presumably defined in the truncated part of the file; confirm.
import_folders = get_import_folders()
# NOTE(review): shadows the builtin `hash`; accumulates a version digest.
hash = hashlib.sha256()
sys.stderr.write("Chunking and compressing %s" % input_file_name)
sys.stderr.flush()
# Build the HFS volume in memory: read the input image, graft the imported
# folders under "Library", then serialize it to a fixed-size bootable image.
with open(input_path, "rb") as input_file:
    v = machfs.Volume()
    v.read(input_file.read(), preserve_desktopdb=True)
    v.name = "Infinite HD"

    for folder_path, folder in import_folders.items():
        parent_folder_path, folder_name = os.path.split(folder_path)
        parent = traverse_folders(v["Library"], parent_folder_path)
        parent[folder_name] = folder

    flat = v.write(
        size=DISK_SIZE,
        align=512,
        desktopdb=False,
        bootable=True,
    )

# In debug builds skip real compression to keep iteration fast.
brotli_quality = 0 if DEBUG else 11
for i in range(0, DISK_SIZE, CHUNK_SIZE):
    chunk = flat[i:i + CHUNK_SIZE]
    total_size += len(chunk)
    chunk_compressed = brotli.compress(chunk, quality=brotli_quality)
    chunk_path = os.path.join(output_dir,
                              f"{input_file_name}.{chunk_count}.br")
    # Use compressed version for the version hash so that if we change the
    # compression quality we can trigger a re-download.
    hash.update(chunk_compressed)
    with open(chunk_path, "wb+") as chunk_file:
        chunk_file.write(chunk_compressed)
    chunk_count += 1
    sys.stderr.write(".")
    sys.stderr.flush()
sys.stderr.write("\n")

# Emit a manifest describing the chunk set so the client can fetch it.
manifest_path = os.path.join(manifest_dir, f"{input_file_name}.json")
with open(manifest_path, "w+") as manifest_file:
    json.dump(
        {
            "totalSize": total_size,
            "chunkCount": chunk_count,
            "chunkSize": CHUNK_SIZE,
            "version": hash.hexdigest()
        },
        manifest_file,
        indent=4)
| [
2,
48443,
14629,
14,
12001,
14,
8800,
14,
29412,
18,
198,
198,
11748,
1379,
83,
4528,
198,
11748,
15095,
198,
11748,
12234,
8019,
198,
11748,
33918,
198,
11748,
3235,
9501,
198,
11748,
28686,
198,
11748,
2878,
198,
11748,
25064,
198,
11... | 2.237817 | 1,026 |
# Copyright (c) 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Severity levels carried in backend responses.
STATUS_OK = 'ok'
STATUS_INFO = 'info'
STATUS_DEBUG = 'debug'
STATUS_WARNING = 'warning'
STATUS_ERROR = 'error'
STATUS_NOT_FOUND = 'not_found'

# Vendor-specific numeric message identifiers returned by the backend.
MSG_GENERAL_ERROR = '13690601492'
MSG_INVALID_VDM_ID = '14227341325'
MSG_INVALID_MOVER_ID = '14227341323'
MSG_FILESYSTEM_NOT_FOUND = "18522112101"
MSG_FILESYSTEM_EXIST = '13691191325'
MSG_VDM_EXIST = '13421840550'
MSG_SNAP_EXIST = '13690535947'
MSG_INTERFACE_NAME_EXIST = '13421840550'
MSG_INTERFACE_EXIST = '13691781136'
MSG_INTERFACE_INVALID_VLAN_ID = '13421850371'
MSG_INTERFACE_NON_EXISTENT = '13691781134'
MSG_JOIN_DOMAIN = '13157007726'
MSG_UNJOIN_DOMAIN = '13157007723'

# NOTE(review): exact semantics not visible in this chunk — presumably the
# number of IP addresses allocated per operation; confirm against callers.
IP_ALLOCATIONS = 2

# HTTP/XML plumbing for talking to the XML API endpoint.
CONTENT_TYPE_URLENCODE = {'Content-Type': 'application/x-www-form-urlencoded'}
XML_HEADER = '<?xml version="1.0" encoding="UTF-8" standalone="yes"?>'
XML_NAMESPACE = 'http://www.emc.com/schemas/celerra/xml_api'

# CIFS share access levels.
CIFS_ACL_FULLCONTROL = 'fullcontrol'
CIFS_ACL_READ = 'read'

# SSH errors matching this pattern are transient lock failures and retried.
SSH_DEFAULT_RETRY_PATTERN = r'Error 2201:.*: unable to acquire lock\(s\)'
| [
2,
15069,
357,
66,
8,
1946,
412,
9655,
10501,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220... | 2.469697 | 660 |
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy.optimize import curve_fit
from scipy.optimize import minimize
from scipy.signal import find_peaks
from scipy.stats import pearsonr as pearsonr
from scipy.special import erf as erf
"""Fitting functions for multi-Gaussian fitting.
"""
def fit_wrapper(x, *args):
    """Adapt a flat parameter list to `multi_gaussian_fit_function`.

    This lets a fitter pass a variable number of Gaussians as one flat
    argument list, which is sliced into per-Gaussian parameter groups here.

    Args:
        x (array): independent variable, such that y = f(x).
        args: flat parameter list; args[0:n] are the amplitudes, args[n:2n]
            the horizontal offsets, args[2n:3n] the standard deviations of
            the n Gaussians, and args[-1] is the vertical offset.

    Returns:
        array: multi_gaussian_fit_function evaluated at x.
    """
    n = (len(args) - 1) // 3  # three parameters per Gaussian, plus one offset
    amplitudes = args[:n]
    centers = args[n:2 * n]
    widths = args[2 * n:3 * n]
    baseline = args[-1]
    return multi_gaussian_fit_function(x, amplitudes, centers, widths, baseline)
def multi_gaussian_fit_function(x, h, mu, sigma, vertical_offset):
    """Sum of Gaussians on a constant baseline.

    Args:
        x (array): independent variable, such that y = f(x).
        h (sequence): amplitudes of the Gaussians.
        mu (sequence): centers (translational offsets) of the Gaussians.
        sigma (sequence): standard deviations (widths) of the Gaussians.
        vertical_offset (float): constant baseline added to the sum.

    Returns:
        array: vertical_offset + sum_i h[i] * exp(-(x - mu[i])^2 / (2 sigma[i]^2)).
    """
    # Start from the flat baseline, then accumulate one Gaussian per
    # (amplitude, center, width) triple.
    result = np.zeros(len(x)) + vertical_offset
    for amplitude, center, width in zip(h, mu, sigma):
        result = result + amplitude * np.exp(-((x - center) ** 2) / (2 * width ** 2))
    return result
def initial_guess(initial_amplitude, initial_translational_offset, initial_stddev, initial_vertical_offset):
    """Flatten per-Gaussian initial values into a fit parameter vector.

    The layout matches `fit_wrapper`: all amplitudes first, then all
    translational offsets, then all standard deviations, then the single
    vertical offset.

    Args:
        initial_amplitude (sequence): initial amplitudes of the Gaussians.
        initial_translational_offset (sequence): initial centers of the Gaussians.
        initial_stddev (sequence): initial standard deviations of the Gaussians.
        initial_vertical_offset (float): initial vertical offset.

    Returns:
        list: the p0 vector expected by the curve fit via `fit_wrapper`.
    """
    # (The commented-out per-Gaussian grouping experiment that used to live
    # here was dead code and has been removed.)
    return (
        list(initial_amplitude)
        + list(initial_translational_offset)
        + list(initial_stddev)
        + [initial_vertical_offset]
    )
def bound_maker(amplitude_bounds, translational_offset_bounds, stddev_bounds, vertical_offset_bounds, number_gaussians):
    """Build the (lower, upper) bounds tuple for the multi-Gaussian fit.

    Args:
        amplitude_bounds (tuple): (low, high) for every Gaussian amplitude.
        translational_offset_bounds (tuple): (low, high) for every center.
        stddev_bounds (tuple): (low, high) for every standard deviation.
        vertical_offset_bounds (tuple): (low, high) for the vertical offset.
        number_gaussians (int): number of Gaussians in the fit.

    Returns:
        tuple: (lower, upper) bound lists, laid out as `fit_wrapper` expects.
    """
    def _layer(index):
        # One bound value per parameter, in amplitude/offset/stddev order,
        # with the single vertical-offset bound appended last.
        return (
            [amplitude_bounds[index]] * number_gaussians
            + [translational_offset_bounds[index]] * number_gaussians
            + [stddev_bounds[index]] * number_gaussians
            + [vertical_offset_bounds[index]]
        )
    return (_layer(0), _layer(1))
def bound_maker_subsequent(t, y, y_fit, popt, num_new_gaussians, amplitude_bounds, stddev_bounds, vertical_offset_bounds, new_translational_offset, noise_avg):
    """
    Makes the bounds vector for fits after the first. Takes into account the previous fitted values
    Args:
        t (array): time grid of burst
        y (array): burst
        y_fit (array): previous fit to the burst
        popt (arrapy): the results from the multi gaussian curve fit of the previous fit
        num_new_gaussians (int): the number of gaussians to be added to the new fit
        amplitude_bounds (array): bounds on the amplitudes of the gaussians
        stddev_bounds (array): bounds on the standard deviations of the gaussians
        vertical_offset_bounds (array): bounds on the vertical offset
        new_translational_offset (tuple):
    Returns:
        bounds (tuple): lists the bounds on the parameters used in the multigaussian fits
    """
    # NOTE(review): y_fit and amplitude_bounds are accepted but never used in
    # this body, and vert_offset below is assigned but unused — confirm
    # whether they are vestigial.
    # Unpack the previous fit: popt is [amps..., centers..., widths..., offset].
    num_gaussians_old = int((len(popt)-1)/3)
    amplitudes = popt[:num_gaussians_old]
    translational_offsets = popt[num_gaussians_old:2*num_gaussians_old]
    widths = popt[2*num_gaussians_old:3*num_gaussians_old]
    vert_offset = popt[-1]
    # Old amplitudes may move +/-20%; new ones span a data-driven range
    # relative to the signal peak above the average noise level.
    lower_amp = np.append(amplitudes-np.abs(amplitudes)*.2, [-.75*(np.max(y)-noise_avg)]*num_new_gaussians)
    upper_amp = np.append(amplitudes+np.abs(amplitudes)*.2, [1.2*(np.max(y)-noise_avg)]*num_new_gaussians)
    # limit the movement of the previously fitted gaussians.
    lower_translational = np.append(translational_offsets*.8, [0]*num_new_gaussians)
    upper_translational = np.append(translational_offsets*1.2, [np.max(t)]*num_new_gaussians)
    # With exactly one new Gaussian, center its bounds around the suggested
    # new translational offset instead of the whole time axis.
    if num_new_gaussians == 1:
        lower_translational = np.append(translational_offsets*.8, [new_translational_offset[-1]*.5])
        upper_translational = np.append(translational_offsets*1.2, [new_translational_offset[-1]*1.5])
    # Clamp the center bounds to the valid time range.
    lower_translational[lower_translational<0] = 0
    upper_translational[upper_translational>np.max(t)] = .9*np.max(t)
    # Width bounds are uniform for old and new Gaussians alike.
    lower_stddev = np.append([stddev_bounds[0]]*num_gaussians_old, [stddev_bounds[0]]*num_new_gaussians)
    upper_stddev = np.append([stddev_bounds[1]]*num_gaussians_old, [stddev_bounds[1]]*num_new_gaussians)
    # make into array
    lower = np.concatenate((lower_amp, lower_translational, lower_stddev, [vertical_offset_bounds[0]]))
    upper = np.concatenate((upper_amp, upper_translational, upper_stddev, [vertical_offset_bounds[1]]))
    bounds = (lower, upper)
    return bounds
def calculate_r2(y, y_fit):
    """Return the Pearson correlation coefficient between data and fit.

    Note: despite the name, this returns the correlation coefficient r from
    `scipy.stats.pearsonr`, not the coefficient of determination R^2 — the
    old sum-of-squares implementation was replaced. The current behaviour is
    kept because callers depend on it.

    Args:
        y (array): data.
        y_fit (array): fit to the data, evaluated on the same time axis as y.

    Returns:
        float: Pearson r in [-1, 1]; 1 means the fit tracks the data perfectly.
    """
    return pearsonr(y, y_fit)[0]
def calculate_rmse(targets, predictions):
    """Root-mean-square error (RMSE) between targets and predictions.

    Args:
        targets (array): actual values.
        predictions (array): predicted values.

    Returns:
        float: sqrt(mean((predictions - targets)**2)).
    """
    residual = predictions - targets
    return np.linalg.norm(residual) / np.sqrt(len(predictions))
def calculate_max_error(targets, predictions):
    """Largest absolute deviation between targets and predictions.

    Args:
        targets (array): actual values.
        predictions (array): predicted values.

    Returns:
        float: max(|targets - predictions|).
    """
    return np.abs(targets - predictions).max()
def gaussian_generator(npoints, std):
    """
    Make a Gaussian window npoints long with standard deviation std
    Args:
        npoints (int): length of Gaussian
        std (float): standard deviation of Gaussian
    Returns:
        g (array): Gaussian (peak value 1 at the center)
    """
    # scipy.signal.gaussian was deprecated and removed in SciPy >= 1.13;
    # the window now lives in scipy.signal.windows. Fall back for old SciPy.
    try:
        g = signal.windows.gaussian(npoints, std=std)
    except AttributeError:
        g = signal.gaussian(npoints, std=std)
    return g
def rect_generator(npoints, width, area):
    """Rect (boxcar) for correlation, with height such that height*width=area.

    Args:
        npoints (int): total length of the output array.
        width (float): nominal width of the rect.
        area (float): area of the rect; dictates its height.

    Returns:
        r (array): zeros except for the centered rect.
    """
    r = np.zeros(npoints)
    start = int(np.floor(npoints / 2 - width / 2 + 1))
    stop = int(np.ceil(npoints / 2 + width / 2))
    # Floor the width when computing the height because the realized span is
    # rounded — this keeps the area constant regardless of rounding.
    r[start:stop] = area / (np.floor(width / 2) * 2)
    return r
def seed_initial_offsets_peaks(y, noise, rect_area=500, prominence_knockdown_factor=0.03):
    """
    Locate the seed positions for the initial fit: one seed per peak of a
    smoothed copy of the signal (smoothing = cross correlation with a rect).

    Args:
        y (array): signal
        noise (float): noise level of y
        rect_area (float): area of the smoothing rect (area = width * height)
        prominence_knockdown_factor (float): scales the find_peaks prominence
            relative to the max height of the smoothed signal

    Returns:
        peaks (array): indices of the initial peaks
    """
    # loosen the prominence criterion for low-SNR signals
    max_snr = np.max(y / noise)
    if max_snr < 5:
        prominence_knockdown_factor = 0.09
    elif max_snr <= 10:
        prominence_knockdown_factor = 0.06
    npts = len(y)
    smoothing_rect = rect_generator(npts, npts / 35, rect_area)
    # cross correlation of signal and rect, trimmed to the central window
    smoothed = signal.correlate(y, smoothing_rect)[npts // 2:-(npts // 2)]
    prominence = (np.max(smoothed) - noise * rect_area) * prominence_knockdown_factor
    peaks, _ = find_peaks(smoothed, prominence=prominence)
    return peaks
def initial_seed(t, y, noise, max_num_gaussians=8, rect_area=500):
    """
    Makes seeds for the first fit.
    Calls seed_initial_offsets_peaks.

    Args:
        t (array): time corresponding to signal
        y (array): signal
        noise (float): noise level of y
        max_num_gaussians (int): the maximum number of initial seeds
        rect_area (float): area of rect, where area = width * height

    Returns:
        initial_translational_offset (array): initial conditions for the horizontal offsets, mu
        initial_amplitude (array): initial conditions for the amplitudes, A
    """
    peak_locns = seed_initial_offsets_peaks(y, noise, rect_area=rect_area)  # use as initial mus
    peak_values = y[peak_locns]  # use as initial amplitudes
    if len(peak_values) > max_num_gaussians:
        # keep only the max_num_gaussians LARGEST peaks: np.argsort is
        # ascending, so take the tail (the original head-slice kept the
        # smallest peaks, discarding the strongest seed candidates)
        sorted_values = np.argsort(peak_values)[-max_num_gaussians:]
        peak_values = peak_values[sorted_values]
        peak_locns = peak_locns[sorted_values]
    initial_translational_offset = t[peak_locns]
    initial_amplitude = peak_values - noise
    # subtracting the noise floor can leave negative amplitudes; drop those seeds
    positive_value_locations = np.argwhere(initial_amplitude > 0)
    initial_amplitude = initial_amplitude[positive_value_locations].flatten()
    initial_translational_offset = initial_translational_offset[positive_value_locations].flatten()
    return initial_translational_offset, initial_amplitude
def calculate_effective_length(model, fitted_vert_offset, delta_t, max_normalized_height=1):
    """
    Effective length: area divided by max height, i.e. the length of a
    rectangle with the same area and the same max height as the signal.

    Args:
        model (array): signal
        fitted_vert_offset (float): h in the multigaussian fitting equation
        delta_t (float): time discretization
        max_normalized_height (float): maximum height of the signal

    Returns:
        effective_length (float): effective length
    """
    baseline_removed = model - fitted_vert_offset
    area = np.sum(baseline_removed)
    peak_height = np.max(model) - fitted_vert_offset
    return area / peak_height * max_normalized_height * delta_t
def calculate_burst_duration(y_fit, fitted_vert_offset, delta_t, lower_thresh=0.1, upper_thresh=0.9):
    """
    Calculate the duration of the burst between the lower and upper
    thresholds of the cumulative sum of the signal.

    Args:
        y_fit (array): values of fitted burst
        fitted_vert_offset (float): h in the multigaussian fitting equation
        delta_t (float): time discretization
        lower_thresh (float): lower fraction of signal to include in calculation
        upper_thresh (float): upper fraction of signal to include in calculation

    Returns:
        duration (float): time between the indices set by lower_thresh and
            upper_thresh operating on the integrated area of the signal;
            0 when the thresholds cannot be located
    """
    try:
        cs = np.cumsum(y_fit - fitted_vert_offset)
        csm = np.max(cs)
        lower_index = np.argwhere(cs > (lower_thresh * csm))[0]
        upper_index = np.argwhere(cs < (upper_thresh * csm))[-1]
        duration = (upper_index - lower_index) * delta_t
    except (IndexError, ValueError):
        # argwhere returns an empty array when no sample crosses a threshold
        # (IndexError on [0]/[-1]); np.max raises ValueError on empty input
        print("problem calculating the duration")
        duration = [0]
    return duration[0]
def make_weights(y, g_length=100):
    """
    Build the weighting vector for the curve fit.

    Biases the fit toward the large-magnitude parts of the burst and
    de-emphasizes the small parts (the tails). The weights come from a
    smoothed copy of the burst, obtained by cross correlating the signal
    with a Gaussian.

    Args:
        y (array): signal
        g_length (int): standard deviation of the smoothing Gaussian

    Returns:
        sigma (array): per-sample sigmas for the curve fitting scheme
            (smaller sigma = heavier weight where the burst is tall)
    """
    npts = len(y)
    kernel = gaussian_generator(npts, g_length)
    start = int(np.ceil(npts / 2 - 1))
    stop = -int(np.floor(npts / 2))
    smoothed = signal.correlate(y, kernel)[start:stop]
    normalized = smoothed / np.max(smoothed)
    return 1 / np.sqrt(normalized)
def eval_fit(y, y_fit, t, popt, delta_t):
    """
    Compute metrics that characterize the efficacy of the fit.

    Args:
        y (array): signal
        y_fit (array): fitted version of the signal
        t (array): times corresponding to y and y_fit
        popt (array): results of curve fit (last entry is the vertical offset)
        delta_t (float): time discretization

    Returns:
        r2 (float): correlation between fit and data
        rmse (float): root mean square error between fit and signal
        max_error (float): maximum absolute difference between y and y_fit
        max_error_normalized (float): max_error / max(y)
        duration (float): burst duration between the lower/upper thresholds
            of the integrated signal area
    """
    vert_offset = popt[-1]
    correlation = calculate_r2(y, y_fit)
    rms_error = calculate_rmse(y, y_fit)
    worst_error = calculate_max_error(y, y_fit)
    worst_error_relative = worst_error / np.max(y)
    burst_duration = calculate_burst_duration(y_fit, vert_offset, delta_t)
    return correlation, rms_error, worst_error, worst_error_relative, burst_duration
def package_fit_data(r2, rmse, max_error, max_error_normalized,
                    time_eff, duration, popt, ang_vel,
                    orbit_radius, x_offsets, y_offsets, number_gaussians,
                    y, noise_avg, max_num_gaussians, dirctn, initial_number_gaussians, t):
    """
    Save data from fits for use in later modules.
    Args:
        r2 (float): percentage of variability of the burst that's been accounted for in the fit. (how well the regression predicts the data)
        rmse (float): root mean square error between fit and signal
        max_error (float): maximum absolute value of difference between y and y_fit
        max_error_normalized (float): max_error/max(y)
        time_eff (float): rect effective time of signal. time of a rectangle with same max height as the signal
        duration (float): time of signal between indices set by lower and upper 10% of the integrated area of signal
        popt (array): output of curve fit, packed as [amplitudes, offsets, widths, vertical offset]
        ang_vel (float): actual angular velocity Omega
        orbit_radius (float): actual orbit radius R
        x_offsets (float): actual x component of distance between orbit center and light center, D
        y_offsets (float): actual y component of distance between orbit center and light center, D
        number_gaussians (int): number of gaussians used to parameterize burst
        y (array): burst
        noise_avg (float): average noise value
        max_num_gaussians (int): maximum number of gaussians to be included in the fit
        dirctn (int): +1 or -1, clockwise or counter clockwise
        initial_number_gaussians (int): number of Gaussians used in initial fit
        t (array): time corresponding to y
    Returns:
        data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
            Returns None if packaging fails.
    """
    try:
        # fit parameters: popt is [h_0..h_{n-1}, mu_0..mu_{n-1}, sigma_0..sigma_{n-1}, vert_offset]
        h = popt[0:number_gaussians] # amplitude
        mu = popt[number_gaussians:2*number_gaussians] # offset
        sigma = popt[2*number_gaussians:3*number_gaussians] # width
        # to save, we want distances relative to location of first gaussian
        sorted_indices = np.argsort(mu)
        #print('length of mu:', len(mu), 'length of popt:', len(popt), 'number of gaussians', number_gaussians,'length of h:', len(h))
        # pad out to max_num_gaussians so every record has a fixed width
        h_save, mu_save, sigma_save = np.zeros(max_num_gaussians), np.zeros(max_num_gaussians), np.zeros(max_num_gaussians)
        h_save[:number_gaussians] = h[sorted_indices]
        mu_save[:number_gaussians] = mu[sorted_indices]-mu[sorted_indices[0]] # subtract smallest from all to get relative offsets
        sigma_save[:number_gaussians] = sigma[sorted_indices]
        vert_offset_save = popt[-1]
        # derived geometry: distance D and angle theta of the light-center offset
        D = np.sqrt(x_offsets**2+y_offsets**2)
        theta = np.arctan2(y_offsets, x_offsets)
        max_SNR = np.max(y)/noise_avg
        avg_SNR = np.mean(y)/noise_avg
        # one-hot encode rotation direction for the downstream ML input
        if dirctn == 1:
            clockwise = [0]
            counterclockwise = [1]
        else:
            clockwise = [1]
            counterclockwise = [0]
        data = np.concatenate([[ang_vel],[rmse],
                            [r2],[max_error],[max_error_normalized],[time_eff],[duration],
                            h_save,mu_save,sigma_save,[vert_offset_save],[t[1]-t[0]],[orbit_radius], [x_offsets],
                            [y_offsets], [D], [theta], [max_SNR], [avg_SNR], clockwise, counterclockwise, [int(initial_number_gaussians)]])
        return data
    except Exception as excptn:
        # NOTE(review): broad catch is deliberate best-effort — a failed record
        # is reported and dropped rather than aborting the batch
        print("***\n***\nsomething went wrong in package_fit_data\n***\n***")
        print(excptn)
        return
def seed_later_offsets_peaks(y, noise, rect_area=100, prominence_knockdown_factor=0.03):
    """
    Make seed positions for fits after the first fit: peaks of a smoothed
    copy of the residual.

    Args:
        y (array): signal (typically the absolute residual)
        noise (float): noise level of y
        rect_area (float): area of the smoothing rect (area = width * height)
        prominence_knockdown_factor (float): scales the find_peaks prominence
            relative to the max height of the smoothed signal

    Returns:
        peaks (array): candidate positions for seeding the horizontal offsets, mu
    """
    npts = len(y)
    smoothing_rect = rect_generator(npts, npts / 25, rect_area)
    # cross correlation of signal and rect, trimmed to the central window
    smoothed = signal.correlate(y, smoothing_rect)[npts // 2:-(npts // 2)]
    prominence = (np.max(smoothed) - noise * rect_area) * prominence_knockdown_factor
    peaks, _ = find_peaks(smoothed, prominence=prominence)
    return peaks
def subsequent_seeding(t, y, y_fit, popt, number_gaussians, noise_avg):
    """
    Build seed values for the next fitting round from the previous fit.

    A new Gaussian is seeded where the absolute residual of the previous
    fit is largest; the already-fitted offsets and amplitudes are reused
    as starting points for the existing Gaussians.

    Args:
        t (array): times corresponding to y
        y (array): burst
        y_fit (array): fitted burst
        popt (array): output of previous curve fit
        number_gaussians (int): number of gaussians in the previous fit
        noise_avg (float): average value of the noise

    Returns:
        new_translational_offset (array): initial guesses for the translational offsets
        new_amplitude (array): initial guesses for the amplitudes
    """
    abs_residual = np.abs(y - y_fit)
    # seed the new Gaussian at the residual peak with the largest magnitude
    candidate_peaks = seed_later_offsets_peaks(abs_residual, noise_avg)
    worst_peak = candidate_peaks[np.argmax(abs_residual[candidate_peaks])]
    # reuse the previously fitted parameters as initial conditions
    prior_offsets = popt[number_gaussians:number_gaussians * 2]
    prior_amplitudes = popt[:number_gaussians]
    new_translational_offset = np.append(prior_offsets, t[worst_peak])
    new_amplitude = np.append(prior_amplitudes, abs_residual[worst_peak])
    return new_translational_offset, new_amplitude
def fitting_function(selxn, t, y, noise_avg, noise_thresh, ang_vel, orbit_radius, x_offsets, y_offsets, dirctn, max_num_gaussians=8):
    """
    Performs multi Gaussian fits. Initializes first fit based on number of peaks in smoothed copy of burst. The residual of this fit is compared to the noise threshold. Until the absolute value of the residual is smaller than the noise threshold or until more than max_num_gaussians Gaussians are needed to parameterize the fit, subsequent fits place new Gaussians at locations which have large residuals. A great deal of care is taken in this function to standardize the weighting and initial conditions of the fits since the Gaussians inherently are not orthogonal. The goal is to produce fits with Gaussians which appear physical (aren't extremely tall and narrow or short and wide). The fits may not converge, or more gaussians than max_num_gaussians may be required to fit the function. In such cases, the fitting function passes the burst without returning a fit.
    Args:
        selxn (int): burst number being fitted
        t (array): times corresponding to y
        y (array): burst being fitted
        noise_avg (float): average value of the noise
        noise_thresh (float): average value of the noise + standard deviation of noise
        ang_vel (float): actual angular velocity of underlying simulation Omega
        orbit_radius (float): actual orbit radius R
        x_offsets (float): actual x component of distance between orbit center and light center, D
        y_offsets (float): actual y component of distance between orbit center and light center, D
        dirctn (int): +1 or -1 corresponding to direction of rotatoin
        max_num_gaussians (int): maximum number of gaussians used to fit the burst
    Returns:
        data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
            Returns None when the fit fails to converge or needs too many Gaussians.
    """
    # for initial fit, use peak finding to determine the number,
    # location, and initial amplitudes of the Gaussians.
    initial_translational_offset, initial_amplitude = initial_seed(t, y, noise_avg)
    number_gaussians = len(initial_translational_offset)
    initial_number_gaussians = len(initial_translational_offset)
    if number_gaussians > max_num_gaussians:
        print("too many peaks were found initially: number_gaussians>max_number_gaussians.")
        return
    #calculate rect effective time to be used in the initial standard dev. condition.
    delta_t = t[1]-t[0]
    time_eff = calculate_effective_length(y, noise_avg, delta_t) #instead of fitted_vert_offset, use noise_avg (we haven't yet fitted any fitted_vert_offset)
    #print("rect effective time: ", time_eff)
    initial_stddev_denominator = 1#np.random.randint(40, 60, 1)
    # seed every Gaussian's width from the rect effective time of the burst
    initial_stddev = [time_eff/9] * number_gaussians#[np.max(t)/initial_stddev_denominator] * number_gaussians
    initial_vertical_offset = noise_avg
    p0 = initial_guess(initial_amplitude,
                       initial_translational_offset,
                       initial_stddev,
                       initial_vertical_offset)
    #print("initial guesses: current time_eff-based stddev is ", initial_stddev[0], 'previous one was ', np.max(t)/50)
    # initialize curve fitting bounds
    amplitude_bounds = (0,np.max(y)-noise_avg*.25)
    translational_offset_bounds = (0,np.max(t)) ### maybe make these somewhat closer to the seeds
    stddev_bounds = (np.max(t)/150,np.max(t)/2)
    vertical_offset_bounds = (2*noise_avg-noise_thresh,noise_thresh)# noise_thresh=mean+std, noise_avg=mean. so mean-std=2*noise_avg-noise_thresh
    bounds = bound_maker(amplitude_bounds,translational_offset_bounds,stddev_bounds,vertical_offset_bounds,number_gaussians)
    # make weights for fit
    sigma = make_weights(y)
    # limit the max number of function evaluations
    max_nfev = int(30*len(t))
    # try first fit
    try:
        popt,pcov = curve_fit(lambda t,*p0:fit_wrapper(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
    except Exception as e:
        """plt.figure()
        plt.plot(t,y)"""
        print('p0:', p0)
        print('bounds', bounds)
        print('problem in first fit:', e)
        return
    ##### function will only reach this location if initial fit converged.
    # calculate residual
    y_fit = fit_wrapper(t,*popt)
    residual = y-y_fit
    """
    plt.figure()
    plt.plot(t, y, label="data")
    plt.plot(t, y_fit, label="1st fit")
    plt.plot(t, np.abs(residual), label="|residual|")
    plt.plot([0, np.max(t)], [noise_thresh, noise_thresh], 'k--', label="threshold")
    plt.plot([0, np.max(t)], [noise_avg, noise_avg], 'k--', label="mean noise")
    plt.legend()
    """
    """
    print(noise_thresh)
    print(np.any(np.abs(residual)>noise_thresh))
    print(number_gaussians<max_num_gaussians)
    """
    # compare residual to noise threshold to determine whether or not
    # another Gaussian should be added. Only add another Gaussian if
    # there are no more than max_num_gaussians Gaussians already.
    std_dev_residual_previous = np.std(y)
    std_dev_residual_new = np.std(residual)
    #print('std dev of residual is: ', std_dev_residual_new)
    # NOTE(review): & binds tighter than |, so this reads
    # (residual-above-threshold AND room-for-more-gaussians) OR
    # (residual std still shrinking by >20%) — confirm this precedence is intended
    while (np.any(np.abs(residual)>noise_thresh*1.1)) & (number_gaussians<max_num_gaussians) | (std_dev_residual_new<std_dev_residual_previous*.8):
        # try subsequent fit
        # add in another gausian
        new_translational_offset, new_amplitude = subsequent_seeding(t, y, y_fit, popt, number_gaussians, noise_avg)
        old_stddev = popt[number_gaussians*2:number_gaussians*3]
        initial_stddev = np.append(old_stddev, time_eff/8)
        initial_vertical_offset = popt[-1]
        p0 = initial_guess(new_amplitude,
                           new_translational_offset,
                           initial_stddev,
                           initial_vertical_offset)
        # initialize curve fitting bounds
        num_new_gaussians = 1
        bounds = bound_maker_subsequent(t, y, y_fit, popt, num_new_gaussians, amplitude_bounds, stddev_bounds, vertical_offset_bounds, new_translational_offset, noise_avg)
        # try curve fit again
        try:
            popt,pcov = curve_fit(lambda t,*p0:fit_wrapper(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
        except: # if first fit fails to converge, end fitting
            print(selxn, "one of the subsequent fits failed to converge")
            return
        y_fit = fit_wrapper(t,*popt)
        residual = y-y_fit
        number_gaussians += 1
        """
        plt.plot(t, y_fit, label="new fit")
        plt.plot(t, np.abs(residual), label="|new residual|")
        plt.legend()
        """
        std_dev_residual_previous = std_dev_residual_new
        std_dev_residual_new = np.std(residual)
        #print('std dev of residual is: ', std_dev_residual_new)
    if (np.any(np.abs(residual)<noise_thresh*1.1) & (number_gaussians<=max_num_gaussians)):
        print(selxn, "WORKED")
        # package data for ML input.
        r2, rmse, max_error, max_error_normalized, duration = eval_fit(y, y_fit, t, popt, delta_t)
        data = package_fit_data(r2, rmse, max_error, max_error_normalized,
                            time_eff, duration, popt, ang_vel,
                            orbit_radius, x_offsets, y_offsets, number_gaussians, y,
                            noise_avg, max_num_gaussians, dirctn, initial_number_gaussians, t)
        return data
    else:
        print(selxn, "max number of gaussians reached, but fit not within noise threshold")
        return
    # NOTE(review): unreachable — both branches above return
    return
"""
**********************
Fitting functions for erf-rect-erfs (use these when features within the illuminating beam have top hat intensity profiles)
**********************
"""
def error_function(x, x0, w):
    """
    Rising sigmoid built from the error function:
    y = 0.5*(1 + erf(sqrt(2)*(x - x0)/w))

    Args:
        x: array of independent variable (x) values
        x0: error function offset
        w: error function width

    Returns:
        y: computed error function
    """
    scaled = np.sqrt(2) * (x - x0) / w
    return 0.5 * (1 + erf(scaled))
def error_function_complimentary(x, x0, w):
    """
    Falling sigmoid built from the error function:
    y = 0.5*(1 - erf(sqrt(2)*(x - x0)/w))

    Args:
        x: data x values
        x0: error function offset
        w: error function width

    Returns:
        y: computed complimentary error function
    """
    scaled = np.sqrt(2) * (x - x0) / w
    return 0.5 * (1 - erf(scaled))
def fit_wrapper_erfrecterf(x, *args):
    """
    Unpack a flat parameter vector for a variable number of erf-rect-erf
    features and delegate to erf_rect_fit_function.

    Args:
        x (array): independent variable, such that y=f(x)
        args: flat parameter list packed as
            [amplitudes, rising-erf offsets, rising-erf widths,
             falling-erf offsets, falling-erf widths, vertical offset],
            with one entry per feature in each group and the vertical
            offset as the final element.

    Returns:
        array: erf_rect_fit_function evaluated with the unpacked parameters
    """
    n_erfs = (len(args) - 1) // 5  # number of erf-rect-erf features being fitted
    amplitudes = args[0:n_erfs]
    rise_offsets = args[n_erfs:2 * n_erfs]
    rise_widths = args[2 * n_erfs:3 * n_erfs]
    fall_offsets = args[3 * n_erfs:4 * n_erfs]
    fall_widths = args[4 * n_erfs:5 * n_erfs]
    baseline = args[-1]
    return erf_rect_fit_function(x, amplitudes, rise_offsets, rise_widths,
                                 fall_offsets, fall_widths, baseline)
def erf_rect_fit_function(x, a, mu0, sigma0, mu1, sigma1, vertical_offset):
    """
    Build a model curve as a constant baseline plus one erf-rect-erf
    (top-hat) feature per entry of the parameter lists. Each feature is a
    rising error function at mu0 summed with a complimentary (falling)
    error function at mu1, scaled by its amplitude.

    Args:
        x (array): independent variable, such that y=f(x)
        a (list): amplitudes of the erf-rect-erf features
        mu0 (list): translational offsets of the rising erfs
        sigma0 (list): widths of the rising erfs
        mu1 (list): translational offsets of the complimentary erfs
        sigma1 (list): widths of the complimentary erfs
        vertical_offset (float): constant baseline added to the whole curve

    Returns:
        fit (array): baseline plus the sum of all erf-rect-erf features
    """
    # start from the baseline and accumulate each feature on top
    fit = np.full(len(x), float(vertical_offset))
    for amp, rise_at, rise_w, fall_at, fall_w in zip(a, mu0, sigma0, mu1, sigma1):
        # the erf pair sums to 1 far from the feature, hence the -1 so each
        # feature decays to zero away from its top hat
        fit += amp * (error_function(x, rise_at, rise_w)
                      + error_function_complimentary(x, fall_at, fall_w) - 1)
    return fit
def initial_guess_erfrecterf(initial_amplitude, initial_translational_offset0, initial_stddev0, initial_translational_offset1, initial_stddev1, initial_vertical_offset):
    """
    Flatten the erf-rect-erf seed parameters into the p0 vector expected by
    the curve fit.

    Args:
        initial_amplitude (array): seed amplitudes of the erf-rect-erfs
        initial_translational_offset0 (array): seed offsets of the rising erfs
        initial_stddev0 (array): seed widths of the rising erfs
        initial_translational_offset1 (array): seed offsets of the complimentary erfs
        initial_stddev1 (array): seed widths of the complimentary erfs
        initial_vertical_offset (float): seed value for the vertical offset

    Returns:
        p0 (list): [amplitudes, rising offsets, rising widths,
            falling offsets, falling widths, vertical offset] flattened in
            the order the fit wrapper unpacks them
    """
    p0 = (
        list(initial_amplitude)
        + list(initial_translational_offset0)
        + list(initial_stddev0)
        + list(initial_translational_offset1)
        + list(initial_stddev1)
        + [initial_vertical_offset]
    )
    return p0
def bound_maker_erfrecterf(amplitude_bounds, translational_offset_bounds, stddev_bounds, vertical_offset_bounds, number_erfs):
    """
    Assemble the (lower, upper) bounds tuple for an erf-rect-erf curve fit.

    Args:
        amplitude_bounds (tuple): bounds on the feature amplitudes
        translational_offset_bounds (tuple): bounds on the erf offsets
            (applied to both the rising and falling erfs)
        stddev_bounds (tuple): bounds on the erf widths
            (applied to both the rising and falling erfs)
        vertical_offset_bounds (tuple): bounds on the vertical offset
        number_erfs (int): number of erf-rect-erf features in the fit

    Returns:
        bounds (tuple): (lower, upper) lists ordered to match the packed
            parameter vector [a, mu0, sigma0, mu1, sigma1, vertical offset]
    """
    low_a, high_a = amplitude_bounds
    low_mu, high_mu = translational_offset_bounds
    low_s, high_s = stddev_bounds
    low_v, high_v = vertical_offset_bounds
    n = number_erfs
    lower = [low_a] * n + [low_mu] * n + [low_s] * n + [low_mu] * n + [low_s] * n + [low_v]
    upper = [high_a] * n + [high_mu] * n + [high_s] * n + [high_mu] * n + [high_s] * n + [high_v]
    return (lower, upper)
def bound_maker_subsequent_erfrecterf(t, y, y_fit, popt, num_new_erfs, amplitude_bounds, sigma0_bounds, sigma1_bounds, vertical_offset_bounds, new_mu0, new_mu1, noise_avg=0.0):
    """
    Makes the bounds vector for erf-rect-erf fits after the first,
    taking the previously fitted values into account.

    Args:
        t (array): time grid of burst
        y (array): burst
        y_fit (array): previous fit to the burst (unused, kept for interface symmetry)
        popt (array): results from the previous erf-rect-erf curve fit,
            packed as [a, mu0, sigma0, mu1, sigma1, vertical offset]
        num_new_erfs (int): number of erf-rect-erf features to add to the new fit
        amplitude_bounds (array): bounds on the amplitudes (unused directly; new
            amplitude bounds are derived from y and noise_avg)
        sigma0_bounds (array): bounds on the widths of the rising erfs
        sigma1_bounds (array): bounds on the widths of the complimentary erfs
        vertical_offset_bounds (array): bounds on the vertical offset
        new_mu0 (array): new seed positions for the rising erfs
        new_mu1 (array): new seed positions for the complimentary erfs
        noise_avg (float): average noise level, subtracted from the max signal
            when bounding new amplitudes (defaults to 0; the original code
            referenced an undefined global here)

    Returns:
        bounds (tuple): (lower, upper) bounds matching the packed parameter vector
    """
    # BUG FIX: the original referenced undefined names num_erfs_old and
    # noise_avg; the feature count is recoverable from popt's packing
    # [a*n, mu0*n, sigma0*n, mu1*n, sigma1*n, vertical_offset].
    num_erfs_old = (len(popt) - 1) // 5
    amplitudes = popt[0:num_erfs_old]
    mu0 = popt[num_erfs_old:2*num_erfs_old]
    sigma0 = popt[2*num_erfs_old:3*num_erfs_old]
    mu1 = popt[3*num_erfs_old:4*num_erfs_old]
    sigma1 = popt[4*num_erfs_old:5*num_erfs_old]
    vertical_offset = popt[-1]
    # previously fitted amplitudes may move -20%/+40%; new ones are free up to
    # 1.2x the noise-subtracted signal maximum
    lower_amp = np.append(amplitudes-np.abs(amplitudes)*.2, [0]*num_new_erfs)
    upper_amp = np.append(amplitudes+np.abs(amplitudes)*.4, [1.2*(np.max(y)-noise_avg)]*num_new_erfs)
    # limit the movement of the previously fitted erf-rect-erfs.
    lower_mu0 = np.append(mu0*.8, [0]*num_new_erfs)
    upper_mu0 = np.append(mu0*1.2, [np.max(t)]*num_new_erfs)
    lower_mu1 = np.append(mu1*.8, [0]*num_new_erfs)
    upper_mu1 = np.append(mu1*1.2, [np.max(t)]*num_new_erfs)
    if num_new_erfs == 1:
        # a single new feature is constrained to +/-50% of its seed position
        lower_mu0 = np.append(mu0*.8, [new_mu0[-1]*.5])
        upper_mu0 = np.append(mu0*1.2, [new_mu0[-1]*1.5])
        lower_mu1 = np.append(mu1*.8, [new_mu1[-1]*.5])
        upper_mu1 = np.append(mu1*1.2, [new_mu1[-1]*1.5])
    # clamp offsets into the time window
    lower_mu0[lower_mu0<0] = 0
    lower_mu1[lower_mu1<0] = 0
    upper_mu0[upper_mu0>np.max(t)] = .9*np.max(t)
    upper_mu1[upper_mu1>np.max(t)] = .9*np.max(t)
    lower_sigma0 = np.append([sigma0_bounds[0]]*num_erfs_old, [sigma0_bounds[0]]*num_new_erfs)
    lower_sigma1 = np.append([sigma1_bounds[0]]*num_erfs_old, [sigma1_bounds[0]]*num_new_erfs)
    upper_sigma0 = np.append([sigma0_bounds[1]]*num_erfs_old, [sigma0_bounds[1]]*num_new_erfs)
    upper_sigma1 = np.append([sigma1_bounds[1]]*num_erfs_old, [sigma1_bounds[1]]*num_new_erfs)
    # make into array
    lower = np.concatenate((lower_amp, lower_mu0, lower_sigma0, lower_mu1, lower_sigma1, [vertical_offset_bounds[0]]))
    upper = np.concatenate((upper_amp, upper_mu0, upper_sigma0, upper_mu1, upper_sigma1, [vertical_offset_bounds[1]]))
    bounds = (lower, upper)
    return bounds
def find_edges(y, trigger_height):
    """
    Simple zero-crossing edge detector.

    Scans consecutive sample pairs and records an edge index wherever the
    signal crosses trigger_height. If the signal is noisy near the
    threshold, multiple edges may be reported where only one is expected;
    smooth beforehand or post-select in that case.

    Args:
        y (array): signal of interest
        trigger_height (float): height at which a rising or falling edge is detected

    Returns:
        potential_rising_edge (list): indices of rising-edge crossings
        potential_falling_edge (list): indices of falling-edge crossings
    """
    rising, falling = [], []
    for idx in range(len(y) - 1):
        before, after = y[idx], y[idx + 1]
        if before > trigger_height and after < trigger_height:
            falling.append(idx)
        if before < trigger_height and after > trigger_height:
            rising.append(idx)
    return rising, falling
def seed_initial_offsets_edges(y, noise_level):
    """
    Locate rising and falling edges for seeding erf-rect-erf features.

    Args:
        y (array): signal of interest
        noise_level (float): mean noise level; the edge threshold is twice this

    Returns:
        rising_edges (list): rising-edge indices at which to seed erf-rect-erfs
        falling_edges (list): falling-edge indices at which to seed erf-rect-erfs
    """
    # trigger at twice the mean noise level
    return find_edges(y, noise_level * 2)
def seed_initial_offsets_edges_smoothed(y, noise):
    """
    Locate rising and falling edges of a smoothed copy of the signal for
    seeding erf-rect-erf features.

    Args:
        y (array): signal of interest
        noise (float): mean noise level of the signal (unused; the edge
            threshold is a quarter of the signal's peak height)

    Returns:
        rising_edges (list): rising-edge indices at which to seed erf-rect-erfs
        falling_edges (list): falling-edge indices at which to seed erf-rect-erfs
        xc_r (array): smoothed (rect-correlated) copy of the signal
    """
    # edge-detection threshold: a quarter of the peak height
    threshold = np.max(y)*.25
    # Smooth the signal by cross correlating with a narrow rect.
    # BUG FIX: length and width were swapped in the original call — every
    # other call site uses rect_generator(len(y), narrow_width, area) and
    # slices the correlation by len(y)//2.
    area = 4000
    length = len(y)
    width = len(y)//50
    r = rect_generator(length,width,area)
    xc_r = signal.correlate(y,r)[length//2:-(length//2)]
    normalized_xc_r = xc_r/np.max(xc_r)*np.max(y)
    rising_edges, falling_edges = find_edges(normalized_xc_r, threshold)
    return rising_edges, falling_edges, xc_r
def initial_seed_erfrecterf(t, y, noise):
    """
    Make seeds for the first erf-rect-erf fit from the edges of a smoothed
    copy of the signal.

    Args:
        t (array): time corresponding to signal
        y (array): signal
        noise (float): noise level of y

    Returns:
        initial_mu0 (array): seed times for the rising erfs
        initial_mu1 (array): seed times for the complimentary (falling) erfs
        initial_amplitudes (array): seed amplitudes, taken from the signal
            height at each top-hat midpoint minus the noise level
    """
    rising_edges, falling_edges, xc_r = seed_initial_offsets_edges_smoothed(y, noise)
    # amplitude seed: signal value at the midpoint of each top hat, above noise
    initial_amplitudes = np.asarray(
        [y[int((fall - rise) // 2 + rise)] - noise
         for rise, fall in zip(rising_edges, falling_edges)])
    initial_mu0 = t[rising_edges]
    initial_mu1 = t[falling_edges]
    return initial_mu0, initial_mu1, initial_amplitudes
def seed_later_offsets_peaks_erfrecterf(y, noise_level):
    """
    Find the edge pair bracketing the largest residual peak, for seeding a
    new erf-rect-erf feature in a subsequent fit.

    Args:
        y (array): signal of interest (typically the absolute residual)
        noise_level (float): mean noise level; used as the edge threshold,
            halved on retry if no edge pair is found

    Returns:
        rising_edge (int): rising edge bracketing the largest residual peak
        falling_edge (int): falling edge bracketing the largest residual peak
        (None when no edge pair can be found even at the lowered threshold)
    """
    threshold = noise_level
    rising_edges, falling_edges = find_edges(y, threshold)
    # record the mid-point height of each detected top hat
    peak_val = []
    for r, f in zip(rising_edges, falling_edges):
        peak_val.append(np.abs(y[(f-r)//2+r]))
    if not peak_val:  # if peak_val is empty, retry with a lower threshold
        threshold = noise_level*.5
        rising_edges, falling_edges = find_edges(y, threshold)
        for r, f in zip(rising_edges, falling_edges):
            # BUG FIX: the original referenced undefined names
            # falling_edge/rising_edge here instead of the loop variables
            peak_val.append(np.abs(y[(f-r)//2+r]))
    if not peak_val:
        return
    # return the edge pair around the largest peak
    biggest_residual_location = np.argmax(peak_val)
    return rising_edges[biggest_residual_location], falling_edges[biggest_residual_location]
def subsequent_seeding_erfrecterf(t, y, y_fit, popt, number_erfs, noise_threshold):
    """
    Make the seeds for fits after the first fit.

    Seeds one additional erf-rect-erf feature at the location of the largest
    residual and carries the previously fitted parameters over as the
    remaining initial conditions.

    Args:
        t (array): times corresponding to y
        y (array): burst
        y_fit (array): fitted burst
        popt (array): output of previous curve fit, laid out as
            [a..., mu0..., sigma0..., mu1..., sigma1..., vertical offset]
        number_erfs (int): number of erf-rect-erf features in the previous fit
        noise_threshold (float): noise level used for edge finding and
            subtracted from the new amplitude seed
    Returns:
        new_a, new_mu0, new_sigma0, new_mu1, new_sigma1 (arrays): initial
        guesses for the next fit (previous values plus one new feature), or
        None if no new seed location could be found.
    """
    residual = np.abs(y - y_fit)
    # NOTE: leftover debug plotting of residual/y/y_fit was removed here.
    try:
        # Place the new feature where the residual is largest; if the helper
        # returns None the unpack raises and we bail out below.
        rising_edge, falling_edge = seed_later_offsets_peaks_erfrecterf(residual, noise_threshold)
        mu0_new = t[rising_edge]
        mu1_new = t[falling_edge]
        # Amplitude seed: data value at the midpoint of the new edge pair,
        # minus the noise floor.
        a_new = y[(falling_edge - rising_edge) // 2 + rising_edge] - noise_threshold
        # Fixed initial widths for the new feature.
        sigma0_new = 5
        sigma1_new = 5
        # Use the previously fitted parameters as initial conditions and
        # append the new feature's seeds to each parameter group.
        fitted_a = popt[:number_erfs]
        new_a = np.append(fitted_a, a_new)
        fitted_mu0 = popt[number_erfs:2 * number_erfs]
        new_mu0 = np.append(fitted_mu0, mu0_new)
        fitted_sigma0 = popt[2 * number_erfs:3 * number_erfs]
        new_sigma0 = np.append(fitted_sigma0, sigma0_new)
        fitted_mu1 = popt[3 * number_erfs:4 * number_erfs]
        new_mu1 = np.append(fitted_mu1, mu1_new)
        fitted_sigma1 = popt[4 * number_erfs:5 * number_erfs]
        new_sigma1 = np.append(fitted_sigma1, sigma1_new)
    except Exception as e:
        print("Exception in subsequent_seeding", e)
        return
    return new_a, new_mu0, new_sigma0, new_mu1, new_sigma1
def package_fit_data_erfrecterf(r2, rmse, max_error, max_error_normalized,
                        time_eff, duration, popt, fr,
                        number_erfrecterfs,
                        y, noise_avg, max_num_erfrecterfs, initial_number_erfrecterfs):
    """
    Save data from fits for use in later modules.
    Args:
        r2 (float): percentage of variability of the burst that's been accounted for in the fit. (how well the regression predicts the data)
        rmse (float): root mean square error between fit and signal
        max_error (float): maximum absolute value of difference between y and y_fit
        max_error_normalized (float): max_error/max(y)
        time_eff (float): rect effective time of signal. time of a rectangle with same max height as the signal
        duration (float): time of signal between indices set by lower and upper 10% of the integrated area of signal
        popt (array): output of curve fit
        fr (float): flow rate
        number_erfrecterfs (int): number of erf-rect-erf features used to parameterize burst
        y (array): burst
        noise_avg (float): average noise value
        max_num_erfrecterfs (int): maximum number of gaussians to be included in the fit
        initial_number_erfrecterfs (int): number of Gaussians used in initial fit
    Returns:
        data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
    """
    try:
        # fit parameters, sliced from popt in consecutive blocks of
        # number_erfrecterfs values each
        a = popt[0:number_erfrecterfs] # amplitude
        mu0 = popt[number_erfrecterfs:2*number_erfrecterfs] # offset
        sigma0 = popt[2*number_erfrecterfs:3*number_erfrecterfs] # width
        mu1 = popt[3*number_erfrecterfs:4*number_erfrecterfs] # offset
        sigma1 = popt[4*number_erfrecterfs:5*number_erfrecterfs] # width
        # to save, we want distances relative to location of first erfrecterf
        sorted_indices = np.argsort(mu0)
        #print('length of mu:', len(mu), 'length of popt:', len(popt), 'number of gaussians', number_gaussians,'length of h:', len(h))
        # Fixed-width output: pad each parameter group to max_num_erfrecterfs
        # so every saved record has the same length.
        a_save, mu0_save, sigma0_save, mu1_save, sigma1_save = np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs), np.zeros(max_num_erfrecterfs)
        a_save[:number_erfrecterfs] = a[sorted_indices]
        mu0_save[:number_erfrecterfs] = mu0[sorted_indices]-mu0[sorted_indices[0]] # subtract smalles from all to get relative offsets
        sigma0_save[:number_erfrecterfs] = sigma0[sorted_indices]
        mu1_save[:number_erfrecterfs] = mu1[sorted_indices]-mu0[sorted_indices[0]] # subtract smalles from all to get relative offsets
        sigma1_save[:number_erfrecterfs] = sigma1[sorted_indices]
        # last fitted parameter is the vertical offset
        vert_offset_save = popt[-1]
        max_SNR = np.max(y)/noise_avg
        avg_SNR = np.mean(y)/noise_avg
        # NOTE(review): `t` is not a parameter of this function — the sample
        # spacing t[1]-t[0] below reads a module-level global `t`. Confirm it
        # corresponds to the burst being packaged, or pass t in explicitly.
        data = np.concatenate([[fr],[rmse],
                  [r2],[max_error],[max_error_normalized],[time_eff],[duration],
                  a_save,mu0_save,sigma0_save,mu1_save, sigma1_save,[vert_offset_save],[t[1]-t[0]],
                  [max_SNR], [avg_SNR], [int(initial_number_erfrecterfs)]])
        return data
    except Exception as e:
        # Best-effort: on any failure, report and return None so the caller
        # can skip this burst.
        print('Exception:', e)
        print("***\n***\nsomething went wrong in package_fit_data\n***\n***")
        return
def fitting_function_erfrecterf(selxn, t, y, noise_avg, noise_thresh, fr, max_num_erfrecterfs=4):
    """
    Performs erf-rect-erf fits. Initializes first fit based on number of edges in smoothed copy of burst. The residual of this fit is compared to the noise threshold. Until the absolute value of the residual is smaller than the noise threshold or until more than max_num_erfrecterfs features are needed to parameterize the fit, subsequent fits place new Gaussians at locations which have large residuals. A great deal of care is taken in this function to standardize the weighting and initial conditions of the fits since the erf-rect-erf features inherently are not orthogonal. The goal is to produce fits with Gaussians which appear physical (aren't extremely tall and narrow or short and wide). The fits may not converge, or more features than max_num_erfrecterfs may be required to fit the function. In such cases, the fitting function passes the burst without returning a fit.
    Args:
        selxn (int): burst number being fitted
        t (array): times corresponding to y
        y (array): burst being fitted
        noise_avg (float): average value of the noise
        noise_thresh (float): average value of the noise + standard deviation of noise
        fr (float): actual flow rate underlying simulation
        max_num_erfrecterfs (int): maximum number of erf-rect-erf features used to fit the burst
    Returns:
        data (array): contains many of the arguments and several other metrics, packaged for pickling for use in next module.
    """
    # check that there are enough points above the noise threshold to actually do a fit
    if np.shape(np.argwhere(y>noise_avg + 3*(noise_thresh-noise_avg)))[0]<12:
        print("not enough of the burst has an intensity greater than 2x the noise threshold ")
        return
    # for initial fit, use peak finding to determine the number,
    # location, and initaial amplitudes of the Gaussians.
    initial_mu0, initial_mu1, initial_amplitude = initial_seed_erfrecterf(t, y, noise_thresh)
    number_erfrecterfs = len(initial_mu0)
    if number_erfrecterfs > max_num_erfrecterfs:
        print("too many peaks were found initially: number_erfrecterfs > max_num_erfrecterfs.")
        return
    #calculate rect effective time to be used in the initial standard dev. condition.
    delta_t = t[1]-t[0]
    time_eff = calculate_effective_length(y, noise_avg, delta_t) #instead of fitted_vert_offset, use noise_avg (we haven't yet fitted any fitted_vert_offset)
    #print("rect effective time: ", time_eff)
    # seed all widths at a fifth of the effective time
    initial_sigma0 = [time_eff/5]*number_erfrecterfs
    initial_sigma1 = [time_eff/5]*number_erfrecterfs
    # initialize vertical offset from the mean of the first and last fifths
    # of the burst (assumed to be baseline)
    initial_vertical_offset = noise_avg + np.mean( [np.mean(y[:len(y)//5]), np.mean(y[4*len(y)//5:])] )
    p0 = initial_guess(initial_amplitude,
                       initial_mu0,
                       initial_sigma0,
                       initial_mu1,
                       initial_sigma1,
                       initial_vertical_offset)
    # initialize curve fitting bounds
    amplitude_bounds = (noise_avg,np.max(y)-noise_avg*.25)
    mu_bounds = (0,np.max(t)) ### maybe make these somewhat closer to the seeds
    sigma_bounds = (np.max(t)/150,np.max(t)/2)
    vertical_offset_bounds = (.95*np.min( [np.min(y[:len(y)//5]), np.min(y[4*len(y)//5:])]), noise_avg+1.25*np.max( [np.max(y[:len(y)//5]), np.max(y[4*len(y)//5:])]) )
    bounds = bound_maker_erfrecterf(amplitude_bounds,mu_bounds,sigma_bounds,vertical_offset_bounds,number_erfrecterfs)
    initial_number_erfrecterfs = len(initial_sigma0)
    # make weights for fit
    sigma = make_weights(y, g_length=50)
    # limit the max number of function evaluations
    max_nfev = int(30*len(t))
    # try first fit
    try:
        popt,pcov = curve_fit(lambda t,*p0:fit_wrapper_erfrecterf(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
    except Exception as e:
        print('p0:', p0)
        print('bounds', bounds)
        print('problem in first fit:', e)
        return
    ##### function will only reach this location if initial fit converged.
    # calculate residual
    y_fit = fit_wrapper_erfrecterf(t,*popt)
    residual = y-y_fit
    # NOTE(review): the two triple-quoted blocks below are dead string-literal
    # statements kept as disabled debug code.
    """plt.figure()
    plt.plot(t, y, label="data")
    plt.plot(t, y_fit, label="1st fit")
    plt.plot(t, np.abs(residual)/sigma**2, label="|residual|/sigma**2")
    #plt.plot([0, np.max(t)], [noise_thresh, noise_thresh], 'k--', label="threshold")
    plt.plot([0, np.max(t)], [750, 750], 'k--', label="threshold")
    #plt.plot([0, np.max(t)], [noise_avg, noise_avg], 'k--', label="mean noise")
    plt.legend()"""
    """
    print(noise_thresh)
    print(np.any(np.abs(residual)>noise_thresh))
    print(number_gaussians<max_num_gaussians)
    """
    # compare residual to noise threshold to determine whether or not
    # another Gaussian should be added. Only add another Gaussian if
    # there are no more than max_num_gaussians Gaussians already.
    std_dev_residual_previous = 9999999#noise_thresh-noise_avg#np.std(y)
    std_dev_residual_new = np.std(residual)
    fitnum = 1
    noisethresh_to_use = .05
    # Keep adding features while (a) the weighted residual exceeds the
    # threshold anywhere, (b) we are under the feature cap, and (c) the last
    # feature reduced the residual std-dev by at least 20%.
    while (np.any(np.abs(residual)/sigma**2>noisethresh_to_use)) & (number_erfrecterfs<max_num_erfrecterfs) & (std_dev_residual_new<std_dev_residual_previous*.8):
        # NOTE(review): leftover debug plotting/printing below; consider
        # removing or gating behind a verbose flag.
        plt.figure()
        plt.plot(y, label='y')
        plt.plot(y_fit, label='fitted')
        plt.plot((y-y_fit)/sigma**2, label='scaled residual')
        plt.plot([0,len(y_fit)], [noisethresh_to_use, noisethresh_to_use], label='threshold')
        plt.legend()
        print('initial fit insufficient')
        # try subsequent fit
        # add in another gausian
        fitnum += 1
        print('fit number', fitnum)
        try:
            new_a, new_mu0, new_sigma0, new_mu1, new_sigma1 = subsequent_seeding_erfrecterf(t, y, y_fit, popt, number_erfrecterfs, noise_thresh)
            initial_vertical_offset = popt[-1]
            p0 = initial_guess(new_a,
                               new_mu0,
                               new_sigma0,
                               new_mu1,
                               new_sigma1,
                               initial_vertical_offset)
            sigma0_bounds = (np.max(t)/150,np.max(t)/2)
            sigma1_bounds = (np.max(t)/150,np.max(t)/2)
            # initialize curve fitting bounds
            num_new_erfrecterfs = 1
            bounds = bound_maker_subsequent_erfrecterf(t, y, y_fit, popt, num_new_erfrecterfs, amplitude_bounds,
                                       sigma0_bounds, sigma1_bounds, vertical_offset_bounds, new_mu0, new_mu1)
            # try curve fit again
        except Exception as e:
            print(e)
            print("$$$$$$$$$$$$$")
            return
        try:
            popt,pcov = curve_fit(lambda t,*p0:fit_wrapper_erfrecterf(t,*p0),t,y,p0=p0,bounds=bounds,x_scale=np.max(t),sigma=sigma,max_nfev=max_nfev,absolute_sigma=False)
        except: # if first fit fails to converge, end fitting
            print(selxn, "one of the subsequent fits failed to converge")
            return
        y_fit = fit_wrapper_erfrecterf(t,*popt)
        residual = y-y_fit
        number_erfrecterfs += 1
        print('num erfs',number_erfrecterfs)
        std_dev_residual_previous = std_dev_residual_new
        std_dev_residual_new = np.std(residual)
        #print('std dev of residual is: ', std_dev_residual_new)
    # NOTE(review): np.any(|residual/sigma**2| < thresh) is true whenever ANY
    # single point is below threshold — possibly np.all (or
    # `not np.any(... > ...)`) was intended here; confirm.
    if (np.any(np.abs(residual/sigma**2)<noisethresh_to_use) & (number_erfrecterfs<=max_num_erfrecterfs)):
        print(selxn, "WORKED")
        # package data for ML input.
        r2, rmse, max_error, max_error_normalized, duration = eval_fit(y, y_fit, t, popt, delta_t)
        data = package_fit_data_erfrecterf(r2, rmse, max_error, max_error_normalized,
                             time_eff, duration, popt, fr,
                             number_erfrecterfs, y,
                             noise_avg, max_num_erfrecterfs, initial_number_erfrecterfs)
        """ plt.figure()
        plt.plot(t, y, label='signal')
        plt.plot(t, y_fit, label="fit")
        #plt.plot(t, np.abs(residual), label="|new residual|")
        plt.legend()"""
        #print(number_erfrecterfs)
        return data
    else:
        print(selxn, "max number of erfrecterfs reached, but fit not within noise threshold")
        return
    return
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
629,
541,
88,
1330,
6737,
198,
6738,
629,
541,
88,
13,
40085,
1096,
1330,
12133,
62,
11147,
198,
6738,
629,
541,
88,
13,
40085,
... | 2.39992 | 25,090 |
"""Build TF-IDF vectors for unique tweets, reduce them to 20 dimensions with
PCA, and write the reduced vectors to a CSV file."""
import csv

from sklearn.decomposition import PCA
from sklearn.feature_extraction.text import TfidfVectorizer

# Read the tweet corpus: every cell of every row is treated as one document.
# Context manager fixes the original's leaked file handle.
corpus = []
with open('/home/anoukh/SentimentAnalysis/GloVe-1.2/data/0.3uniqueTweets.csv') as f:
    reader = csv.reader(f, delimiter=',', quoting=csv.QUOTE_NONE)
    for row in reader:
        corpus.extend(row)

vectorizer = TfidfVectorizer(analyzer='word', min_df=0, max_features=500,
                             stop_words='english', use_idf=True)
X = vectorizer.fit_transform(corpus)
print("n_samples: %d, n_features: %d" % X.shape)

# Originally a Python 2 print statement (a syntax error on Python 3);
# normalized to the print() function used elsewhere in this script.
print("Using PCA to reduce dimensions")
pca = PCA(n_components=20)
reducedDimensions = pca.fit_transform(X.toarray())

# 'w' + newline='' is the Python 3 idiom for csv output (was 'wb').
with open('/home/anoukh/SentimentAnalysis/0.3uniqueTFIDFTweetVectorsSize200.csv',
          'w', newline='') as outputFile:
    writer = csv.writer(outputFile, delimiter=',', quotechar='"',
                        quoting=csv.QUOTE_MINIMAL, escapechar=',')
    for tweet in reducedDimensions:
        writer.writerow(tweet)
| [
6738,
1341,
35720,
13,
12501,
296,
9150,
1330,
4217,
32,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
198,
11748,
269,
21370,
198,
22915,
8979,
796,
1280,
10786,
14,
11195,
14,
272,... | 2.472222 | 396 |
#!/usr/bin/env python
"""
* *******************************************************
* Copyright VMware, Inc. 2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
__vcenter_version__ = 'VMware Cloud on AWS'
from pprint import pprint as pp
from com.vmware.content.library_client import Item
from com.vmware.vcenter.ovf_client import LibraryItem
from com.vmware.vcenter_client import ResourcePool, Folder, Network
from com.vmware.vcenter.vm.hardware_client import Ethernet
from vmware.vapi.vsphere.client import create_vsphere_client
from samples.vsphere.common import sample_cli, sample_util
from samples.vsphere.common.id_generator import generate_random_uuid
class DeployOvfTemplate:
    """
    Demonstrates the workflow to deploy an OVF library item to
    a resource pool in VMware Cloud on AWS.
    Note: the sample needs an existing library item with an OVF template
    and an existing resource pool with resources for deploying the VM.
    """
    # NOTE(review): the class body contains no methods in this excerpt — the
    # sample's setup/deploy/cleanup logic appears to have been truncated.
if __name__ == '__main__':
    # NOTE(review): main() is not defined in this excerpt; presumably it is
    # part of the truncated sample code — confirm before running.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
9,
41906,
8412,
2466,
8162,
198,
9,
15069,
37754,
11,
3457,
13,
2864,
13,
1439,
6923,
33876,
13,
198,
9,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
9,
... | 3.334118 | 425 |
from unittest import TestCase, skip
from libumccr.aws import libsm
| [
6738,
555,
715,
395,
1330,
6208,
20448,
11,
14267,
198,
198,
6738,
9195,
388,
535,
81,
13,
8356,
1330,
9195,
5796,
628,
198
] | 3.043478 | 23 |
"""# API docs for Datapane Client
These docs describe the Python API for building Datapane documents, along with additional information on the Datapane Teams API.
Usage docs for Datapane can be found at https://docs.datapane.com
These objects are all available under the `datapane` module, via `import datapane as dp` (they are re-exported from `datapane.client.api`).
### Datapane Reports API
The core document APIs are available for both Datapane Community and Datapane Teams, these are found in `datapane.client.api.report`, including,
- `datapane.client.api.report.core.Report`
- Layout Blocks
- `datapane.client.api.report.blocks.Page`
- `datapane.client.api.report.blocks.Group`
- `datapane.client.api.report.blocks.Select`
- Data Blocks
- `datapane.client.api.report.blocks.Plot`
- `datapane.client.api.report.blocks.Table`
- `datapane.client.api.report.blocks.DataTable`
- `datapane.client.api.report.blocks.Media`
- `datapane.client.api.report.blocks.Formula`
- `datapane.client.api.report.blocks.BigNumber`
- `datapane.client.api.report.blocks.Text`
- `datapane.client.api.report.blocks.Code`
- `datapane.client.api.report.blocks.HTML`
### Datapane Teams
Additional API docs are found in `datapane.client.api.teams` that provide building, deployment, and sharing of data analytics apps and workflows
- `datapane.client.api.teams.File`
- `datapane.client.api.teams.Environment`
- `datapane.client.api.teams.App`
- `datapane.client.api.teams.Schedule`
..note:: These docs describe the latest version of the datapane API available on [pypi](https://pypi.org/project/datapane/)
<a href="https://pypi.org/project/datapane/">
<img src="https://img.shields.io/pypi/v/datapane?color=blue" alt="Latest release" />
</a>
"""
# flake8: noqa F401
# Internal API re-exports
import warnings
from .common import HTTPError, Resource
from .dp_object import DPObjectRef
from .report.blocks import (
Attachment,
BigNumber,
Code,
Group,
DataTable,
Divider,
Embed,
Empty,
Media,
Formula,
HTML,
Media,
Page,
Plot,
Select,
SelectType,
Text,
Table,
Toggle,
)
from .report.core import FontChoice, PageLayout, Report, ReportFormatting, ReportWidth, TextAlignment, Visibility
from .runtime import Params, Result, by_datapane, _reset_runtime, _report
from .teams import App, Environment, File, Run, Schedule
from .user import hello_world, login, logout, ping, signup
from ..utils import IncompatibleVersionError
from ..config import init
from . import builtins
| [
37811,
2,
7824,
34165,
329,
16092,
499,
1531,
20985,
198,
198,
4711,
34165,
6901,
262,
11361,
7824,
329,
2615,
16092,
499,
1531,
4963,
11,
1863,
351,
3224,
1321,
319,
262,
16092,
499,
1531,
24690,
7824,
13,
198,
198,
28350,
34165,
329,
... | 2.83913 | 920 |
#!/usr/bin/python3
#
# Copyright 2017, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os import stat
from syscall import *
from syscalltable import *
# minimum required version of vltrace log
VLTRACE_VMAJOR = 0
VLTRACE_VMINOR = 1
VLTRACE_TAB_SIGNATURE = "VLTRACE_TAB" # signature of vltrace syscall table
VLTRACE_LOG_SIGNATURE = "VLTRACE_LOG" # signature of vltrace log
# currently only the x86_64 architecture is supported
ARCH_x86_64 = 1
# index 0 is a placeholder so Archs[ARCH_x86_64] maps to "x86_64"
Archs = ["Unknown", "x86_64"]
# control codes used by the converter below
# (presumably: DO_GO_ON = keep parsing, DO_REINIT = restart parser state —
# confirm against the converter logic, which is outside this excerpt)
DO_GO_ON = 0
DO_REINIT = 1
# sentinel for a syscall number missing from the syscall table
SYSCALL_NOT_FOUND = -1
########################################################################################################################
# Converter
########################################################################################################################
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
2,
198,
2,
15069,
2177,
11,
8180,
10501,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
198,
2,
17613,
11,
389,
10431,
2810,
326,
262,
1... | 3.456884 | 661 |
"""
Create tensorflow TFRecord files from images from the following directory structure:
<input-directory>
<class1>
<image1>
<image2>
...
<class2>
...
"""
import getopt
import os
import sys
import tensorflow as tf
import numpy as np
import png
from tqdm import tqdm
# Python 2/3 compatibility shim: itertools.imap exists only on Python 2;
# on Python 3 the built-in map() is already lazy.
try:
    from itertools import imap
except ImportError:
    # For Python 3
    imap=map
if __name__ == "__main__":
    # main() is defined elsewhere in this file; pass CLI args without argv[0].
    main(sys.argv[1:])
| [
37811,
198,
16447,
11192,
273,
11125,
24958,
23739,
3696,
422,
4263,
422,
262,
1708,
8619,
4645,
25,
198,
27,
15414,
12,
34945,
29,
198,
220,
220,
220,
1279,
4871,
16,
29,
198,
220,
220,
220,
220,
220,
220,
220,
1279,
9060,
16,
29,
... | 2.432432 | 185 |
import os
import numpy as np
import scipy
import cupy as cp
from cupy.cuda import Device
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
# import socket
from ..common import _start, _finish
# Start measurement
# End measurement
# Initialize parameters
| [
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
629,
541,
88,
198,
11748,
6508,
88,
355,
31396,
198,
6738,
6508,
88,
13,
66,
15339,
1330,
16232,
198,
6738,
6508,
88,
13,
66,
15339,
13,
43282,
1330,
651,
24728,
12332,
198... | 2.207207 | 111 |
import sqlite3
from skylark import Model, Field, Database
| [
11748,
44161,
578,
18,
198,
198,
6738,
1341,
2645,
668,
1330,
9104,
11,
7663,
11,
24047,
628,
628
] | 3.444444 | 18 |
#Python Program to demonstrate single inheritance
#Base class
# NOTE(review): the base-class definition is missing from this excerpt.
#Derived class
# NOTE(review): the derived class `Child` is not defined in this excerpt;
# as shown, the driver code below would raise NameError. Child must provide
# fun1 (own or inherited) and fun2.
#Driver's code
# NOTE(review): the name `object` shadows the built-in `object`; prefer a
# different variable name (e.g. `child`).
object=Child()
object.fun1()
object.fun2()
2,
37906,
6118,
284,
10176,
2060,
24155,
198,
2,
14881,
1398,
198,
2,
28532,
1572,
1398,
198,
2,
32103,
338,
2438,
198,
15252,
28,
16424,
3419,
198,
15252,
13,
12543,
16,
3419,
198,
15252,
13,
12543,
17,
3419,
220,
220,
220,
220,
22... | 3.042553 | 47 |
import numpy as np
import random
import tensorflow as tf
class QNetwork(object):
	"""
	Base class for QNetworks.
	Subclasses are expected to provide a variables() method returning the
	network's TF variables in a stable order (copy_to below relies on it).
	"""
	def copy_to(self, dst_net):
		"""
		Copy this network's variable values into dst_net.
		dst_net must have the same architecture, i.e. variables() must
		return lists of matching length/shapes in the same order.
		Example:
		mn = ModelNetwork(2, 3, 0, "actor")
		mn_target = ModelNetwork(2, 3, 0, "target_actor")
		s=tf.InteractiveSession()
		s.run( tf.initialize_all_variables() )
		mn.copy_to(mn_target)
		"""
		v1 = self.variables()
		v2 = dst_net.variables()
		for i in range(len(v1)):
			# NOTE(review): .eval() requires an active default TF1 session —
			# confirm callers always invoke this inside one.
			v2[i].assign( v1[i] ).eval()
class QNetworkNIPS(QNetwork):
	"""
	QNetwork used in ``Playing Atari with Deep Reinforcement Learning'', [Mnih et al., 2013].
	It's a Convolutional Neural Network with the following specs:
		L1: 16 8x8 filters with stride 4 + RELU
		L2: 32 4x4 filters with stride 2 + RELU
		L3: 256 unit Fully-Connected layer + RELU
		L4: [output_size] output units, Fully-Connected
	"""
	# NOTE(review): no methods appear in this excerpt — the layer
	# construction / forward pass seems to have been truncated.
class QNetworkNature(QNetwork):
	"""
	QNetwork used in ``Human-level control through deep reinforcement learning'', [Mnih et al., 2015].
	It's a Convolutional Neural Network with the following specs:
		L1: 32 8x8 filters with stride 4 + RELU
		L2: 64 4x4 filters with stride 2 + RELU
		L3: 64 3x3 fitlers with stride 1 + RELU
		L4: 512 unit Fully-Connected layer + RELU
		L5: [output_size] output units, Fully-Connected
	"""
	# NOTE(review): no methods appear in this excerpt — the layer
	# construction / forward pass seems to have been truncated.
class QNetworkDueling(QNetwork):
	"""
	QNetwork used in ``Human-level control through deep reinforcement learning'', [Mnih et al., 2015].
	It's a Convolutional Neural Network with the following specs:
		L1: 32 8x8 filters with stride 4 + RELU
		L2: 64 4x4 filters with stride 2 + RELU
		L3: 64 3x3 fitlers with stride 1 + RELU
		L4a: 512 unit Fully-Connected layer + RELU
		L4b: 512 unit Fully-Connected layer + RELU
		L5a: 1 unit FC + RELU (State Value)
		L5b: #actions FC + RELU (Advantage Value)
		L6: Aggregate V(s)+A(s,a)
	"""
	# NOTE(review): no methods appear in this excerpt — the dueling-head
	# construction / forward pass seems to have been truncated.
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
11748,
11192,
273,
11125,
355,
48700,
628,
198,
198,
4871,
1195,
26245,
7,
15252,
2599,
198,
197,
37811,
198,
197,
14881,
1398,
329,
1195,
7934,
5225,
13,
198,
197,
37811,
628,
628,
... | 2.634807 | 701 |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_safe_grant_facts
short_description: Fetches details about one or multiple Grant resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple Grant resources in Oracle Cloud Infrastructure
- Gets a list of grants for a particular user in the specified user assessment. A user grant contains details such as the
privilege name, type, category, and depth level. The depth level indicates how deep in the hierarchy of roles granted to
roles a privilege grant is. The userKey in this operation is a system-generated identifier. Perform the operation ListUsers
to get the userKey for a particular user.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
user_assessment_id:
description:
- The OCID of the user assessment.
type: str
required: true
user_key:
description:
- The unique user key. This is a system-generated identifier. ListUsers gets the user key for a user.
type: str
required: true
grant_key:
description:
- A filter to return only items that match the specified user grant key.
type: str
grant_name:
description:
- A filter to return only items that match the specified user grant name.
type: str
privilege_type:
description:
- A filter to return only items that match the specified privilege grant type.
type: str
privilege_category:
description:
- A filter to return only items that match the specified user privilege category.
type: str
depth_level:
description:
- A filter to return only items that match the specified user grant depth level.
type: int
depth_level_greater_than_or_equal_to:
description:
- A filter to return only items that are at a level greater than or equal to the specified user grant depth level.
type: int
depth_level_less_than:
description:
- A filter to return only items that are at a level less than the specified user grant depth level.
type: int
sort_order:
description:
- The sort order to use, either ascending (ASC) or descending (DESC).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The field to sort by. You can specify only one sort order (sortOrder). The default order for grantName is ascending.
type: str
choices:
- "grantName"
- "grantType"
- "privilegeCategory"
- "depthLevel"
- "key"
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: List grants
oci_data_safe_grant_facts:
# required
user_assessment_id: "ocid1.userassessment.oc1..xxxxxxEXAMPLExxxxxx"
user_key: user_key_example
# optional
grant_key: grant_key_example
grant_name: grant_name_example
privilege_type: privilege_type_example
privilege_category: privilege_category_example
depth_level: 56
depth_level_greater_than_or_equal_to: 56
depth_level_less_than: 56
sort_order: ASC
sort_by: grantName
"""
RETURN = """
grants:
description:
- List of Grant resources
returned: on success
type: complex
contains:
key:
description:
- The unique key of a user grant.
returned: on success
type: str
sample: key_example
grant_name:
description:
- The name of a user grant.
returned: on success
type: str
sample: grant_name_example
privilege_type:
description:
- The type of a user grant.
returned: on success
type: str
sample: SYSTEM_PRIVILEGE
privilege_category:
description:
- The privilege category.
returned: on success
type: str
sample: CRITICAL
depth_level:
description:
- The grant depth level of the indirect grant.
An indirectly granted role/privilege is granted to the user through another role.
The depth level indicates how deep a privilege is within the grant hierarchy.
returned: on success
type: int
sample: 56
sample: [{
"key": "key_example",
"grant_name": "grant_name_example",
"privilege_type": "SYSTEM_PRIVILEGE",
"privilege_category": "CRITICAL",
"depth_level": 56
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.data_safe import DataSafeClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataSafeGrantFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: list"""
    # NOTE(review): the required/optional-param and list_resources helper
    # methods appear to have been truncated from this excerpt.
# get_custom_class returns the *Custom subclass if one is registered,
# otherwise falls back to the generated helper above.
DataSafeGrantFactsHelperCustom = get_custom_class("DataSafeGrantFactsHelperCustom")
if __name__ == "__main__":
    # main() builds the AnsibleModule and runs the facts helper; it is not
    # visible in this excerpt.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
15069,
357,
66,
8,
12131,
11,
33448,
18650,
290,
14,
273,
663,
29116,
13,
198,
2,
770,
3788,
318,
925,
1695,
284,
345,
739,
262,
2846,
286,
262,
38644,
513,
13,
15,
5964,
393,
262,
2... | 2.50735 | 2,381 |
from ply import lex, yacc
from . import tokrules
from .parser import DPOMDP, Parser
__all__ = ['DPOMDP_Parser', 'parse']
# legacy
| [
6738,
35960,
1330,
31191,
11,
331,
4134,
198,
198,
6738,
764,
1330,
284,
38584,
5028,
198,
6738,
764,
48610,
1330,
27704,
2662,
6322,
11,
23042,
263,
198,
198,
834,
439,
834,
796,
37250,
6322,
2662,
6322,
62,
46677,
3256,
705,
29572,
... | 2.8125 | 48 |
"""
=========================================================================
CGRAFL.py
=========================================================================
CGRAFL -- running DFG nodes one by one.
Author : Cheng Tan
Date : Feb 13, 2020
"""
from pymtl3 import *
from ..lib.opt_type import *
from ..lib.messages import *
#------------------------------------------------------------------------
# Assuming that the elements in FuDFG are already ordered well.
#------------------------------------------------------------------------
| [
37811,
198,
23926,
2559,
28,
198,
39816,
3861,
3697,
13,
9078,
198,
23926,
2559,
28,
198,
39816,
3861,
3697,
1377,
2491,
360,
30386,
13760,
530,
416,
530,
13,
198,
198,
13838,
1058,
27692,
11818,
198,
220,
7536,
1058,
3158,
1511,
11,
... | 5.198113 | 106 |