'''
Using parameters
=================
'''
import matplotlib.pyplot as plt
import numpy as np
import pyant
beam = pyant.Airy(
    azimuth=[0, 45.0, 0],
    elevation=[89.0, 80.0, 60.0],
    frequency=[930e6, 230e6],
    I0=10**4.81,
    radius=np.linspace(10, 23.0, num=20),
)
k = np.array([0,0,1.0]).T
#This is the shape and names of the parameters
print(f'beam.shape = {beam.shape}')
print(f'beam.parameters = {beam.parameters}')
#this means their values can be found through the corresponding attributes
print(f'beam.radius = {beam.radius}')
#One needs to choose values for all parameters,
#either through direct input to beam.gain
print(f'G = {beam.gain(k, pointing=k, frequency=314e6, radius=20.0)} ')
#pointing is the only parameter that also supports input by azimuth and elevation
print(f'G = {beam.gain(k, azimuth=20.2, elevation=89.1, frequency=314e6, radius=20.0)} ')
#Or through indexing of the currently entered parameters
print(f'G = {beam.gain(k, ind=(0,1,10))} ')
#(indexing can also be done as a dict for more readability)
print(f'G = {beam.gain(k, ind=dict(pointing=0,frequency=1,radius=10))} ')
#Or a combination of both
print(f'G = {beam.gain(k, ind=(0,None,10), frequency=333e6)} ')
print('-- exceptions --')
#Inconsistencies raise value and type errors,
#like supplying both an index and a value
try:
    print(f'G = {beam.gain(k, ind=(0,1,10), frequency=333e6)} ')
except Exception as e:
    print(f'Exception: "{e}"')
#or not giving values for parameters at all
try:
    print(f'G = {beam.gain(k)} ')
except Exception as e:
    print(f'Exception: "{e}"')
#or not giving enough values
try:
    print(f'G = {beam.gain(k, ind=(0,1))} ')
except Exception as e:
    print(f'Exception: "{e}"')
#or trying to index scalar parameters
beam.frequency = 930e6
#now the size will be None for this parameter
print(f'beam.shape = {beam.shape}')
#so indexing it will raise an error
try:
    print(f'G = {beam.gain(k, ind=(0,1,10))} ')
except Exception as e:
    print(f'Exception: "{e}"')
print('-- exceptions end --')
#while setting it to None will just use the parameter value
print(f'G = {beam.gain(k, ind=(0,None,10))} ')
#if you have all scalar parameters, no index needs to be supplied
beam.radius = 23
beam.pointing = k.copy()
print(f'G = {beam.gain(k)} ')
#this also works with size=1 parameters
beam.radius = [23]
print(f'G = {beam.gain(k)} ')
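#Since matplotlib is imported above, the gain pattern can also be visualized.
#A minimal sketch (the grid resolution and wavenumber window are illustrative
#choices, not pyant defaults); beam.gain(k) works here because all parameters
#are scalar after the assignments above.
kx = np.linspace(-0.3, 0.3, num=50)
ky = np.linspace(-0.3, 0.3, num=50)
G = np.full((50, 50), np.nan)
for i in range(50):
    for j in range(50):
        kz2 = 1.0 - kx[i]**2 - ky[j]**2
        if kz2 < 0:
            continue  # outside the unit sphere, no valid wave vector
        G[j, i] = beam.gain(np.array([kx[i], ky[j], np.sqrt(kz2)]))
plt.pcolormesh(kx, ky, 10.0 * np.log10(G))
plt.colorbar(label='Gain [dB]')
plt.show()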
|
from models import modifyresnet18, UNet, Synthesizer
import torch
import torch.optim as optim
import torch.nn as nn
from util.datahelper import load_all_training_data, sample_from_dict # correctness not yet verified
import itertools
import numpy as np
import librosa.display
import matplotlib.pyplot as plt
import os
from torch.autograd import Variable
from util.validation import spec2wave,compute_sdr
from librosa import amplitude_to_db
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def data_in_one(inputdata): # map data into the range [0, 1]
    inputdata = (inputdata - inputdata.min()) / (inputdata.max() - inputdata.min())
    return inputdata
def train(spec_dir, image_dir, model_dir, batch_size=8, validate_freq=200):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
video_net = modifyresnet18(batch_size).to(device) # load networks
audio_net = UNet().to(device)
audio_net._initialize_weights()
syn_net = Synthesizer().to(device)
syn_net._initialize_weights()
#print('gpu',device)
if os.path.exists(os.path.join(model_dir, 'video_net_params.pkl')) and os.path.exists(
os.path.join(model_dir, 'audio_net_params.pkl')) and os.path.exists(
os.path.join(model_dir, 'syn_net_params.pkl')):
print('load params!')
video_net.load_state_dict(torch.load(os.path.join(model_dir, 'video_net_params.pkl')))
audio_net.load_state_dict(torch.load(os.path.join(model_dir, 'audio_net_params.pkl')))
syn_net.load_state_dict(torch.load(os.path.join(model_dir, 'syn_net_params.pkl')))
video_net.train()
audio_net.train()
syn_net.train()
optim_video = optim.SGD(video_net.parameters(), lr=0.0001, momentum=0.9) # define optimizers
optim_audio = optim.SGD(audio_net.parameters(), lr=0.001, momentum=0.9)
optim_syn = optim.SGD(syn_net.parameters(), lr=0.001, momentum=0.9)
#optim_video.zero_grad() # zero parameter gradients
#optim_audio.zero_grad()
#optim_syn.zero_grad()
[spec_data, image_data] = load_all_training_data(spec_dir, image_dir) # load training data
#print('spec_data_len',len(spec_data))
#print('image_data_len',len(image_data))
#print('spec_data',spec_data)
#print('image_data',image_data)
print('data loaded')
criterion = nn.L1Loss() # L1 loss between predicted and ground-truth ratio masks
running_loss = 0.0
count = 0
for num_batch in itertools.count(): # training loop
if num_batch > 20000:
break
spec_input_sampled = np.zeros((2, batch_size, 256, 256), dtype='complex')
image_input_sampled = np.zeros((2, 3 * batch_size, 3, 224, 224), dtype='float32')
for i in range(batch_size):
[spec_input_mini, image_input_mini] = sample_from_dict(spec_data, image_data)
spec_input_sampled[:, i:i+1, :, :] = spec_input_mini #(2,batch_size,256,256)
image_input_sampled[:, 3*i:3*i+3, :, :, :] = image_input_mini #(2,3*batch_size,3,224,224)
#print('spec_input_sampled',spec_input_sampled.shape)
#print('image_input_sampled',image_input_sampled.shape)
spec_input1 = np.transpose(np.reshape(spec_input_sampled[0,:,:,:], (1,batch_size,256,256)), (1,0,2,3)) # spectrogram of the first instrument
spec_input2 = np.transpose(np.reshape(spec_input_sampled[1,:,:,:], (1,batch_size,256,256)), (1,0,2,3)) # spectrogram of the second instrument
spec_input = spec_input1 + spec_input2 # mixture spectrogram
#print('spec_input',spec_input.shape)
spec_abs1 = np.absolute(spec_input1)
spec_abs2 = np.absolute(spec_input2)
spec_abs = spec_abs1 + spec_abs2
#spec_abs = np.absolute(spec_input) # total magnitude spectrogram
mask1_ratio = np.zeros((batch_size, 1, 256, 256), dtype='float32')
mask2_ratio = np.zeros((batch_size, 1, 256, 256), dtype='float32')
for idx_0 in range(batch_size):
    for idx_2 in range(256):
        for idx_3 in range(256):
            if spec_abs[idx_0,0,idx_2,idx_3] == 0:
                mask1_ratio[idx_0,0,idx_2,idx_3] = 0.5
                mask2_ratio[idx_0,0,idx_2,idx_3] = 0.5
            else:
                mask1_ratio[idx_0,0,idx_2,idx_3] = spec_abs1[idx_0,0,idx_2,idx_3]/spec_abs[idx_0,0,idx_2,idx_3]
                mask2_ratio[idx_0,0,idx_2,idx_3] = spec_abs2[idx_0,0,idx_2,idx_3]/spec_abs[idx_0,0,idx_2,idx_3]
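# A vectorized equivalent of the triple loop above (a sketch with the same
# semantics: 0.5 where the mixture magnitude is zero, magnitude ratios
# elsewhere); np.where plus a dummy denominator avoids division by zero:
#   safe_abs = np.where(spec_abs == 0, 1.0, spec_abs)
#   mask1_ratio = np.where(spec_abs == 0, 0.5, spec_abs1 / safe_abs).astype('float32')
#   mask2_ratio = np.where(spec_abs == 0, 0.5, spec_abs2 / safe_abs).astype('float32')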
#mask1_ratio = np.absolute(spec_input1)/spec_abs
#print('mask1_ratio',mask1_ratio)
#print('mask2_ratio',mask2_ratio)
#mask2_ratio = np.absolute(spec_input2)/spec_abs
spec_dp = torch.from_numpy(amplitude_to_db(spec_abs)).float().to(device) # total magnitude spectrogram converted to dB
#print('spec_abs',spec_abs.shape)
#mask_input2 = np.transpose(np.reshape(np.argmax(spec_input_sampled, axis=0),(1,batch_size,256,256)), (1,0,2,3)) # compute masks
#mask_input1 = np.ones((batch_size,1,256,256))-mask_input2
#mask_input1 = torch.from_numpy(mask_input1).float().to(device) #mask1
#mask_input2 = torch.from_numpy(mask_input2).float().to(device) #mask2
#print('mask_input',mask_input2.shape)
#print('mask_input',mask_input1)
optim_video.zero_grad() # zero parameter gradients
optim_audio.zero_grad()
optim_syn.zero_grad()
out_audio_net = audio_net(spec_dp) # forward pass
#print('out_audio_net',out_audio_net.shape)
image_input1 = torch.from_numpy(image_input_sampled[0,:,:,:,:]).float().to(device) #video1
#print('image_input_type',image_input1.type)
#print('image_input_grad',image_input1.grad)
#print('image_input',image_input1.shape)
image_input2 = torch.from_numpy(image_input_sampled[1,:,:,:,:]).float().to(device)
#image_input2 = (torch.from_numpy(image_input_sampled[1,:,:,:,:])).float().to(device) #video2
out1_video_net = video_net(image_input1) # feed into the video network
#print('out_video_net',out1_video_net.shape)
out2_video_net = video_net(image_input2)
input1_syn_net = out1_video_net * out_audio_net # inputs to the synthesizer network
#print('input_syn_net_0',input1_syn_net.shape)
input2_syn_net = out2_video_net * out_audio_net
input1_syn_net = torch.transpose(input1_syn_net,1,2) # transpose to match dimensions
input1_syn_net = torch.transpose(input1_syn_net,2,3)
#print('input_syn_net_1',input1_syn_net.shape)
input2_syn_net = torch.transpose(input2_syn_net,1,2)
input2_syn_net = torch.transpose(input2_syn_net,2,3)
out1_syn_net = syn_net(input1_syn_net)
#print('out_syn_net_0',out1_syn_net.shape)
out2_syn_net = syn_net(input2_syn_net)
out1_syn_net = torch.transpose(out1_syn_net,2,3) # transpose to match dimensions
out1_syn_net = torch.transpose(out1_syn_net,1,2)
#print('out_syn_net_1',out1_syn_net.shape)
#print(out1_syn_net)
out2_syn_net = torch.transpose(out2_syn_net,2,3)
out2_syn_net = torch.transpose(out2_syn_net,1,2) #(batch_size,1,256,256)
#print('out1_syn_net.shape',out1_syn_net)
#out1_syn_net_binary = torch.round(out1_syn_net)
#out2_syn_net_binary = torch.round(out2_syn_net)
#print('out1_syn_net_binary',out1_syn_net_binary)
#s1_estimated = out1_syn_net * torch.from_numpy(spec_abs).float().to(device) #幅度谱估计
#print('s_estimated',s1_estimated.shape)
#s2_estimated = out2_syn_net * torch.from_numpy(spec_abs).float().to(device)
#s1_estimated_np = s1_estimated.detach().numpy()
#librosa.display.specshow(librosa.amplitude_to_db(np.abs(spec_input1[0,:,:]),ref=np.max),y_axis='log', x_axis='time') #画幅度谱
#print('done!')
#print(np.shape(spec_input1))
#plt.title('Power spectrogram')
#plt.colorbar(format='%+2.0f dB')
#plt.tight_layout()
#plt.show()
#librosa.display.specshow(librosa.amplitude_to_db(np.abs(s1_estimated_np[0,0,:,:]),ref=np.max),y_axis='log', x_axis='time')
#print('done!')
#print(np.shape(spec_input1))
#plt.title('Power spectrogram')
#plt.colorbar(format='%+2.0f dB')
#plt.tight_layout()
#plt.show()
# loss function; possibly problematic
loss1 = criterion(out1_syn_net, torch.from_numpy(mask1_ratio).float().to(device))
loss2 = criterion(out2_syn_net, torch.from_numpy(mask2_ratio).float().to(device))
loss = loss1 + loss2
#out_audio_net.register_hook(print)
#out1_video_net.register_hook(print)
#out2_syn_net.register_hook(print)
#input1_syn_net.register_hook(print)
#loss1.register_hook(print)
#loss.register_hook(print)
loss.backward() # backpropagation
#print('image_input_grad',image_input1.grad)
#print('out1_video_net_grad',out1_video_net.grad)
#print('loss1_type',loss1.type)
#print('loss2_type',loss2.type)
#print('loss_type',loss.type)
#print('loss1_grad',loss1.requires_grad)
#print('loss2_grad',loss2.requires_grad)
#print('loss_grad',loss.requires_grad)
#print('out2_syn_net',out2_syn_net.requires_grad)
#print('input1_syn_net',input1_syn_net.requires_grad)
#print('out2_video_net',out2_video_net.requires_grad)
#print('out_audio_net',out_audio_net.requires_grad)
optim_syn.step()
optim_audio.step()
optim_video.step() # one optimization step
#print('loss_backward',loss.backward)
running_loss += loss.item() # accumulate loss for logging
if num_batch % 200 == 199:
print('[%5d] loss: %.5f' % (num_batch+1, running_loss/200))
running_loss = 0.0
if num_batch % validate_freq == validate_freq - 1:
out1_syn_net_np = out1_syn_net.cpu().detach().numpy()
out2_syn_net_np = out2_syn_net.cpu().detach().numpy()
sdr_sum = [0.0, 0.0]
for i in range(batch_size):
out1_syn_net_one = out1_syn_net_np[i,:,:,:]
out2_syn_net_one = out2_syn_net_np[i,:,:,:]
spec_input_one = spec_input[i,:,:,:]
esti=np.r_[out1_syn_net_one*spec_input_one, out2_syn_net_one*spec_input_one] #(2,256,256)
#print('esti.shape',esti.shape)
wav_estimated = np.stack([spec2wave(esti[0, :, :]),spec2wave(esti[1, :, :])], axis=0) #(2,nsample)
#print('wav_estimated.shape',wav_estimated.shape) #(2,nsample)
wav_gt = np.stack([spec2wave(spec_input1[i, 0, :, :]),spec2wave(spec_input2[i, 0, :, :])], axis=0)
#print('wav_gt.shape',wav_gt.shape)
[sdr,sir,sar]=compute_sdr(wav_gt,wav_estimated)
if not sdr[0] == 100:
    sdr_sum[0] += sdr[0]
    sdr_sum[1] += sdr[1]
print('validation:sdr1=%.3f sdr2=%.3f' % (sdr_sum[0]/batch_size, sdr_sum[1]/batch_size))
#mask1_ratio.tofile('/home/zhc/the_sound/gt1.np')
#mask2_ratio.tofile('/home/zhc/the_sound/gt2.np')
#out1_syn_net.cpu().detach().numpy().tofile('/home/zhc/the_sound/1.np')
#out2_syn_net.cpu().detach().numpy().tofile('/home/zhc/the_sound/2.np')
if not os.path.exists(model_dir):
os.mkdir(model_dir)
torch.save(video_net.state_dict(), os.path.join(model_dir, 'video_net_params.pkl'))
torch.save(audio_net.state_dict(), os.path.join(model_dir, 'audio_net_params.pkl'))
torch.save(syn_net.state_dict(), os.path.join(model_dir, 'syn_net_params.pkl'))
print("model saved to " + str(model_dir) + '\n')
if __name__ == '__main__':
#spec_dir = 'D:/stddzy/Sound_of_Pixels/audio_mini'
#image_dir = 'D:/stddzy/Sound_of_Pixels/video_mini'
#spec_dir = '/home/zhc/the_sound/audio_mini'
#image_dir = '/home/zhc/the_sound/video_mini'
spec_dir = '/home/zhc/the_sound/audio_spectrums'
image_dir = '/home/zhc/the_sound/video_frames'
model_dir = '/home/zhc/the_sound/model_params_ratio'
train(spec_dir, image_dir, model_dir)
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict
from typing import Dict, List
from .... import oscar as mo
from ....utils import implements
from ....typing import BandType
from ..core import _CommonMeta, _ChunkMeta
from .base import AbstractMetaStore, register_meta_store
@register_meta_store
class DictMetaStore(AbstractMetaStore):
name = 'dict'
def __init__(self, session_id: str, **kw):
super().__init__(session_id)
self._store: Dict[str, _CommonMeta] = dict()
if kw: # pragma: no cover
raise TypeError(f'Keyword arguments {kw!r} cannot be recognized.')
@classmethod
@implements(AbstractMetaStore.create)
async def create(cls, config) -> Dict:
# Nothing needs to be done for the dict-based meta store;
# no extra kwargs.
return dict()
def _set_meta(self,
object_id: str,
meta: _CommonMeta):
self._store[object_id] = meta
@implements(AbstractMetaStore.set_meta)
@mo.extensible
async def set_meta(self,
object_id: str,
meta: _CommonMeta):
self._set_meta(object_id, meta)
@set_meta.batch
async def batch_set_meta(self, args_list, kwargs_list):
for args, kwargs in zip(args_list, kwargs_list):
self._set_meta(*args, **kwargs)
def _get_meta(self,
object_id: str,
fields: List[str] = None,
error: str = 'raise') -> Dict:
if error not in ('raise', 'ignore'): # pragma: no cover
raise ValueError('error must be raise or ignore')
try:
meta = asdict(self._store[object_id])
if fields:
return {k: meta[k] for k in fields}
return meta
except KeyError:
if error == 'raise':
raise
else:
return
@implements(AbstractMetaStore.get_meta)
@mo.extensible
async def get_meta(self,
object_id: str,
fields: List[str] = None,
error: str = 'raise') -> Dict:
return self._get_meta(object_id, fields=fields, error=error)
@get_meta.batch
async def batch_get_meta(self, args_list, kwargs_list):
metas = []
for args, kwargs in zip(args_list, kwargs_list):
metas.append(self._get_meta(*args, **kwargs))
return metas
def _del_meta(self, object_id: str):
del self._store[object_id]
@implements(AbstractMetaStore.del_meta)
@mo.extensible
async def del_meta(self,
object_id: str):
self._del_meta(object_id)
@del_meta.batch
async def batch_del_meta(self, args_list, kwargs_list):
for args, kwargs in zip(args_list, kwargs_list):
self._del_meta(*args, **kwargs)
def _add_chunk_bands(self,
object_id: str,
bands: List[BandType]):
meta = self._store[object_id]
assert isinstance(meta, _ChunkMeta)
meta.bands = list(set(meta.bands) | set(bands))
@implements(AbstractMetaStore.add_chunk_bands)
@mo.extensible
async def add_chunk_bands(self,
object_id: str,
bands: List[BandType]):
self._add_chunk_bands(object_id, bands)
@add_chunk_bands.batch
async def batch_add_chunk_bands(self, args_list, kwargs_list):
for args, kwargs in zip(args_list, kwargs_list):
self._add_chunk_bands(*args, **kwargs)
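# Usage sketch (illustrative, not part of this module): thanks to the
# mo.extensible wrappers above, callers can mix single and batched calls.
# The .delay()/.batch() names follow the mars.oscar extensible-call
# convention, and `meta` stands for any _ChunkMeta instance:
#
#   store = DictMetaStore(session_id='session-0')
#   await store.set_meta('chunk-0', meta)
#   await store.set_meta.batch(
#       store.set_meta.delay('chunk-1', meta),
#       store.set_meta.delay('chunk-2', meta))
#   metas = await store.get_meta.batch(
#       store.get_meta.delay('chunk-1'),
#       store.get_meta.delay('chunk-2'))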
|
import _plotly_utils.basevalidators
class ShowbackgroundValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showbackground", parent_name="layout.scene.zaxis", **kwargs
):
super(ShowbackgroundValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
#!/usr/bin/env python3
import os
import numpy as np
# import kitti.tracklet_parser as tracklet_parser
import collections
import pandas as pd
KittiObject = collections.namedtuple('KittiObject', ['type',
'truncated',
'occluded',
'alpha',
'bbox',
'dimensions',
'location',
'location_y'])
KittiImu = collections.namedtuple(
'KittiImu', ['location', 'linear_velocity', 'linear_acceleration'])
class kitti_parser:
def __init__(self,dataPath,drive,resultsFolder):
# Set base paths
self.dataPath = dataPath
self.drive = drive
self.resultsFolder = resultsFolder
# Check if exists
if not os.path.exists(self.dataPath + self.drive):
    print("Drive does not exist")
    raise SystemExit
# Image paths
try:
self.left_color_image_list = sorted(os.listdir(
self.dataPath + self.drive + '/image_02/data'), key=self.sorter)
except:
print("No image data")
raise SystemExit
# Imu paths
try:
self.imuFileList = sorted(os.listdir(
self.dataPath + self.drive + '/oxts/data/'), key=self.sorter)
except:
print("No oxts data")
raise SystemExit
# Object paths
try:
self.objectFileList = sorted(os.listdir(
self.dataPath + self.drive + '/label_2'), key=self.sorter)
except:
print("No object data, create from xml...")
try:
tracklet_parser.main(self.dataPath, self.drive)
self.objectFileList = sorted(os.listdir(
    self.dataPath + self.drive + '/label_2'), key=self.sorter)
except:
print("No object xml")
raise SystemExit
# Check variables
self.frame = 0
self.done = 0
# Setup data acquisition
try:
os.remove(os.path.join(self.resultsFolder,
'model_responses/model_results.csv'))
except:
pass
# Get information
self.get_road()
self.get_objects()
self.get_imu()
self.get_manual()
def get_road(self):
self.par_city = []
self.par_residential = []
self.par_road = []
with open(self.dataPath + self.drive +
          '/uniform_image_list.txt', "r") as road_file:
    lines = road_file.readlines()
self.road_types = []
for i in range(len(lines)):
road = lines[i].split('/')[0]
self.road_types.append(road)
self.par_city.append((road == 'city')*1)
self.par_residential.append((road == 'residential')*1)
self.par_road.append((road == 'road')*1)
def get_objects(self):
self.objectsList = []
for i in range(len(self.objectFileList)):
# Open file
self.object_file = open(
self.dataPath + self.drive + '/label_2/' + self.objectFileList[i], "r")
# Setup object per frame
objects = []
# Read next line
lines = self.object_file.readlines()
for object in lines:
oArgs = object.split(' ')
type = oArgs[0]
truncated = float(oArgs[1])
occluded = int(oArgs[2])
alpha = float(oArgs[3])
bbox = [float(oArgs[4]),
float(oArgs[5]),
float(oArgs[6]),
float(oArgs[7])]
dimensions = [float(oArgs[8]),
float(oArgs[9]),
float(oArgs[10])]
location = [float(oArgs[11]),
float(oArgs[12]),
float(oArgs[13])]
location_y = float(oArgs[14])
# Append object list of frame
objects.append(KittiObject(type,
truncated,
occluded,
alpha,
bbox,
dimensions,
location,
location_y))
# Close file
self.object_file.close()
self.objectsList.append(objects)
def get_imu(self):
self.imuList = []
for file in self.imuFileList:
# Open file
imu_file = open(
self.dataPath + self.drive + '/oxts/data/' + file, "r")
# Create new imu msg
# imuObject = KittiImu
# Get imu data from file
line = imu_file.readline()
imuArgs = line.split(' ')
# Fill new object
location = [
float(imuArgs[0]),
float(imuArgs[1]),
float(imuArgs[2]),
float(imuArgs[5])]
linear_velocity = [
float(imuArgs[8]),
float(imuArgs[9]),
float(imuArgs[10])]
linear_acceleration = [
float(imuArgs[11]),
float(imuArgs[12]),
float(imuArgs[13])]
self.imuList.append(
KittiImu(location, linear_velocity, linear_acceleration))
# Close file
imu_file.close()
def get_manual(self):
self.manual_data = pd.read_csv(
self.dataPath + self.drive + '/manual_data.csv')
def sorter(self, name):
frame = int(name.split('.')[0])
return frame
def typeSwitch(self, objType, parameters):
# Switch to type to assign weight based on...
typeSwitch = {
'Car': parameters[0],
'Van': parameters[1],
'Truck': parameters[2],
'Pedestrian': parameters[3],
'Person_sitting': parameters[4],
'Cyclist': parameters[5],
'Tram': parameters[6],
'Misc': parameters[7],
'DontCare': parameters[8],
}
return typeSwitch.get(objType, "Invalid object type")
def roadSwitch(self, roadType, parameters):
# Switch to type to assign weight based on...
roadSwitch = {
'city': parameters[9],
'residential': parameters[10],
'road': parameters[11],
}
return roadSwitch.get(roadType, "Invalid road type")
def fast_type(self, x):
par_type = []
par_alpha = []
par_occluded = []
par_truncated = []
par_size = []
for frame_objects in self.objectsList:
types = []
alpha = []
occluded = []
truncated = []
size = []
for object in frame_objects:
types.append(self.typeSwitch(object.type, x))
alpha.append(abs(object.alpha))
occluded.append(object.occluded)
truncated.append(object.truncated)
size.append(np.prod(object.dimensions))
par_alpha.append(alpha)
par_type.append(sum(types))
par_occluded.append(occluded)
par_truncated.append(truncated)
par_size.append(size)
return par_type, par_alpha, par_occluded, par_truncated,par_size
def fast_imm(self, x):
# Get variables from arguments
a = x[12]
b = x[13]
# Create empty return lists
par_total_distance = []
par_velocity = []
par_imm = []
# Get object and ego vehicle data per frame
for frame in range(len(self.imuFileList)):
# Get ego velocity
velocity = np.linalg.norm(self.imuList[frame].linear_velocity, 2)
# Construct save variables
all_imminence = []
all_distance = []
# Get object data per object in frame
for object in self.objectsList[frame]:
distance = np.linalg.norm(object.location, 2)
# Linear imminence parameter
# imm = a * distance/velocity + b
# Quadratic imminence parameter
if b == 0:
    imm = np.nan
else:
    imm = a*(distance/velocity)**(1/b)
if imm > 50:
    imm = 50
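# Worked example with the parameters used in __main__ (a = 1.0, b = 0.1):
# imm = (distance/velocity) ** 10, so an object 1.2 "seconds away" gives
# 1.2 ** 10 ~ 6.2, while 1.6 gives ~ 110, clipped to 50 by the check above.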
# Save parameter per object
all_imminence.append(imm)
all_distance.append(distance)
# Save parameter values per frame
par_imm.append(sum(all_imminence))
par_velocity.append(velocity)
par_total_distance.append(all_distance)
return par_imm, par_velocity, par_total_distance
def fast_prob(self, x):
probability_par = []
for road in self.road_types:
probability_par.append(self.roadSwitch(road, x))
return probability_par
def get_model(self, x):
# Get individual model results
par_all_imminence, par_velocity, par_all_distance = self.fast_imm(x)
par_type, par_alpha, par_occluded, par_truncated,par_size = self.fast_type(x)
par_probability = self.fast_prob(x)
# Construct empty lists for iteration
par_combi = []
number_objects = []
sum_distance = []
min_distance = []
mean_distance = []
min_alpha = []
mean_alpha = []
max_alpha = []
mean_par_occluded = []
sum_par_occluded = []
mean_par_truncated = []
sum_par_truncated = []
mean_par_size = []
max_par_size = []
min_par_size = []
sum_par_size = []
# Get combined model results
for frame in range(len(par_all_imminence)):
sum_distance.append(sum(par_all_distance[frame]))
min_distance.append(min(par_all_distance[frame], default=0))
# Check for objects present
if len(par_all_distance[frame]) != 0:
number_objects.append(len(par_all_distance[frame]))
mean_distance.append(
sum(par_all_distance[frame])/len(par_all_distance[frame]))
min_alpha.append(min(par_alpha[frame]))
mean_alpha.append(
sum(par_alpha[frame])/len(par_alpha[frame]))
max_alpha.append(max(par_alpha[frame]))
mean_par_occluded.append(sum(par_occluded[frame])/len(par_occluded[frame]))
sum_par_occluded.append(sum(par_occluded[frame]))
mean_par_truncated.append(sum(par_truncated[frame])/len(par_truncated[frame]))
sum_par_truncated.append(sum(par_truncated[frame]))
mean_par_size.append(sum(par_size[frame])/len(par_size[frame]))
max_par_size.append(max(par_size[frame]))
min_par_size.append(min(par_size[frame]))
sum_par_size.append(sum(par_size[frame]))
else:
number_objects.append(0.0)
mean_distance.append(0.0)
min_alpha.append(0.0)
mean_alpha.append(0.0)
max_alpha.append(0.0)
mean_par_occluded.append(0.0)
sum_par_occluded.append(0.0)
mean_par_truncated.append(0.0)
sum_par_truncated.append(0.0)
mean_par_size.append(0.0)
max_par_size.append(0.0)
min_par_size.append(0.0)
sum_par_size.append(0.0)
par_combi.append(par_all_imminence[frame] +
par_type[frame] + par_probability[frame])
# Create empty dict
results = {}
# Add items to dict
results['general_frame_number'] = range(
len(self.left_color_image_list))
results['model_combination'] = par_combi
results['model_type'] = par_type
results['model_imminence'] = par_all_imminence
results['model_probability'] = par_probability
results['general_velocity'] = par_velocity
results['general_distance_sum'] = sum_distance
results['general_distance_min'] = min_distance
results['general_distance_mean'] = mean_distance
results['general_number_objects'] = number_objects
results['manual_car_toward'] = self.manual_data.CarToward
results['manual_car_away'] = self.manual_data.CarAway
results['manual_breaklight'] = self.manual_data.Breaklight
results['alpha_min'] = min_alpha
results['alpha_mean'] = mean_alpha
results['alpha_max'] = max_alpha
results['occluded_mean'] = mean_par_occluded
results['occluded_sum'] = sum_par_occluded
results['truncated_mean'] = mean_par_truncated
results['truncated_sum'] = sum_par_truncated
results['size_mean'] = mean_par_size
results['size_max'] = max_par_size
results['size_min'] = min_par_size
results['size_sum'] = sum_par_size
results['road_road']= self.par_road
results['road_residential'] = self.par_residential
results['road_city'] = self.par_city
return results
def save_model(self, x,modelFile = 'model_results.csv'):
# Get model response
results = self.get_model(x)
# Create dataframe from dict
resultsDF = pd.DataFrame.from_dict(results)
# save dataframe as csv file
resultsDF.to_csv(os.path.join(self.resultsFolder,
'model_responses',modelFile), index=False)
if __name__ == "__main__":
# Example input
dataPath = '/dataset'
drive = '/test_images'
resultsFolder = '/home/jim/HDDocuments/university/master/thesis/results'
# Construct parser class
kp = kitti_parser(dataPath,drive,resultsFolder)
# Example parameters
# x = [0., 1.458974, 2.63547244, 0.96564807, 2.21222542, 1.65225034, 0., 0., 1.,
# 2.20176468, 2.40070779, 0.1750559,
# 0.20347586, 6.54656438]
x = [0.2, 0.4, 0.6, 1., 0.2, 1., 0.6, 0.2, 0.,
3., 1.5, 0.,
1., 0.1]
# Get model results
results = kp.get_model(x)
# Save model results
kp.save_model(x)
|
from ..errormessage import render_error
from icemac.addressbook.person import person_entity
from zope.i18n import translate
def test_errormessage__render_error__1(zcmlS):
"""It is able to render an error which does not belong to a field."""
message = render_error(
person_entity, field_name='', exc=RuntimeError('General Error!'))
assert ('Unexpected error occurred: RuntimeError: General Error!' ==
translate(message))
|
#!/usr/bin/env python
# vim:ts=4:sw=4:et:
# This script is roughly equivalent to the configure script that is
# generated from configure.ac.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# no unicode literals
import argparse
import os
import re
import shutil
import subprocess
import sys
import tempfile
import shlex
CSUFFIX = '.c'
OSUFFIX = '.o'
EXESUFFIX = '.exe'
CC = os.environ.get('CC', 'cc')
CPPFLAGS = os.environ.get('CPPFLAGS', '')
LDFLAGS = os.environ.get('LDFLAGS', '')
parser = argparse.ArgumentParser(description='Probe for system characteristics')
parser.add_argument('--cc', action='store', default=CC)
parser.add_argument('--cppflags', action='store', default=CPPFLAGS)
parser.add_argument('--ldflags', action='store', default=LDFLAGS)
parser.add_argument('--cwd', action='store', default=os.getcwd())
parser.add_argument('--configure', action='store', default='configure.ac')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
CC = args.cc
CPPFLAGS = shlex.split(args.cppflags)
LDFLAGS = shlex.split(args.ldflags)
os.chdir(args.cwd)
extra_flags = []
if sys.platform == 'darwin':
extra_flags += ['-framework', 'CoreServices']
# List of functions that we may optionally use if they are present on
# the system. Keep this sorted.
funcs_to_probe = [
'FSEventStreamSetExclusionPaths',
'accept4',
'backtrace',
'backtrace_symbols',
'backtrace_symbols_fd',
'fdopendir',
'getattrlistbulk',
'inotify_init',
'inotify_init1',
'kqueue',
'localeconv',
'memmem',
'mkostemp',
'openat',
'port_create',
'statfs',
'strtoll',
'sys_siglist',
]
# List of header files that we'd like to know about on the system.
# Keep this sorted.
headers_to_probe = [
'CoreServices/CoreServices.h',
'execinfo.h',
'inttypes.h',
'locale.h',
'port.h',
'sys/event.h',
'sys/inotify.h',
'sys/mount.h',
'sys/param.h',
'sys/resource.h',
'sys/socket.h',
'sys/statfs.h',
'sys/statvfs.h',
'sys/types.h',
'sys/ucred.h',
'sys/vfs.h',
'valgrind/valgrind.h',
'unistd.h',
]
def makesym(name):
return re.sub('[./]', '_', name.upper())
def emit_status(proc, what, sym):
out, err = proc.communicate()
status = proc.wait()
print(what)
if args.verbose:
print('// Status: %s' % status)
for line in out.splitlines():
print('// stdout: %s' % line)
for line in err.splitlines():
print('// stderr: %s' % line)
if status == 0:
print('#define %s 1' % sym)
else:
print('#undef %s' % sym)
def check_func(name):
tmp_dir = tempfile.mkdtemp()
try:
src_file = os.path.join(tmp_dir, '%s%s' % (name, CSUFFIX))
exe_file = os.path.join(tmp_dir, '%s%s' % (name, EXESUFFIX))
with open(src_file, 'w+') as f:
f.write(
'''
int main(int argc, char**argv) {
extern int %s(void);
return %s();
}
''' % (name, name)
)
cmd = [CC] + CPPFLAGS + ['-o', exe_file, src_file] + \
extra_flags + LDFLAGS
what = '\n// Probing for function %s\n// %s' % (name, ' '.join(cmd))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
emit_status(proc, what, 'HAVE_%s' % makesym(name))
finally:
shutil.rmtree(tmp_dir)
def check_header(name):
tmp_dir = tempfile.mkdtemp()
try:
filename = os.path.basename(name)
src_file = os.path.join(tmp_dir, '%s%s' % (filename, CSUFFIX))
obj_file = os.path.join(tmp_dir, '%s%s' % (filename, OSUFFIX))
with open(src_file, 'w+') as f:
f.write('''
#include "%s"
''' % (name))
cmd = [CC] + CPPFLAGS + [
'-c',
'-o',
obj_file,
src_file,
] + extra_flags
what = '\n// Probing for header %s\n// %s' % (name, ' '.join(cmd))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
emit_status(proc, what, 'HAVE_%s' % makesym(name))
finally:
shutil.rmtree(tmp_dir)
def check_struct_members(struct_name, member_name, includes=None):
tmp_dir = tempfile.mkdtemp()
try:
filename = struct_name
src_file = os.path.join(tmp_dir, '%s%s' % (filename, CSUFFIX))
obj_file = os.path.join(tmp_dir, '%s%s' % (filename, OSUFFIX))
with open(src_file, 'w+') as f:
f.write(
'''
%s
void *probe_%s_%s(struct %s *s) {
return (void*)&s->%s;
}
''' % (includes, struct_name, member_name, struct_name, member_name)
)
cmd = [CC] + CPPFLAGS + ['-c', '-o', obj_file, src_file] + extra_flags
what = '\n// Probing for %s::%s\n// %s' % (
struct_name, member_name, ' '.join(cmd)
)
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
emit_status(
proc, what,
'HAVE_STRUCT_%s' % makesym('%s_%s' % (struct_name, member_name))
)
finally:
shutil.rmtree(tmp_dir)
# Scrape out the version number from the configure script so that we have
# fewer places to update when we bump the version.
def extract_version():
with open(args.configure, 'r') as f:
for line in f:
res = re.match(r'^AC_INIT\(\[watchman\], \[([0-9.]+)\]', line)
if res:
return res.group(1)
raise Exception("couldn't find AC_INIT version line in " + args.configure)
print('// Generated by ./build/probe.py')
print('#ifndef WATCHMAN_CONFIG_H')
print('#define WATCHMAN_CONFIG_H')
print('#define PACKAGE_VERSION "%s"' % extract_version())
print('#define WATCHMAN_STATE_DIR "/var/facebook/watchman"')
print('#define WATCHMAN_CONFIG_FILE "/etc/watchman.json"')
for hname in headers_to_probe:
check_header(hname)
for fname in funcs_to_probe:
check_func(fname)
check_struct_members('statvfs', 'f_fstypename', '#include <sys/statvfs.h>')
check_struct_members('statvfs', 'f_basetype', '#include <sys/statvfs.h>')
print('#endif')
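# Redirecting stdout yields the config header, e.g.:
#   python build/probe.py --cc gcc > config.h
# An illustrative excerpt (not a recorded run; results vary by platform):
#   // Generated by ./build/probe.py
#   #ifndef WATCHMAN_CONFIG_H
#   #define WATCHMAN_CONFIG_H
#   #define PACKAGE_VERSION "4.9.0"
#   #define HAVE_SYS_INOTIFY_H 1
#   #define HAVE_INOTIFY_INIT 1
#   #undef HAVE_PORT_CREATE
#   #endif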
|
from __future__ import division, print_function
from threading import Thread
import os
import ConfigParser
import logging
import numpy as np
import pandas as pd
from atrial_fibrillation import AtrialFibrillation
from ventricular_tachycardia import VentricularTachycardia
from apc_pvc_helper import APC_helper
from pvc_hamilton import PVC
from respiration_AD import RespiratoryAD
from sleep_AD import SleepAD
__author__ = "Dipankar Niranjan, https://github.com/Ras-al-Ghul"
# Logging config
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
class AnomalyDetector(object):
"""
implements methods to call various Anomaly Detection Algorithms
"""
def __init__(self):
self.config = ConfigParser.RawConfigParser()
dirname = os.path.dirname(os.path.realpath(__file__))
cfg_filename = os.path.join(dirname, 'anomaly_detector.cfg')
self.config.read(cfg_filename)
self.window_size =\
self.config.getint('Atrial Fibrillation', 'window_size')
self.vt_result = None
def af_anomaly_detect(self, rr_intervals, hr_quality_indices):
"""
executes the Atrial Fibrillation Anomaly detection
Input:
rr_intervals: a 2D pandas dataframe -
(refer rrinterval.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "rr_int" -
contains 'double' interval data
hr_quality_indices: a 2D pandas dataframe -
(refer hr_quality.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "quality_ind" -
contains 'int' quality indices,
with max value 127
Output:
returns:
if anomaly:
'dict' with the following keys:
start_hexo_timestamp: an integer denoting timestamp of
the first record
end_hexo_timestamp: an integer denoting the timestamp of
the last record of the 32/64/128-sample window
num_of_NEC: a small integer, higher the number,
more severe the anomaly here
data_reliability: a small integer, which denotes as a
percentage, the quality of the data
in this window
the higher the percentage, the worse
the quality
window_size: a small integer, takes 32/64/128
as values
else:
None
Notes:
based on 'A Simple Method to Detect
Atrial Fibrillation Using RR Intervals'
by Jie Lian et. al.
Note the return value (if not 'None') and
check with the data_reliability and previous
data timestamps to set AFAlarmAttribute at
the health_monitor server
"""
if len(rr_intervals) != self.window_size:
    raise ValueError("window length of rr_intervals passed "
                     "doesn't match config file")
if not (rr_intervals['hexoskin_timestamps'][0] >=
hr_quality_indices['hexoskin_timestamps'][0] and
rr_intervals['hexoskin_timestamps'][len(rr_intervals)-1] <=
hr_quality_indices
['hexoskin_timestamps'][len(hr_quality_indices)-1]):
pass
# raise ValueError("first rr_interval timestamp\
# and last rr_interval timestamp must lie within first \
# and last timestamp of hr_quality")
AF = AtrialFibrillation(rr_intervals, hr_quality_indices,
self.config)
return AF.get_anomaly()
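# Illustrative shape of a non-None return value (keys taken from the
# docstring above; the numbers are made up, not from a real record):
#   {'start_hexo_timestamp': 383021140185,
#    'end_hexo_timestamp': 383021148377,
#    'num_of_NEC': 4,
#    'data_reliability': 6,
#    'window_size': 32}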
def vt_anomaly_detect(self, ecg, rr_intervals,
rr_interval_status, prev_ampl):
"""
creates an object and calls the Ventricular Tachycardia
anomaly detection methods
Input:
ecg: a 2D pandas dataframe -
(refer ecg.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "ecg_val" -
contains 'int' raw ecg data
rr_intervals: a 2D pandas dataframe -
(refer rrinterval.txt from Hexoskin record)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "rr_int" -
contains 'double' interval data
rr_interval_status: a 2D pandas dataframe -
(refer rrintervalstatus from Hexoskin API)
first column named "hexoskin_timestamps" -
contains 'int' timestamps
second column named as "rr_status" -
contains 'int' quality indices.
Output:
sets:
vt_result: this is an attribute of an object of this
(Anomaly Detector) class. Its value can
be read from the caller method. Its value
is set to __zero_one_count which is
described next.
__zero_one_count - if it is True, it means
that analysis of the next 6 seconds is
required
- if it is False, it means that analysis
of the next 6 seconds is not required
- if it has an integer value then it
means that a VT event has been detected
and it has to be stored in the anomaly
database and of course next 6 second
analysis is required
Notes:
based on the following three papers:
'Ventricular Tachycardia/Fibrillation Detection
Algorithm for 24/7 Personal Wireless Heart Monitoring'
by Fokkenrood et. al.
'Real Time detection of ventricular fibrillation
and tachycardia' by Jekova et. al.
'Increase in Heart Rate Precedes Episodes of
Ventricular Tachycardia and Ventricular
Fibrillation in Patients with Implantahle
Cardioverter Defihrillators: Analysis of
Spontaneous Ventricular Tachycardia Database'
by Nemec et. al.
Refer to readme for more details
"""
__zero_one_count = True
VTobj = VentricularTachycardia(ecg, rr_intervals,
rr_interval_status, self.config)
further_analyze = VTobj.analyze_six_second()
# if initial analysis indicates that further analysis
# is not required
if not further_analyze:
    __zero_one_count = False
    self.vt_result = __zero_one_count
    return
logging.info("Doing further analysis")
# perform the preprocessing
VTobj.signal_preprocess()
# call the DangerousHeartActivity detector
cur_ampl, stop_cur = VTobj.DHA_detect(prev_ampl)
# whatever be the results of the following stages,
# we necessarily have to analyze the next six second epoch
# if further analysis is not required, stop with the current result
if stop_cur is True:
    self.vt_result = __zero_one_count
    return
# asystole detector
vtvfres = VTobj.asystole_detector(cur_ampl)
# to analyze next six second epoch
if vtvfres == 'VT/VF':
# A VT episode has been found
logging.info("%s" % str(vtvfres))
__zero_one_count = VTobj.zero_one_count
self.vt_result = __zero_one_count
else:
# not a VT episode
logging.info("%s" % str(vtvfres))
self.vt_result = __zero_one_count
def apc_pvc(self, init_timestamp):
"""
this is only for testing and reference purpose,
in actuality, create APC_helper object and call
directly - no need to create AD object for this
Input:
timestamp: the first timestamp
Output:
stores to the results dict of the APC class
Notes:
based on the following paper:
'Automatic detection of premature atrial
contractions in the electrocardiogram'
by Krasteva et. al.
Refer to readme for more details
"""
apcHelperObj = APC_helper()
apcHelperObj.populate_DS()
apcHelperObj.popluate_aux_structures(init_timestamp)
apcHelperObj.apcObj.absolute_arrhythmia()
def pvc_Hamilton(self, init_timestamp):
"""
this is only for testing and reference purpose,
in actuality, create PVC object and call
directly - no need to create AD object for this
Input:
timestamp: the first timestamp
Output:
stores to the results dict of the PVC class
Notes:
based on:
'Open Source ECG Analysis Software
Documentation'
by Patrick S. Hamilton
Refer to readme for more details
"""
pvcObj = PVC()
pvcObj.populate_data()
pvcObj.beat_classf_analyzer(init_timestamp)
def resp_AD(self, init_timestamp):
"""
this is only for testing and reference purpose,
in actuality, create RespiratoryAD object and call
directly - no need to create AD object for this
Input:
timestamp: the first timestamp
Output:
stores to the results dict of the RespiratoryAD class
Notes:
based on:
'http://wps.prenhall.com/wps/media/objects\
/2791/2858109/toolbox/Box15_1.pdf'
Refer to readme for more details
"""
respObj = RespiratoryAD(self.config, init_timestamp)
th1 = Thread(target=respObj.populate_DS, args=[])
th1.start()
th1.join()
th2 = Thread(target=respObj.tidal_volume_anomaly, args=[])
th2.start()
th3 = Thread(target=respObj.minute_ventilation_anomaly, args=[])
th3.start()
th4 = Thread(target=respObj.resp_variation, args=[])
th4.start()
th5 = Thread(target=respObj.resp_classf, args=[])
th5.start()
th6 = Thread(target=respObj.delete_DS, args=[])
th6.start()
def sleep_AD(self):
"""
this is only for testing and reference purpose,
in actuality, create SleepAD object and call
directly - no need to create AD object for this
Input:
None
Output:
stores to the anomaly_dict of the SleepAD class
Notes:
based on:
'https://www.sleepcycle.com/how-it-works/'
'http://blog.doctoroz.com/oz-experts/calculating-your-
perfect-bedtime-and-sleep-efficiency'
'https://api.hexoskin.com/docs/resource/sleepphase/'
'https://api.hexoskin.com/docs/resource/sleepposition/'
'https://api.hexoskin.com/docs/resource/metric/'
Refer to readme for more details
"""
SleepObj = SleepAD()
SleepObj.populate_DS()
SleepObj.get_metrics()
SleepObj.calc_woke_up_count()
SleepObj.get_possible_anomaly()
def main():
AD = AnomalyDetector()
rr_intervals = (pd.read_csv('rrinterval.txt',
sep="\t",
nrows=AD.config.getint('Atrial Fibrillation',
'window_size'),
dtype={"hexoskin_timestamps": np.int64,
"rr_int": np.float64},
header=None,
names=["hexoskin_timestamps", "rr_int"]))
hr_quality_indices = (pd.read_csv('hr_quality.txt',
sep="\t",
nrows=AD.config.
getint('Atrial Fibrillation',
'window_size')-8,
dtype={"hexoskin_timestamps": np.int64,
"quality_ind": np.int32},
header=None,
names=["hexoskin_timestamps",
"quality_ind"]))
# call the Atrial Fibrillation anomaly detection method
logging.info("%s" %
str(AD.af_anomaly_detect(rr_intervals, hr_quality_indices)))
ecg = (pd.read_csv('ecg.txt',
sep="\t",
nrows=256*6,
dtype={"hexoskin_timestamps": np.int64,
"ecg_val": np.int32},
header=None,
names=["hexoskin_timestamps", "ecg_val"]))
"""
for testing, ensure that only the relevant timestamped
rr_intervals are present in rrinterval.txt as it reads
a preset 7 rows
"""
rr_intervals = (pd.read_csv('rrinterval.txt',
sep="\t",
nrows=7,
dtype={"hexoskin_timestamps": np.int64,
"rr_int": np.float64},
header=None,
names=["hexoskin_timestamps", "rr_int"]))
"""
for testing, ensure that only the relevant timestamped
rr_status are present in rr_interval_status.txt as it
reads a preset 7 rows
"""
rr_interval_status = (pd.read_csv('rr_interval_status.txt',
sep="\t",
nrows=7,
dtype={"hexoskin_timestamps": np.int64,
"rr_status": np.int32},
header=None,
names=["hexoskin_timestamps",
"rr_status"]))
# call the Ventricular Tachycardia anomaly detection method
AD.vt_anomaly_detect(ecg, rr_intervals, rr_interval_status, 1400)
AD.apc_pvc(383021266184)
AD.pvc_Hamilton(383021266184)
AD.resp_AD(383021140185)
AD.sleep_AD()
if __name__ == '__main__':
main()
|
import random
from flask import request, redirect, Flask, render_template
import time
app = Flask(__name__)
card_group = ["spade", "plum", "square", "heart"]
card_number = ["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "K", "Q"]
cards = []
for g in card_group:
for n in card_number:
cards.append(g + "_" + n)
def new_card():
rtn = []
for card in cards:
rtn.append(card)
return rtn
class Player:
def __init__(self, name):
self.name = name
self.card = []
self.credit = 0
self.balance = 2720
self.current_bet = 0
self.is_discard = False
self.show_card = False
self.last_ping = time.time()
def win(self, money):
self.balance += (self.current_bet + money)
self.current_bet = 0
def lose(self):
self.current_bet = 0
def add_bet(self, addition):
self.current_bet += addition
self.balance -= addition
def show_now(self):
self.show_card = True
def reset_state(self):
self.is_discard = False
self.show_card = False
def discard(self):
self.is_discard = True
def reset(self):
self.credit += (self.balance - 2720)
self.balance = 2720
START = "In progress"
END = "Ended"
class Game:
def __init__(self):
self.players = []
self.showing_cards = []
self.state = END
self.code = "hdnb"
self.card_pool = []
self.hash = time.time()
def join(self, name, code):
print(self.hash)
if self.state == START:
    return {"success": False, "msg": "A round is in progress, please join later"}
if self.code != code:
    return {"success": False, "msg": "Invalid invite code"}
for player in self.players:
    if player.name == name:
        return {"success": True, "msg": "Welcome back"}
self.players.append(Player(name))
return {"success": True, "msg": "Welcome to the game"}
def remove_player(self, name):
new_list = []
for player in self.players:
if player.name != name:
new_list.append(player)
self.players = new_list
def ping(self, name):
now = time.time()
in_list = False
for player in self.players:
# if now - player.last_ping > 10:
# self.remove_player(player.name)
if player.name == name:
in_list = True
player.last_ping = now
if not in_list:
return {"success": False}
return {"success": True}
def get_player(self, name):
for player in self.players:
if player.name == name:
return player
return None
def add_bet(self, name, addition):
addition = int(addition)
p = self.get_player(name)
if p.balance < addition:
    return {"success": False, "msg": "Insufficient chips"}
p.add_bet(addition)
return {"success": True, "msg": "Bet placed"}
def _pop_card(self):
n = random.randint(0, len(self.card_pool) - 1)
card = self.card_pool[n]
self.card_pool.remove(card)
return card
def start(self):
self.card_pool = new_card()
self.state = START
for p in self.players:
p.reset_state()
c1 = self._pop_card()
c2 = self._pop_card()
p.card = [c1, c2]
self.showing_cards = []
def next_card(self):
if len(self.showing_cards) == 0:
c1 = self._pop_card()
c2 = self._pop_card()
c3 = self._pop_card()
self.showing_cards = [c1, c2, c3]
elif len(self.showing_cards) < 5:
self.showing_cards.append(self._pop_card())
def reset(self):
for p in self.players:
p.reset()
class ChatRoom:
def __init__(self):
self.last_message = ""
self.message_cache = []
self.cnt = 0
def _add_message(self, content):
self.cnt += 1
def send(self, content):
pass
game = Game()
@app.route('/')
def index():
name = request.args.get("username")
if name is None:
return redirect("/login")
return render_template('index.html')
@app.route("/login")
def login():
print("?")
return render_template('login.html')
@app.route("/showing_card")
def showing_card():
return {"success": True, "card": game.showing_cards}
@app.route("/my_card")
def my_card():
name = request.args.get("username")
p = game.get_player(name)
if p is None:
return {"success": False}
return {"success": True, "card": p.card}
@app.route("/player_info")
def player_info():
rtn = []
for player in game.players:
rtn.append({
"card": player.card if player.show_card else [],
"name": player.name,
"credit": player.credit,
"current_bet": player.current_bet,
"balance": player.balance,
"is_discard": player.is_discard
})
return {"success": True, "players": rtn}
@app.route("/join")
def join():
username = request.args.get("username")
code = request.args.get("code")
return game.join(username, code)
@app.route("/ping")
def ping():
username = request.args.get("username")
return game.ping(username)
@app.route("/discard")
def discard():
username = request.args.get("username")
game.get_player(username).discard()
return {"success": True}
@app.route("/show")
def show():
username = request.args.get("username")
game.get_player(username).show_now()
return {"success": True}
@app.route("/add_bet")
def add_bet():
username = request.args.get("username")
add = request.args.get("add")
return game.add_bet(username, add)
@app.route("/start")
def start():
game.start()
return {"success": True}
@app.route("/next")
def next():
game.next_card()
return {"success": True}
@app.route("/end")
def end():
winnerRaw = request.args.get("winner")
ps = winnerRaw.split(",")
if len(ps) == 0 or winnerRaw == '':
return {"success": False, "msg": "必须选择至少一个赢家"}
pool_balance = 0
for p in game.players:
is_winner = False
for name in ps:
if p.name == name:
is_winner = True
break
if not is_winner:
pool_balance += p.current_bet
p.lose()
for name in ps:
p = game.get_player(name)
p.win(pool_balance / len(ps))
game.state = END
print(game.hash)
return {"success": True}
@app.route("/reset")
def reset():
game.reset()
return {"success": True}
@app.route("/game_state")
def game_state():
return {"success": True, "state": game.state}
@app.route("/remove")
def remove():
username = request.args.get("username")
game.remove_player(username)
return {"success": True}
|
# Generated by Django 2.1.7 on 2019-03-29 08:07
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('e_secretary', '0004_auto_20190329_1002'),
]
operations = [
migrations.AddField(
model_name='announcement',
name='date',
field=models.DateField(default=datetime.datetime.now),
),
migrations.AddField(
model_name='secr_announcement',
name='date',
field=models.DateField(default=datetime.datetime.now),
),
]
|
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from rest_framework.parsers import JSONParser
from rest_framework import generics
# Create your views here.
from .serializers import *
class CarOwnershipView(generics.ListAPIView):
serializer_class = CarOwnershipSerializer
def get_queryset(self):
queryset = CarOwnership.objects.all()
query = self.request.query_params.get('q', None)
if query is not None:
return queryset.filter(owner__contains=query)
return queryset
class DrivingLicenseView(generics.ListAPIView):
serializer_class = DrivingLicenseSerializer
def get_queryset(self):
queryset = DrivingLicense.objects.all()
query = self.request.query_params.get('q', None)
if query is not None:
return queryset.filter(name__contains=query)
return queryset
class EmploymentView(generics.ListAPIView):
serializer_class = EmploymentSerializer
def get_queryset(self):
queryset = Employment.objects.all()
query = self.request.query_params.get('q', None)
if query is not None:
return queryset.filter(name__contains=query)
return queryset
|
from .whatlang import detect_language
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import webapi_login_required
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.diff import DiffResource
class DraftDiffResource(DiffResource):
"""Provides information on pending draft diffs for a review request.
This list will only ever contain a maximum of one diff in current
versions. This is to preserve compatibility with the public
:ref:`webapi2.0-diff-resource`.
POSTing to this resource will create or update a review request draft
with the provided diff. This also mirrors the public diff resource.
"""
added_in = '2.0'
name = 'draft_diff'
uri_name = 'diffs'
model_parent_key = 'review_request_draft'
item_result_key = 'diff'
list_result_key = 'diffs'
mimetype_list_resource_name = 'diffs'
mimetype_item_resource_name = 'diff'
item_child_resources = [
resources.draft_filediff,
]
def get_parent_object(self, diffset):
return diffset.review_request_draft.get()
def has_access_permissions(self, request, diffset, *args, **kwargs):
return diffset.review_request_draft.get().is_accessible_by(
request.user)
def get_queryset(self, request, *args, **kwargs):
try:
draft = resources.review_request_draft.get_object(
request, *args, **kwargs)
except ObjectDoesNotExist:
raise self.model.DoesNotExist
return self.model.objects.filter(review_request_draft=draft)
@webapi_login_required
@augment_method_from(DiffResource)
def get_list(self, *args, **kwargs):
"""Returns the list of draft diffs on the review request.
Each diff has the target revision and list of per-file diffs
associated with it.
"""
pass
draft_diff_resource = DraftDiffResource()
|
from django.test import TestCase
from products.models import Category, Product
from users.models import User
class CategoryModelTest(TestCase):
@classmethod
def setUpClass(cls):
super(CategoryModelTest, cls).setUpClass()
Category.objects.create(category_name="Sodas")
def test_category_name_label(self):
category = Category.objects.get(category_name="Sodas")
field_label = category._meta.get_field("category_name").verbose_name
self.assertEquals(field_label, "category name")
def test_category_name_max_length(self):
category = Category.objects.get(category_name="Sodas")
max_length = category._meta.get_field("category_name").max_length
self.assertEquals(max_length, 255)
def test_object_name_is_model_name(self):
category = Category.objects.get(category_name="Sodas")
expected_object_name = category.category_name
self.assertEquals(expected_object_name, str(category))
class ProductModelTest(TestCase):
@classmethod
def setUpClass(cls):
super(ProductModelTest, cls).setUpClass()
Product.objects.create(
barcode="1125896345237",
product_name="Salade sans sauce",
nutriscore="A",
url="http://openfoodfact.fr/salade",
image_url="http://openfoodfact.fr/img_salade",
image_nut_url="http:/openfoodfact.fr/img_nut_url",
)
def test_product_barcode_label(self):
product = Product.objects.get(barcode="1125896345237")
field_label = product._meta.get_field("barcode").verbose_name
self.assertEquals(field_label, "barcode")
def test_product_barcode_max_length(self):
product = Product.objects.get(barcode="1125896345237")
max_length = product._meta.get_field("barcode").max_length
self.assertEquals(max_length, 13)
def test_product_name_label(self):
product = Product.objects.get(barcode="1125896345237")
field_label = product._meta.get_field("product_name").verbose_name
self.assertEquals(field_label, "product name")
def test_product_name_max_length(self):
product = Product.objects.get(barcode="1125896345237")
max_length = product._meta.get_field("product_name").max_length
self.assertEquals(max_length, 255)
def test_nutriscore_name(self):
product = Product.objects.get(barcode="1125896345237")
field_label = product._meta.get_field("nutriscore").verbose_name
self.assertEquals(field_label, "nutriscore")
def test_nutriscore_max_length(self):
product = Product.objects.get(barcode="1125896345237")
max_length = product._meta.get_field("nutriscore").max_length
self.assertEquals(max_length, 1)
def test_url_name(self):
product = Product.objects.get(barcode="1125896345237")
field_label = product._meta.get_field("url").verbose_name
self.assertEquals(field_label, "url")
def test_url_max_length(self):
product = Product.objects.get(barcode="1125896345237")
max_length = product._meta.get_field("url").max_length
self.assertEquals(max_length, 255)
def test_image_url_name(self):
product = Product.objects.get(barcode="1125896345237")
field_label = product._meta.get_field("image_url").verbose_name
self.assertEquals(field_label, "image url")
def test_image_url_max_length(self):
product = Product.objects.get(barcode="1125896345237")
max_length = product._meta.get_field("image_url").max_length
self.assertEquals(max_length, 255)
def test_image_nut_url_name(self):
product = Product.objects.get(barcode="1125896345237")
field_label = product._meta.get_field("image_nut_url").verbose_name
self.assertEquals(field_label, "image nut url")
def test_image_nut_url_max_length(self):
product = Product.objects.get(barcode="1125896345237")
max_length = product._meta.get_field("image_nut_url").max_length
self.assertEquals(max_length, 255)
def test_object_name_is_model_name(self):
product = Product.objects.get(barcode="1125896345237")
expected_object_name = product.product_name
self.assertEquals(expected_object_name, str(product))
class UsersTest(TestCase):
@classmethod
def setUpClass(cls):
super(UsersTest, cls).setUpClass()
cls.test_user1 = User.objects.create_user(
username="testuser1",
email="test@test.com",
password="1X<ISRUkw+tuK",
)
def test_user_name_is_email(self):
user = User.objects.get(username="testuser1")
self.assertEqual(str(user), user.email)
|
import argparse
import os
import shutil
import sys
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import pkg_resources
import yaml
from chia.util.path import mkdir
import chia.util.flora
def initial_config_file(filename: Union[str, Path]) -> str:
return pkg_resources.resource_string(__name__, f"initial-{filename}").decode()
def create_default_chia_config(root_path: Path, filenames=["config.yaml"]) -> None:
for filename in filenames:
default_config_file_data: str = initial_config_file(filename)
path: Path = config_path_for_filename(root_path, filename)
tmp_path: Path = path.with_suffix("." + str(os.getpid()))
mkdir(path.parent)
with open(tmp_path, "w") as f:
f.write(default_config_file_data)
try:
os.replace(str(tmp_path), str(path))
except PermissionError:
shutil.move(str(tmp_path), str(path))
def config_path_for_filename(root_path: Path, filename: Union[str, Path]) -> Path:
path_filename = Path(filename)
if path_filename.is_absolute():
return path_filename
return root_path / "config" / filename
def save_config(root_path: Path, filename: Union[str, Path], config_data: Any):
path: Path = config_path_for_filename(root_path, filename)
tmp_path: Path = path.with_suffix("." + str(os.getpid()))
with open(tmp_path, "w") as f:
yaml.safe_dump(config_data, f)
try:
os.replace(str(tmp_path), path)
except PermissionError:
shutil.move(str(tmp_path), str(path))
def load_config(
root_path: Path,
filename: Union[str, Path],
sub_config: Optional[str] = None,
exit_on_error=True,
) -> Dict:
path = config_path_for_filename(root_path, filename)
if not path.is_file():
if not exit_on_error:
raise ValueError("Config not found")
print(f"can't find {path}")
print("** please run `chia init` to migrate or create new config files **")
# TODO: fix this hack
sys.exit(-1)
    with open(path, "r") as config_file:
        r = yaml.safe_load(config_file)
if sub_config is not None:
r = r.get(sub_config)
return r
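# Usage sketch (hypothetical root path, not part of this module): save_config
# writes to a pid-suffixed temp file and then os.replace()s it over the target,
# so readers never observe a half-written YAML file; the PermissionError
# fallback covers platforms where the atomic replace fails.
#
#     root = Path.home() / ".chia" / "mainnet"
#     save_config(root, "config.yaml", {"full_node": {"port": 8444}})
#     full_node = load_config(root, "config.yaml", sub_config="full_node")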
def load_config_cli(root_path: Path, filename: str, sub_config: Optional[str] = None) -> Dict:
"""
Loads configuration from the specified filename, in the config directory,
and then overrides any properties using the passed in command line arguments.
Nested properties in the config file can be used in the command line with ".",
for example --farmer_peer.host. Does not support lists.
"""
config = load_config(root_path, filename, sub_config)
flattened_props = flatten_properties(config)
parser = argparse.ArgumentParser()
for prop_name, value in flattened_props.items():
if type(value) is list:
continue
prop_type: Callable = str2bool if type(value) is bool else type(value) # type: ignore
parser.add_argument(f"--{prop_name}", type=prop_type, dest=prop_name)
for key, value in vars(parser.parse_args()).items():
if value is not None:
flattened_props[key] = value
return unflatten_properties(flattened_props)
def flatten_properties(config: Dict) -> Dict:
properties = {}
for key, value in config.items():
if type(value) is dict:
for key_2, value_2 in flatten_properties(value).items():
properties[key + "." + key_2] = value_2
else:
properties[key] = value
return properties
def unflatten_properties(config: Dict) -> Dict:
properties: Dict = {}
for key, value in config.items():
if "." in key:
add_property(properties, key, value)
else:
properties[key] = value
return properties
def add_property(d: Dict, partial_key: str, value: Any):
key_1, key_2 = partial_key.split(".", maxsplit=1)
if key_1 not in d:
d[key_1] = {}
if "." in key_2:
add_property(d[key_1], key_2, value)
else:
d[key_1][key_2] = value
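# A minimal sketch (hypothetical values) of the flatten/unflatten round trip
# that load_config_cli relies on:
#
#     >>> flatten_properties({"farmer_peer": {"host": "127.0.0.1", "port": 8447}})
#     {'farmer_peer.host': '127.0.0.1', 'farmer_peer.port': 8447}
#     >>> unflatten_properties({"farmer_peer.host": "127.0.0.1"})
#     {'farmer_peer': {'host': '127.0.0.1'}}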
def str2bool(v: Union[str, bool]) -> bool:
# Source from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "True", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "False", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
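# For example, str2bool("YES") and str2bool("1") return True while
# str2bool("no") returns False; load_config_cli passes it as an argparse
# `type=` callable so that "--prop yes" parses into a real boolean instead of a
# truthy string.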
def traverse_dict(d: Dict, key_path: str) -> Any:
"""
Traverse nested dictionaries to find the element pointed-to by key_path.
Key path components are separated by a ':' e.g.
"root:child:a"
"""
if type(d) is not dict:
raise TypeError(f"unable to traverse into non-dict value with key path: {key_path}")
# Extract one path component at a time
components = key_path.split(":", maxsplit=1)
if components is None or len(components) == 0:
raise KeyError(f"invalid config key path: {key_path}")
key = components[0]
remaining_key_path = components[1] if len(components) > 1 else None
val: Any = d.get(key, None)
if val is not None:
if remaining_key_path is not None:
return traverse_dict(val, remaining_key_path)
return val
else:
raise KeyError(f"value not found for key: {key}")
|
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CA plugin."""
import os
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from M2Crypto import X509
XN_FLAG_SEP_MULTILINE = 4 << 16
from otopi import constants as otopicons
from otopi import util
from otopi import plugin
from otopi import transaction
from otopi import filetransaction
from ovirt_engine import util as outil
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup import dialog
@util.export
class Plugin(plugin.PluginBase):
"""CA plugin."""
def __init__(self, context):
super(Plugin, self).__init__(context=context)
self._enabled = False
self.uninstall_files = []
@plugin.event(
stage=plugin.Stages.STAGE_BOOT,
)
def _boot(self):
self.environment[
otopicons.CoreEnv.LOG_FILTER_KEYS
].append(
oenginecons.PKIEnv.STORE_PASS
)
@plugin.event(
stage=plugin.Stages.STAGE_INIT,
)
def _init(self):
self.environment.setdefault(
oenginecons.PKIEnv.STORE_PASS,
oengcommcons.Defaults.DEFAULT_PKI_STORE_PASS
)
self.environment.setdefault(
osetupcons.RenameEnv.FORCE_IGNORE_AIA_IN_CA,
None
)
@plugin.event(
stage=plugin.Stages.STAGE_SETUP,
)
def _setup(self):
self.environment[
osetupcons.RenameEnv.FILES_TO_BE_MODIFIED
].extend(
(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_CERT_TEMPLATE[
:-len('.in')],
oenginecons.FileLocations.OVIRT_ENGINE_PKI_CERT_CONF,
)
)
@plugin.event(
stage=plugin.Stages.STAGE_LATE_SETUP,
condition=lambda self: os.path.exists(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT
)
)
def _late_setup(self):
if (
X509.load_cert(
file=(
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CA_CERT
),
format=X509.FORMAT_PEM,
).get_pubkey().get_rsa().pub() != X509.load_cert(
file=oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
format=X509.FORMAT_PEM,
).get_pubkey().get_rsa().pub()
):
            self.logger.warning(_('The CA certificate of Apache has changed'))
self.dialog.note(
text=_(
                    '{apache_ca} is different from {ca}.\n'
'It was probably replaced with a 3rd party certificate.\n'
'You might want to replace it again with a certificate\n'
'for the new host name.\n'
).format(
apache_ca=(
oengcommcons.FileLocations.
OVIRT_ENGINE_PKI_APACHE_CA_CERT
),
ca=(
oenginecons.FileLocations.
OVIRT_ENGINE_PKI_ENGINE_CA_CERT
),
)
)
else:
self._enabled = True
self.environment[
osetupcons.RenameEnv.FILES_TO_BE_MODIFIED
].extend(
(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_APACHE_STORE,
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_KEY,
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CERT,
)
)
@plugin.event(
stage=plugin.Stages.STAGE_VALIDATION,
condition=lambda self: (
self._enabled and
not self.environment[
osetupcons.RenameEnv.FORCE_IGNORE_AIA_IN_CA
]
)
)
def _aia(self):
x509 = X509.load_cert(
file=oenginecons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT,
format=X509.FORMAT_PEM,
)
try:
authorityInfoAccess = x509.get_ext(
'authorityInfoAccess'
).get_value()
self.logger.warning(_('AIA extension found in CA certificate'))
self.dialog.note(
text=_(
'Please note:\n'
'The certificate for the CA contains the\n'
'"Authority Information Access" extension pointing\n'
'to the old hostname:\n'
'{aia}'
'Currently this is harmless, but it might affect future\n'
'upgrades. In version 3.3 the default was changed to\n'
                    'create a new CA certificate without this extension. If\n'
'possible, it might be better to not rely on this\n'
'program, and instead backup, cleanup and setup again\n'
'cleanly.\n'
'\n'
'More details can be found at the following address:\n'
'http://www.ovirt.org/Changing_Engine_Hostname\n'
).format(
aia=authorityInfoAccess,
),
)
if not dialog.queryBoolean(
dialog=self.dialog,
name='OVESETUP_RENAME_AIA_BYPASS',
note=_('Do you want to continue? (@VALUES@) [@DEFAULT@]: '),
prompt=True,
):
raise RuntimeError(_('Aborted by user'))
except LookupError:
pass
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
name=oengcommcons.Stages.RENAME_PKI_CONF_MISC,
)
def _misc_conffiles(self):
self.environment[
osetupcons.CoreEnv.REGISTER_UNINSTALL_GROUPS
].createGroup(
group='ca_pki',
description='PKI keys',
optional=True,
).addFiles(
group='ca_pki',
fileList=self.uninstall_files,
)
localtransaction = transaction.Transaction()
with localtransaction:
for config in (
oenginecons.FileLocations.OVIRT_ENGINE_PKI_CERT_TEMPLATE[
:-len('.in')],
oenginecons.FileLocations.OVIRT_ENGINE_PKI_CERT_CONF
):
with open(config, 'r') as f:
content = []
for line in f:
line = line.rstrip('\n')
if line.startswith('authorityInfoAccess'):
line = (
'authorityInfoAccess = '
'caIssuers;URI:http://%s:%s/ca.crt'
) % (
self.environment[
osetupcons.RenameEnv.FQDN
],
self.environment[
oengcommcons.ConfigEnv.PUBLIC_HTTP_PORT
],
)
content.append(line)
localtransaction.append(
filetransaction.FileTransaction(
name=config,
content=content,
modifiedList=self.uninstall_files,
),
)
@plugin.event(
stage=plugin.Stages.STAGE_MISC,
after=(
oengcommcons.Stages.RENAME_PKI_CONF_MISC,
),
condition=lambda self: self._enabled,
)
def _misc(self):
# TODO
# this implementation is not transactional
# too many issues with legacy ca implementation
        # need to work this out to allow transactional behavior
rc, stdout, stderr = self.execute(
args=(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_PKCS12_EXTRACT,
'--name=%s' % 'apache',
'--passin=%s' % (
self.environment[oenginecons.PKIEnv.STORE_PASS],
),
'--cert=-',
),
)
x509 = X509.load_cert_string(
string='\n'.join(stdout).encode('ascii'),
format=X509.FORMAT_PEM,
)
subject = x509.get_subject()
subject.get_entries_by_nid(
X509.X509_Name.nid['CN']
)[0].set_data(
self.environment[
osetupcons.RenameEnv.FQDN
]
)
self.execute(
(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_CA_ENROLL,
'--name=%s' % 'apache',
'--password=%s' % (
self.environment[oenginecons.PKIEnv.STORE_PASS],
),
'--subject=%s' % '/' + '/'.join(
outil.escape(s, '/\\')
for s in subject.as_text(
flags=XN_FLAG_SEP_MULTILINE,
).splitlines()
),
),
)
self.uninstall_files.extend(
(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_APACHE_STORE,
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_CERT,
)
)
self.execute(
args=(
oenginecons.FileLocations.OVIRT_ENGINE_PKI_PKCS12_EXTRACT,
'--name=%s' % 'apache',
'--passin=%s' % (
self.environment[oenginecons.PKIEnv.STORE_PASS],
),
'--key=%s' % (
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_KEY,
),
),
)
self.uninstall_files.append(
oengcommcons.FileLocations.OVIRT_ENGINE_PKI_APACHE_KEY,
)
self.environment[
oengcommcons.ApacheEnv.NEED_RESTART
] = True
# vim: expandtab tabstop=4 shiftwidth=4
|
def problem340():
pass
|
db = DAL('sqlite://storage.sqlite')
from gluon.tools import *
auth = Auth(db)
auth.define_tables()
crud = Crud(db)
db.define_table('anuncio',
Field('title'),
Field('body', 'text'),
Field('price', 'decimal(5, 2)'),
Field('negociacion', 'boolean', default=False),
Field('created_on', 'datetime', default=request.now),
Field('created_by', db.auth_user, default=auth.user_id),
Field('contacto', 'string', default=None, required=False),
format='%(title)s')
db.anuncio.title.requires = IS_NOT_IN_DB(db, 'anuncio.title')
db.anuncio.body.requires = IS_NOT_EMPTY()
db.anuncio.created_by.readable = db.anuncio.created_by.writable = False
db.anuncio.created_on.readable = db.anuncio.created_on.writable = False
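# Note: with these validators a SQLFORM(db.anuncio) form rejects empty bodies
# and duplicate titles automatically, and created_by/created_on stay out of
# forms because their readable/writable flags are False.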
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "1.9.1"
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing mixin classes for linux virtual machines.
These classes allow installation on both Debian and RHEL based linuxes.
They also handle some initial setup (especially on RHEL based linuxes
since by default sudo commands without a tty don't work) and
can restore the VM to the state it was in before packages were
installed.
To install a package on a VM, just call vm.Install(package_name).
The package name is just the name of the package module (i.e. the
file name minus .py). The framework will take care of all cleanup
for you.
"""
import abc
import collections
import copy
import logging
import os
import pipes
import posixpath
import re
import threading
import time
from typing import Dict, Set
import uuid
from absl import flags
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import os_types
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
import yaml
FLAGS = flags.FLAGS
OS_PRETTY_NAME_REGEXP = r'PRETTY_NAME="(.*)"'
CLEAR_BUILD_REGEXP = r'Installed version:\s*(.*)\s*'
UPDATE_RETRIES = 5
DEFAULT_SSH_PORT = 22
REMOTE_KEY_PATH = '~/.ssh/id_rsa'
CONTAINER_MOUNT_DIR = '/mnt'
CONTAINER_WORK_DIR = '/root'
# This pair of scripts used for executing long-running commands, which will be
# resilient in the face of SSH connection errors.
# EXECUTE_COMMAND runs a command, streaming stdout / stderr to a file, then
# writing the return code to a file. An exclusive lock is acquired on the return
# code file, so that other processes may wait for completion.
EXECUTE_COMMAND = 'execute_command.py'
# WAIT_FOR_COMMAND waits on the file lock created by EXECUTE_COMMAND,
# then copies the stdout and stderr, exiting with the status of the command run
# by EXECUTE_COMMAND.
WAIT_FOR_COMMAND = 'wait_for_command.py'
_DEFAULT_DISK_FS_TYPE = 'ext4'
_DEFAULT_DISK_MOUNT_OPTIONS = 'discard'
_DEFAULT_DISK_FSTAB_OPTIONS = 'defaults'
# regex for parsing lscpu and /proc/cpuinfo
_COLON_SEPARATED_RE = re.compile(r'^\s*(?P<key>.*?)\s*:\s*(?P<value>.*?)\s*$')
flags.DEFINE_bool('setup_remote_firewall', False,
                  'Whether PKB should configure the firewall of each remote '
                  'VM to make sure it accepts all internal connections.')
flags.DEFINE_list('sysctl', [],
                  'Sysctl values to set. This flag should be a comma-separated '
                  'list of path=value pairs. Each pair will be appended to '
                  '/etc/sysctl.conf. The presence of any items in this list '
                  'will cause a reboot to occur after VM prepare. '
                  'For example, if you pass '
                  '--sysctl=vm.dirty_background_ratio=10,vm.dirty_ratio=25, '
                  'PKB will append "vm.dirty_background_ratio=10" and '
                  '"vm.dirty_ratio=25" on separate lines to /etc/sysctl.conf '
                  'and then the machine will be rebooted before starting '
                  'the benchmark.')
flags.DEFINE_list(
'set_files',
[],
'Arbitrary filesystem configuration. This flag should be a '
'comma-separated list of path=value pairs. Each value will '
'be written to the corresponding path. For example, if you '
'pass --set_files=/sys/kernel/mm/transparent_hugepage/enabled=always, '
'then PKB will write "always" to '
'/sys/kernel/mm/transparent_hugepage/enabled before starting '
'the benchmark.')
flags.DEFINE_bool('network_enable_BBR', False,
                  'A shortcut to enable BBR congestion control on the network. '
                  'Equivalent to appending to --sysctl the following values: '
                  '"net.core.default_qdisc=fq" and '
                  '"net.ipv4.tcp_congestion_control=bbr". '
                  'As with other sysctls, this will cause a reboot to happen.')
flags.DEFINE_integer('num_disable_cpus', None,
                     'Number of CPUs to disable on the virtual machine. '
'If the VM has n CPUs, you can disable at most n-1.',
lower_bound=1)
flags.DEFINE_integer('disk_fill_size', 0,
'Size of file to create in GBs.')
flags.DEFINE_enum('disk_fs_type', _DEFAULT_DISK_FS_TYPE,
[_DEFAULT_DISK_FS_TYPE, 'xfs'],
'File system type used to format disk.')
flags.DEFINE_integer(
    'disk_block_size', None, 'Block size to format disk with. '
'Defaults to 4096 for ext4.')
flags.DEFINE_bool(
'enable_transparent_hugepages', None, 'Whether to enable or '
'disable transparent hugepages. If unspecified, the setting '
'is unchanged from the default in the OS.')
flags.DEFINE_integer(
'ssh_retries', 10, 'Default number of times to retry SSH.', lower_bound=0)
flags.DEFINE_integer(
'scp_connect_timeout', 30, 'timeout for SCP connection.', lower_bound=0)
flags.DEFINE_string(
'append_kernel_command_line', None,
'String to append to the kernel command line. The presence of any '
'non-empty string will cause a reboot to occur after VM prepare. '
'If unspecified, the kernel command line will be unmodified.')
flags.DEFINE_integer(
'tcp_max_receive_buffer', None,
'Changes the third component of the sysctl value net.ipv4.tcp_rmem. '
'This sets the maximum receive buffer for TCP socket connections in bytes. '
'Increasing this value may increase single stream TCP throughput '
    'for high-latency connections.')
flags.DEFINE_integer(
'tcp_max_send_buffer', None,
'Changes the third component of the sysctl value net.ipv4.tcp_wmem. '
'This sets the maximum send buffer for TCP socket connections in bytes. '
'Increasing this value may increase single stream TCP throughput '
    'for high-latency connections.')
flags.DEFINE_integer(
'rmem_max', None,
'Sets the sysctl value net.core.rmem_max. This sets the max OS '
'receive buffer size in bytes for all types of connections')
flags.DEFINE_integer(
'wmem_max', None,
'Sets the sysctl value net.core.wmem_max. This sets the max OS '
'send buffer size in bytes for all types of connections')
flags.DEFINE_boolean('gce_hpc_tools', False,
'Whether to apply the hpc-tools environment script.')
flags.DEFINE_boolean('disable_smt', False,
'Whether to disable SMT (Simultaneous Multithreading) '
'in BIOS.')
_DISABLE_YUM_CRON = flags.DEFINE_boolean(
'disable_yum_cron', True, 'Whether to disable the cron-run yum service.')
RETRYABLE_SSH_RETCODE = 255
class CpuVulnerabilities:
"""The 3 different vulnerablity statuses from vm.cpu_vulernabilities.
Example input:
/sys/devices/system/cpu/vulnerabilities/itlb_multihit:KVM: Vulnerable
Is put into vulnerability with a key of "itlb_multihit" and value "KVM"
Unparsed lines are put into the unknown dict.
"""
def __init__(self):
self.mitigations: Dict[str, str] = {}
self.vulnerabilities: Dict[str, str] = {}
self.notaffecteds: Set[str] = set()
self.unknowns: Dict[str, str] = {}
def AddLine(self, full_line: str) -> None:
"""Parses a line of output from the cpu/vulnerabilities/* files."""
if not full_line:
return
file_path, line = full_line.split(':', 1)
file_name = posixpath.basename(file_path)
if self._AddMitigation(file_name, line):
return
if self._AddVulnerability(file_name, line):
return
if self._AddNotAffected(file_name, line):
return
self.unknowns[file_name] = line
def _AddMitigation(self, file_name, line):
match = re.match('^Mitigation: (.*)', line) or re.match(
'^([^:]+): Mitigation: (.*)$', line)
if match:
self.mitigations[file_name] = ':'.join(match.groups())
return True
def _AddVulnerability(self, file_name, line):
match = re.match('^Vulnerable: (.*)', line) or re.match(
'^Vulnerable$', line) or re.match('^([^:]+): Vulnerable$', line)
if match:
self.vulnerabilities[file_name] = ':'.join(match.groups())
return True
def _AddNotAffected(self, file_name, line):
match = re.match('^Not affected$', line)
if match:
self.notaffecteds.add(file_name)
return True
@property
def asdict(self) -> Dict[str, str]:
"""Returns the parsed CPU vulnerabilities as a dict."""
ret = {}
if self.mitigations:
ret['mitigations'] = ','.join(sorted(self.mitigations))
for key, value in self.mitigations.items():
ret[f'mitigation_{key}'] = value
if self.vulnerabilities:
ret['vulnerabilities'] = ','.join(sorted(self.vulnerabilities))
for key, value in self.vulnerabilities.items():
ret[f'vulnerability_{key}'] = value
if self.unknowns:
ret['unknowns'] = ','.join(self.unknowns)
for key, value in self.unknowns.items():
ret[f'unknown_{key}'] = value
if self.notaffecteds:
ret['notaffecteds'] = ','.join(sorted(self.notaffecteds))
return ret
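# A minimal sketch of the parser on a hypothetical sysfs line:
#
#     cv = CpuVulnerabilities()
#     cv.AddLine(
#         '/sys/devices/system/cpu/vulnerabilities/l1tf:Mitigation: PTE Inversion')
#     assert cv.mitigations == {'l1tf': 'PTE Inversion'}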
class BaseLinuxMixin(virtual_machine.BaseOsMixin):
"""Class that holds Linux related VM methods and attributes."""
# If multiple ssh calls are made in parallel using -t it will mess
# the stty settings up and the terminal will become very hard to use.
# Serializing calls to ssh with the -t option fixes the problem.
_pseudo_tty_lock = threading.Lock()
# TODO(user): Remove all uses of Python 2.
PYTHON_2_PACKAGE = 'python2'
def __init__(self, *args, **kwargs):
super(BaseLinuxMixin, self).__init__(*args, **kwargs)
# N.B. If you override ssh_port you must override remote_access_ports and
# primary_remote_access_port.
self.ssh_port = DEFAULT_SSH_PORT
self.remote_access_ports = [self.ssh_port]
self.primary_remote_access_port = self.ssh_port
self.has_private_key = False
self._remote_command_script_upload_lock = threading.Lock()
self._has_remote_command_script = False
self._needs_reboot = False
self._lscpu_cache = None
self._partition_table = {}
self._proccpu_cache = None
self._smp_affinity_script = None
def _Suspend(self):
"""Suspends a VM."""
raise NotImplementedError()
def _Resume(self):
"""Resumes a VM."""
raise NotImplementedError()
def _BeforeSuspend(self):
pass
def _CreateVmTmpDir(self):
self.RemoteCommand('mkdir -p %s' % vm_util.VM_TMP_DIR)
def _SetTransparentHugepages(self):
"""Sets transparent hugepages based on --enable_transparent_hugepages.
If the flag is unset (None), this is a nop.
"""
if FLAGS.enable_transparent_hugepages is None:
return
setting = 'always' if FLAGS.enable_transparent_hugepages else 'never'
self.RemoteCommand(
'echo %s | sudo tee /sys/kernel/mm/transparent_hugepage/enabled' %
setting)
self.os_metadata['transparent_hugepage'] = setting
def _SetupRobustCommand(self):
"""Sets up the RobustRemoteCommand tooling.
This includes installing python3 and pushing scripts required by
RobustRemoteCommand to this VM. There is a check to skip if previously
installed.
"""
with self._remote_command_script_upload_lock:
if not self._has_remote_command_script:
# Python3 is needed for RobustRemoteCommands
self.Install('python3')
for f in (EXECUTE_COMMAND, WAIT_FOR_COMMAND):
remote_path = os.path.join(vm_util.VM_TMP_DIR, os.path.basename(f))
if os.path.basename(remote_path):
self.RemoteCommand('sudo rm -f ' + remote_path)
self.PushDataFile(f, remote_path)
self._has_remote_command_script = True
def RobustRemoteCommand(self, command, should_log=False, timeout=None,
ignore_failure=False):
"""Runs a command on the VM in a more robust way than RemoteCommand.
This is used for long-running commands that might experience network issues
that would normally interrupt a RemoteCommand and fail to provide results.
Executes a command via a pair of scripts on the VM:
* EXECUTE_COMMAND, which runs 'command' in a nohupped background process.
* WAIT_FOR_COMMAND, which first waits on confirmation that EXECUTE_COMMAND
has acquired an exclusive lock on a file with the command's status. This
is done by waiting for the existence of a file written by EXECUTE_COMMAND
once it successfully acquires an exclusive lock. Once confirmed,
      WAIT_FOR_COMMAND waits to acquire the file lock held by EXECUTE_COMMAND until
'command' completes, then returns with the stdout, stderr, and exit status
of 'command'.
    Temporary SSH failures (where ssh returns a 255) while waiting for the
    command to complete will be tolerated and safely retried. However, if the
    remote command itself returns 255, it is remapped to 1 to bypass the
    retry behavior.
Args:
command: The command to run.
should_log: Whether to log the command's output at the info level. The
output is always logged at the debug level.
timeout: The timeout for the command in seconds.
ignore_failure: Ignore any failure if set to true.
Returns:
A tuple of stdout, stderr from running the command.
Raises:
RemoteCommandError: If there was a problem establishing the connection, or
the command fails.
"""
self._SetupRobustCommand()
execute_path = os.path.join(vm_util.VM_TMP_DIR,
os.path.basename(EXECUTE_COMMAND))
wait_path = os.path.join(vm_util.VM_TMP_DIR,
os.path.basename(WAIT_FOR_COMMAND))
uid = uuid.uuid4()
file_base = os.path.join(vm_util.VM_TMP_DIR, 'cmd%s' % uid)
wrapper_log = file_base + '.log'
stdout_file = file_base + '.stdout'
stderr_file = file_base + '.stderr'
status_file = file_base + '.status'
exclusive_file = file_base + '.exclusive'
if not isinstance(command, str):
command = ' '.join(command)
start_command = ['nohup', 'python3', execute_path,
'--stdout', stdout_file,
'--stderr', stderr_file,
'--status', status_file,
'--exclusive', exclusive_file,
'--command', pipes.quote(command)] # pyformat: disable
if timeout:
start_command.extend(['--timeout', str(timeout)])
start_command = '%s 1> %s 2>&1 &' % (' '.join(start_command),
wrapper_log)
self.RemoteCommand(start_command)
def _WaitForCommand():
wait_command = ['python3', wait_path,
'--status', status_file,
'--exclusive', exclusive_file] # pyformat: disable
stdout = ''
while 'Command finished.' not in stdout:
stdout, _ = self.RemoteCommand(
' '.join(wait_command), should_log=should_log, timeout=1800)
wait_command.extend([
'--stdout', stdout_file,
'--stderr', stderr_file,
'--delete',
]) # pyformat: disable
return self.RemoteCommand(' '.join(wait_command), should_log=should_log,
ignore_failure=ignore_failure)
try:
return _WaitForCommand()
except errors.VirtualMachine.RemoteCommandError:
# In case the error was with the wrapper script itself, print the log.
stdout, _ = self.RemoteCommand('cat %s' % wrapper_log, should_log=False)
if stdout.strip():
logging.warning('Exception during RobustRemoteCommand. '
'Wrapper script log:\n%s', stdout)
raise
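  # Usage sketch (hypothetical command): a long benchmark run survives dropped
  # SSH connections because only the lightweight wait script is retried, never
  # the workload itself:
  #
  #     stdout, _ = vm.RobustRemoteCommand('./run_workload.sh', timeout=3600)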
def SetupRemoteFirewall(self):
"""Sets up IP table configurations on the VM."""
self.RemoteHostCommand('sudo iptables -A INPUT -j ACCEPT')
self.RemoteHostCommand('sudo iptables -A OUTPUT -j ACCEPT')
def SetupProxy(self):
"""Sets up proxy configuration variables for the cloud environment."""
env_file = '/etc/environment'
commands = []
if FLAGS.http_proxy:
commands.append("echo 'http_proxy=%s' | sudo tee -a %s" % (
FLAGS.http_proxy, env_file))
if FLAGS.https_proxy:
commands.append("echo 'https_proxy=%s' | sudo tee -a %s" % (
FLAGS.https_proxy, env_file))
if FLAGS.ftp_proxy:
commands.append("echo 'ftp_proxy=%s' | sudo tee -a %s" % (
FLAGS.ftp_proxy, env_file))
if commands:
self.RemoteCommand(';'.join(commands))
def SetupPackageManager(self):
"""Specific Linux flavors should override this."""
pass
def PrepareVMEnvironment(self):
super(BaseLinuxMixin, self).PrepareVMEnvironment()
self.SetupProxy()
self._CreateVmTmpDir()
self._SetTransparentHugepages()
if FLAGS.setup_remote_firewall:
self.SetupRemoteFirewall()
if self.install_packages:
self._CreateInstallDir()
if self.is_static:
self.SnapshotPackages()
self.SetupPackageManager()
self.SetFiles()
self.DoSysctls()
self._DoAppendKernelCommandLine()
self.DoConfigureNetworkForBBR()
self.DoConfigureTCPWindow()
self.UpdateEnvironmentPath()
self._DisableCpus()
self._RebootIfNecessary()
self.RecordAdditionalMetadata()
self.BurnCpu()
self.FillDisk()
def _CreateInstallDir(self):
self.RemoteCommand(
('sudo mkdir -p {0}; '
'sudo chmod a+rwxt {0}').format(linux_packages.INSTALL_DIR))
# LinuxMixins do not implement _Start or _Stop
def _Start(self):
"""Starts the VM."""
raise NotImplementedError()
def _Stop(self):
"""Stops the VM."""
raise NotImplementedError()
def SetFiles(self):
"""Apply --set_files to the VM."""
for pair in FLAGS.set_files:
path, value = pair.split('=')
self.RemoteCommand('echo "%s" | sudo tee %s' %
(value, path))
def _DisableCpus(self):
"""Apply num_disable_cpus to the VM.
Raises:
      ValueError: if num_disable_cpus is not between 1 and num_cpus - 1,
        inclusive.
"""
if not FLAGS.num_disable_cpus:
return
self.num_disable_cpus = FLAGS.num_disable_cpus
if (self.num_disable_cpus <= 0 or
self.num_disable_cpus >= self.num_cpus):
raise ValueError('num_disable_cpus must be between 1 and '
'(num_cpus - 1) inclusive. '
'num_disable_cpus: %i, num_cpus: %i' %
(self.num_disable_cpus, self.num_cpus))
    # We can't disable cpu 0, so we start from the last cpu in /proc/cpuinfo.
# On multiprocessor systems, we also attempt to disable cpus on each
# physical processor based on "physical id" in order to keep a similar
# number of cpus on each physical processor.
# In addition, for each cpu we disable, we will look for cpu with same
# "core id" in order to disable vcpu pairs.
cpus = copy.deepcopy(self.CheckProcCpu().mappings)
cpu_mapping = collections.defaultdict(list)
for cpu, info in cpus.items():
numa = info.get('physical id')
cpu_mapping[int(numa)].append((cpu, int(info.get('core id'))))
# Sort cpus based on 'core id' on each numa node
for numa in cpu_mapping:
cpu_mapping[numa] = sorted(
cpu_mapping[numa],
key=lambda cpu_info: (cpu_info[1], cpu_info[0]))
def _GetNextCPUToDisable(num_disable_cpus):
"""Get the next CPU id to disable."""
numa_nodes = list(cpu_mapping)
while num_disable_cpus:
for numa in sorted(numa_nodes, reverse=True):
cpu_id, _ = cpu_mapping[numa].pop()
num_disable_cpus -= 1
yield cpu_id
if not num_disable_cpus:
break
for cpu_id in _GetNextCPUToDisable(self.num_disable_cpus):
self.RemoteCommand('sudo bash -c "echo 0 > '
f'/sys/devices/system/cpu/cpu{cpu_id}/online"')
self._proccpu_cache = None
self._lscpu_cache = None
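  # Worked example (hypothetical 2-socket VM with 8 vCPUs, 4 per NUMA node):
  # with --num_disable_cpus=2 the generator above pops the highest (core id,
  # cpu id) entry from each NUMA node in turn, so one cpu goes offline per
  # socket and both sockets keep an equal number of online cpus.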
def UpdateEnvironmentPath(self):
"""Specific Linux flavors should override this."""
pass
def FillDisk(self):
"""Fills the primary scratch disk with a zeros file."""
if FLAGS.disk_fill_size:
out_file = posixpath.join(self.scratch_disks[0].mount_point, 'fill_file')
self.RobustRemoteCommand(
'dd if=/dev/zero of={out_file} bs=1G count={fill_size}'.format(
out_file=out_file, fill_size=FLAGS.disk_fill_size))
def _ApplySysctlPersistent(self, sysctl_params):
"""Apply "key=value" pairs to /etc/sysctl.conf and mark the VM for reboot.
The reboot ensures the values take effect and remain persistent across
future reboots.
Args:
sysctl_params: dict - the keys and values to write
"""
if not sysctl_params:
return
for key, value in sysctl_params.items():
self.RemoteCommand('sudo bash -c \'echo "%s=%s" >> /etc/sysctl.conf\''
% (key, value))
self._needs_reboot = True
def ApplySysctlPersistent(self, sysctl_params):
"""Apply "key=value" pairs to /etc/sysctl.conf and reboot immediately.
The reboot ensures the values take effect and remain persistent across
future reboots.
Args:
sysctl_params: dict - the keys and values to write
"""
self._ApplySysctlPersistent(sysctl_params)
self._RebootIfNecessary()
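  # Usage sketch (hypothetical values): persist one setting and reboot once so
  # it takes effect immediately and survives later reboots:
  #
  #     vm.ApplySysctlPersistent({'vm.max_map_count': '262144'})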
def DoSysctls(self):
"""Apply --sysctl to the VM.
The Sysctl pairs are written persistently so that if a reboot
occurs, the flags are not lost.
"""
sysctl_params = {}
for pair in FLAGS.sysctl:
key, value = pair.split('=')
sysctl_params[key] = value
self._ApplySysctlPersistent(sysctl_params)
def DoConfigureNetworkForBBR(self):
"""Apply --network_enable_BBR to the VM."""
if not FLAGS.network_enable_BBR:
return
if not KernelRelease(self.kernel_release).AtLeast(4, 9):
raise flags.ValidationError(
'BBR requires a linux image with kernel 4.9 or newer')
# if the current congestion control mechanism is already BBR
# then nothing needs to be done (avoid unnecessary reboot)
if self.TcpCongestionControl() == 'bbr':
return
self._ApplySysctlPersistent({
'net.core.default_qdisc': 'fq',
'net.ipv4.tcp_congestion_control': 'bbr'
})
def DoConfigureTCPWindow(self):
"""Change TCP window parameters in sysctl."""
# Return if none of these flags are set
if all(x is None for x in [FLAGS.tcp_max_receive_buffer,
FLAGS.tcp_max_send_buffer,
FLAGS.rmem_max,
FLAGS.wmem_max]):
return
# Get current values from VM
stdout, _ = self.RemoteCommand('cat /proc/sys/net/ipv4/tcp_rmem')
rmem_values = stdout.split()
stdout, _ = self.RemoteCommand('cat /proc/sys/net/ipv4/tcp_wmem')
wmem_values = stdout.split()
stdout, _ = self.RemoteCommand('cat /proc/sys/net/core/rmem_max')
rmem_max = int(stdout)
stdout, _ = self.RemoteCommand('cat /proc/sys/net/core/wmem_max')
wmem_max = int(stdout)
# third number is max receive/send
max_receive = rmem_values[2]
max_send = wmem_values[2]
# if flags are set, override current values from vm
if FLAGS.tcp_max_receive_buffer:
max_receive = FLAGS.tcp_max_receive_buffer
if FLAGS.tcp_max_send_buffer:
max_send = FLAGS.tcp_max_send_buffer
if FLAGS.rmem_max:
rmem_max = FLAGS.rmem_max
if FLAGS.wmem_max:
wmem_max = FLAGS.wmem_max
# Add values to metadata
self.os_metadata['tcp_max_receive_buffer'] = max_receive
self.os_metadata['tcp_max_send_buffer'] = max_send
self.os_metadata['rmem_max'] = rmem_max
self.os_metadata['wmem_max'] = wmem_max
rmem_string = '{} {} {}'.format(rmem_values[0],
rmem_values[1],
max_receive)
wmem_string = '{} {} {}'.format(wmem_values[0],
wmem_values[1],
max_send)
self._ApplySysctlPersistent({
'net.ipv4.tcp_rmem': rmem_string,
'net.ipv4.tcp_wmem': wmem_string,
'net.core.rmem_max': rmem_max,
'net.core.wmem_max': wmem_max
})
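  # Worked example (hypothetical flag value): with --tcp_max_receive_buffer=6291456
  # and a current tcp_rmem of "4096 131072 4194304", only the third (max) field is
  # replaced, yielding net.ipv4.tcp_rmem = "4096 131072 6291456".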
def _RebootIfNecessary(self):
"""Will reboot the VM if self._needs_reboot has been set."""
if self._needs_reboot:
self.Reboot()
self._needs_reboot = False
def TcpCongestionControl(self):
"""Return the congestion control used for tcp."""
try:
resp, _ = self.RemoteCommand(
'cat /proc/sys/net/ipv4/tcp_congestion_control')
return resp.rstrip('\n')
except errors.VirtualMachine.RemoteCommandError:
return 'unknown'
def CheckLsCpu(self):
"""Returns a LsCpuResults from the host VM."""
if not self._lscpu_cache:
lscpu, _ = self.RemoteCommand('lscpu')
self._lscpu_cache = LsCpuResults(lscpu)
return self._lscpu_cache
def CheckProcCpu(self):
"""Returns a ProcCpuResults from the host VM."""
if not self._proccpu_cache:
proccpu, _ = self.RemoteCommand('cat /proc/cpuinfo')
self._proccpu_cache = ProcCpuResults(proccpu)
return self._proccpu_cache
def GetOsInfo(self):
"""Returns information regarding OS type and version."""
stdout, _ = self.RemoteCommand('grep PRETTY_NAME /etc/os-release')
return regex_util.ExtractGroup(OS_PRETTY_NAME_REGEXP, stdout)
@property
def os_info(self):
"""Get distribution-specific information."""
if self.os_metadata.get('os_info'):
return self.os_metadata['os_info']
else:
return self.GetOsInfo()
@property
def kernel_release(self):
"""Return kernel release number."""
if self.os_metadata.get('kernel_release'):
return self.os_metadata.get('kernel_release')
else:
stdout, _ = self.RemoteCommand('uname -r')
return stdout.strip()
@property
def kernel_command_line(self):
"""Return the kernel command line."""
return (self.os_metadata.get('kernel_command_line') or
self.RemoteCommand('cat /proc/cmdline')[0].strip())
@property
def partition_table(self):
"""Return partition table information."""
if not self._partition_table:
cmd = 'sudo fdisk -l'
partition_tables = self.RemoteCommand(cmd)[0]
try:
self._partition_table = {
dev: int(size) for (dev, size) in regex_util.ExtractAllMatches(
r'Disk\s*(.*):[\s\w\.]*,\s(\d*)\sbytes', partition_tables)}
except regex_util.NoMatchError:
# TODO(user): Use alternative methods to retrieve partition table.
logging.warning('Partition table not found with "%s".', cmd)
return self._partition_table
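  # Example (hypothetical fdisk output): a line such as
  #   Disk /dev/sda: 10 GiB, 10737418240 bytes, 20971520 sectors
  # is parsed into {'/dev/sda': 10737418240}.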
@vm_util.Retry(log_errors=False, poll_interval=1)
def WaitForBootCompletion(self):
"""Waits until the VM has booted."""
# Test for listening on the port first, because this will happen strictly
# first.
if (FLAGS.cluster_boot_test_port_listening and
self.port_listening_time is None):
self.TestConnectRemoteAccessPort()
self.port_listening_time = time.time()
self._WaitForSSH()
if self.bootable_time is None:
self.bootable_time = time.time()
@vm_util.Retry(log_errors=False, poll_interval=1)
def _WaitForSSH(self):
"""Waits until the VM is ready."""
# Always wait for remote host command to succeed, because it is necessary to
# run benchmarks
resp, _ = self.RemoteHostCommand('hostname', retries=1,
suppress_warning=True)
if self.hostname is None:
self.hostname = resp[:-1]
def RecordAdditionalMetadata(self):
"""After the VM has been prepared, store metadata about the VM."""
self.tcp_congestion_control = self.TcpCongestionControl()
lscpu_results = self.CheckLsCpu()
self.numa_node_count = lscpu_results.numa_node_count
self.os_metadata['threads_per_core'] = lscpu_results.threads_per_core
self.os_metadata['os_info'] = self.os_info
self.os_metadata['kernel_release'] = self.kernel_release
self.os_metadata.update(self.partition_table)
if FLAGS.append_kernel_command_line:
self.os_metadata['kernel_command_line'] = self.kernel_command_line
self.os_metadata[
'append_kernel_command_line'] = FLAGS.append_kernel_command_line
@vm_util.Retry(log_errors=False, poll_interval=1)
def VMLastBootTime(self):
"""Returns the time the VM was last rebooted as reported by the VM.
See
https://unix.stackexchange.com/questions/165002/how-to-reliably-get-timestamp-at-which-the-system-booted.
"""
stdout, _ = self.RemoteHostCommand(
'stat -c %z /proc/', retries=1, suppress_warning=True)
if stdout.startswith('1970-01-01'):
# Fix for ARM returning epochtime
date_fmt = '+%Y-%m-%d %H:%M:%S.%s %z'
date_cmd = "grep btime /proc/stat | awk '{print $2}'"
stdout, _ = self.RemoteHostCommand(f'date "{date_fmt}" -d@$({date_cmd})')
return stdout
def SnapshotPackages(self):
"""Grabs a snapshot of the currently installed packages."""
pass
def RestorePackages(self):
"""Restores the currently installed packages to those snapshotted."""
pass
def PackageCleanup(self):
"""Cleans up all installed packages.
Deletes the temp directory, restores packages, and uninstalls all
PerfKit packages.
"""
for package_name in self._installed_packages:
self.Uninstall(package_name)
self.RestorePackages()
self.RemoteCommand('sudo rm -rf %s' % linux_packages.INSTALL_DIR)
def GetPathToConfig(self, package_name):
"""Returns the path to the config file for PerfKit packages.
    This function is mostly useful when config file locations
don't match across distributions (such as mysql). Packages don't
need to implement it if this is not the case.
"""
pass
def GetServiceName(self, package_name):
"""Returns the service name of a PerfKit package.
This function is mostly useful when service names don't
match across distributions (such as mongodb). Packages don't
need to implement it if this is not the case.
"""
pass
@vm_util.Retry()
def FormatDisk(self, device_path, disk_type=None):
"""Formats a disk attached to the VM."""
# Some images may automount one local disk, but we don't
# want to fail if this wasn't the case.
if disk.NFS == disk_type:
return
if disk.SMB == disk_type:
return
umount_cmd = '[[ -d /mnt ]] && sudo umount /mnt; '
# TODO(user): Allow custom disk formatting options.
if FLAGS.disk_fs_type == 'xfs':
block_size = FLAGS.disk_block_size or 512
fmt_cmd = ('sudo mkfs.xfs -f -i size={0} {1}'.format(
block_size, device_path))
else:
block_size = FLAGS.disk_block_size or 4096
fmt_cmd = ('sudo mke2fs -F -E lazy_itable_init=0,discard -O '
'^has_journal -t ext4 -b {0} {1}'.format(
block_size, device_path))
self.os_metadata['disk_filesystem_type'] = FLAGS.disk_fs_type
self.os_metadata['disk_filesystem_blocksize'] = block_size
self.RemoteHostCommand(umount_cmd + fmt_cmd)
@vm_util.Retry(
timeout=vm_util.DEFAULT_TIMEOUT,
retryable_exceptions=(errors.VirtualMachine.RemoteCommandError,))
def MountDisk(self,
device_path,
mount_path,
disk_type=None,
mount_options=disk.DEFAULT_MOUNT_OPTIONS,
fstab_options=disk.DEFAULT_FSTAB_OPTIONS):
"""Mounts a formatted disk in the VM."""
mount_options = '-o %s' % mount_options if mount_options else ''
if disk.NFS == disk_type:
mount_options = '-t nfs %s' % mount_options
fs_type = 'nfs'
elif disk.SMB == disk_type:
mount_options = '-t cifs %s' % mount_options
fs_type = 'smb'
else:
fs_type = FLAGS.disk_fs_type
fstab_options = fstab_options or ''
mnt_cmd = ('sudo mkdir -p {mount_path};'
'sudo mount {mount_options} {device_path} {mount_path} && '
'sudo chown $USER:$USER {mount_path};').format(
mount_path=mount_path,
device_path=device_path,
mount_options=mount_options)
self.RemoteHostCommand(mnt_cmd)
# add to /etc/fstab to mount on reboot
mnt_cmd = ('echo "{device_path} {mount_path} {fs_type} {fstab_options}" '
'| sudo tee -a /etc/fstab').format(
device_path=device_path,
mount_path=mount_path,
fs_type=fs_type,
fstab_options=fstab_options)
self.RemoteHostCommand(mnt_cmd)
def LogVmDebugInfo(self):
"""Logs the output of calling dmesg on the VM."""
if FLAGS.log_dmesg:
self.RemoteCommand('hostname && dmesg', should_log=True)
def RemoteCopy(self, file_path, remote_path='', copy_to=True):
self.RemoteHostCopy(file_path, remote_path, copy_to)
def RemoteHostCopy(self, file_path, remote_path='', copy_to=True):
"""Copies a file to or from the VM.
Args:
file_path: Local path to file.
remote_path: Optional path of where to copy file on remote host.
copy_to: True to copy to vm, False to copy from vm.
Raises:
RemoteCommandError: If there was a problem copying the file.
"""
if vm_util.RunningOnWindows():
if ':' in file_path:
# scp doesn't like colons in paths.
file_path = file_path.split(':', 1)[1]
# Replace the last instance of '\' with '/' to make scp happy.
file_path = '/'.join(file_path.rsplit('\\', 1))
remote_ip = '[%s]' % self.GetConnectionIp()
remote_location = '%s@%s:%s' % (
self.user_name, remote_ip, remote_path)
scp_cmd = ['scp', '-P', str(self.ssh_port), '-pr']
# An scp is not retried, so increase the connection timeout.
ssh_private_key = (self.ssh_private_key if self.is_static else
vm_util.GetPrivateKeyPath())
scp_cmd.extend(vm_util.GetSshOptions(
ssh_private_key, connect_timeout=FLAGS.scp_connect_timeout))
if copy_to:
scp_cmd.extend([file_path, remote_location])
else:
scp_cmd.extend([remote_location, file_path])
stdout, stderr, retcode = vm_util.IssueCommand(scp_cmd, timeout=None,
raise_on_failure=False)
if retcode:
full_cmd = ' '.join(scp_cmd)
error_text = ('Got non-zero return code (%s) executing %s\n'
'STDOUT: %sSTDERR: %s' %
(retcode, full_cmd, stdout, stderr))
raise errors.VirtualMachine.RemoteCommandError(error_text)
def RemoteCommand(self, *args, **kwargs):
"""Runs a command on the VM.
Args:
*args: Arguments passed directly to RemoteCommandWithReturnCode.
**kwargs: Keyword arguments passed directly to
RemoteCommandWithReturnCode.
Returns:
A tuple of stdout, stderr from running the command.
Raises:
RemoteCommandError: If there was a problem establishing the connection.
"""
return self.RemoteCommandWithReturnCode(*args, **kwargs)[:2]
def RemoteCommandWithReturnCode(self, *args, **kwargs):
"""Runs a command on the VM.
Args:
*args: Arguments passed directly to RemoteHostCommandWithReturnCode.
**kwargs: Keyword arguments passed directly to
RemoteHostCommandWithReturnCode.
Returns:
A tuple of stdout, stderr, return_code from running the command.
Raises:
RemoteCommandError: If there was a problem establishing the connection.
"""
return self.RemoteHostCommandWithReturnCode(*args, **kwargs)
def RemoteHostCommandWithReturnCode(self,
command,
should_log=False,
retries=None,
ignore_failure=False,
login_shell=False,
suppress_warning=False,
timeout=None):
"""Runs a command on the VM.
This is guaranteed to run on the host VM, whereas RemoteCommand might run
    within, e.g., a container in the host VM.
Args:
command: A valid bash command.
should_log: A boolean indicating whether the command result should be
logged at the info level. Even if it is false, the results will
still be logged at the debug level.
retries: The maximum number of times RemoteCommand should retry SSHing
when it receives a 255 return code. If None, it defaults to the value
of the flag ssh_retries.
ignore_failure: Ignore any failure if set to true.
login_shell: Run command in a login shell.
suppress_warning: Suppress the result logging from IssueCommand when the
return code is non-zero.
timeout: The timeout for IssueCommand.
Returns:
A tuple of stdout, stderr, return_code from running the command.
Raises:
RemoteCommandError: If there was a problem establishing the connection.
"""
if retries is None:
retries = FLAGS.ssh_retries
if vm_util.RunningOnWindows():
# Multi-line commands passed to ssh won't work on Windows unless the
# newlines are escaped.
command = command.replace('\n', '\\n')
ip_address = self.GetConnectionIp()
user_host = '%s@%s' % (self.user_name, ip_address)
ssh_cmd = ['ssh', '-A', '-p', str(self.ssh_port), user_host]
ssh_private_key = (self.ssh_private_key if self.is_static else
vm_util.GetPrivateKeyPath())
ssh_cmd.extend(vm_util.GetSshOptions(ssh_private_key))
try:
if login_shell:
ssh_cmd.extend(['-t', '-t', 'bash -l -c "%s"' % command])
self._pseudo_tty_lock.acquire()
else:
ssh_cmd.append(command)
for _ in range(retries):
stdout, stderr, retcode = vm_util.IssueCommand(
ssh_cmd, force_info_log=should_log,
suppress_warning=suppress_warning,
timeout=timeout, raise_on_failure=False)
# Retry on 255 because this indicates an SSH failure
if retcode != RETRYABLE_SSH_RETCODE:
break
finally:
if login_shell:
self._pseudo_tty_lock.release()
if retcode:
full_cmd = ' '.join(ssh_cmd)
error_text = ('Got non-zero return code (%s) executing %s\n'
'Full command: %s\nSTDOUT: %sSTDERR: %s' %
(retcode, command, full_cmd, stdout, stderr))
if not ignore_failure:
raise errors.VirtualMachine.RemoteCommandError(error_text)
return (stdout, stderr, retcode)
def RemoteHostCommand(self, *args, **kwargs):
"""Runs a command on the VM.
This is guaranteed to run on the host VM, whereas RemoteCommand might run
    within, e.g., a container in the host VM.
Args:
*args: Arguments passed directly to RemoteHostCommandWithReturnCode.
**kwargs: Keyword arguments passed directly to
RemoteHostCommandWithReturnCode.
Returns:
A tuple of stdout, stderr from running the command.
Raises:
RemoteCommandError: If there was a problem establishing the connection.
"""
return self.RemoteHostCommandWithReturnCode(*args, **kwargs)[:2]
def _CheckRebootability(self):
if not self.IS_REBOOTABLE:
raise errors.VirtualMachine.VirtualMachineError(
"Trying to reboot a VM that isn't rebootable.")
def _Reboot(self):
"""OS-specific implementation of reboot command."""
self._CheckRebootability()
self.RemoteCommand('sudo reboot', ignore_failure=True)
def _AfterReboot(self):
"""Performs any OS-specific setup on the VM following reboot.
This will be called after every call to Reboot().
"""
    # Clear out os_info and kernel_release since they might have changed.
previous_os_info = self.os_metadata.pop('os_info', None)
previous_kernel_release = self.os_metadata.pop('kernel_release', None)
previous_kernel_command = self.os_metadata.pop('kernel_command_line', None)
if previous_os_info or previous_kernel_release or previous_kernel_command:
self.RecordAdditionalMetadata()
if self._lscpu_cache:
self._lscpu_cache = None
self.CheckLsCpu()
if self.install_packages:
self._CreateInstallDir()
self._CreateVmTmpDir()
self._SetTransparentHugepages()
self._has_remote_command_script = False
self._DisableCpus()
def MoveFile(self, target, source_path, remote_path=''):
self.MoveHostFile(target, source_path, remote_path)
def MoveHostFile(self, target, source_path, remote_path=''):
"""Copies a file from one VM to a target VM.
Args:
target: The target BaseVirtualMachine object.
source_path: The location of the file on the REMOTE machine.
remote_path: The destination of the file on the TARGET machine, default
is the home directory.
"""
self.AuthenticateVm()
# TODO(user): For security we may want to include
# -o UserKnownHostsFile=/dev/null in the scp command
# however for the moment, this has happy side effects
    # ie: the key is added to the known_hosts file, which allows
# OpenMPI to operate correctly.
remote_location = '%s@%s:%s' % (
target.user_name, target.ip_address, remote_path)
self.RemoteHostCommand('scp -P %s -o StrictHostKeyChecking=no -i %s %s %s' %
(target.ssh_port, REMOTE_KEY_PATH, source_path,
remote_location))
def AuthenticateVm(self):
"""Authenticate a remote machine to access all peers."""
if not self.is_static and not self.has_private_key:
self.RemoteHostCopy(vm_util.GetPrivateKeyPath(),
REMOTE_KEY_PATH)
self.RemoteCommand(
'echo "Host *\n StrictHostKeyChecking no\n" > ~/.ssh/config')
self.RemoteCommand('chmod 600 ~/.ssh/config')
self.has_private_key = True
def TestAuthentication(self, peer):
"""Tests whether the VM can access its peer.
Raises:
AuthError: If the VM cannot access its peer.
"""
if not self.TryRemoteCommand('ssh %s hostname' % peer.internal_ip):
raise errors.VirtualMachine.AuthError(
'Authentication check failed. If you are running with Static VMs, '
'please make sure that %s can ssh into %s without supplying any '
'arguments except the ip address.' % (self, peer))
def CheckJavaVersion(self):
"""Check the version of java on remote machine.
Returns:
The version of Java installed on remote machine.
"""
version, _ = self.RemoteCommand('java -version 2>&1 >/dev/null | '
'grep version | '
'awk \'{print $3}\'')
return version[:-1]
def RemoveFile(self, filename):
"""Deletes a file on a remote machine.
Args:
filename: Path to the file to delete.
"""
self.RemoteCommand('sudo rm -rf %s' % filename)
def GetDeviceSizeFromPath(self, path):
"""Gets the size of the a drive that contains the path specified.
Args:
      path: A path on the file system; the size of the file system containing
        it will be reported.
Returns:
The size in 1K blocks of the file system containing the file.
"""
df_command = "df -k -P %s | tail -n +2 | awk '{ print $2 }'" % path
stdout, _ = self.RemoteCommand(df_command)
return int(stdout)
def DropCaches(self):
"""Drops the VM's caches."""
drop_caches_command = 'sudo /sbin/sysctl vm.drop_caches=3'
self.RemoteCommand(drop_caches_command)
def _GetNumCpus(self):
"""Returns the number of logical CPUs on the VM.
This method does not cache results (unlike "num_cpus").
"""
stdout, _ = self.RemoteCommand(
'cat /proc/cpuinfo | grep processor | wc -l')
return int(stdout)
def _GetTotalFreeMemoryKb(self):
"""Calculate amount of free memory in KB of the given vm.
Free memory is calculated as sum of free, cached, and buffers
as output from /proc/meminfo.
Args:
vm: vm to check
Returns:
free memory on the vm in KB
"""
stdout, _ = self.RemoteCommand("""
awk '
BEGIN {total =0}
/MemFree:/ {total += $2}
/Cached:/ {total += $2}
/Buffers:/ {total += $2}
END {print total}
' /proc/meminfo
""")
return int(stdout)
def _GetTotalMemoryKb(self):
"""Returns the amount of physical memory on the VM in Kilobytes.
This method does not cache results (unlike "total_memory_kb").
"""
meminfo_command = 'cat /proc/meminfo | grep MemTotal | awk \'{print $2}\''
stdout, _ = self.RemoteCommand(meminfo_command)
return int(stdout)
def _TestReachable(self, ip):
"""Returns True if the VM can reach the ip address and False otherwise."""
return self.TryRemoteCommand('ping -c 1 %s' % ip)
def SetupLocalDisks(self):
"""Performs Linux specific setup of local disks."""
pass
def CreateRamDisk(self, disk_spec):
"""Performs Linux specific setup of ram disk."""
assert disk_spec.mount_point
ramdisk = self.RamDisk(disk_spec)
ramdisk.Mount(self)
self.scratch_disks.append(ramdisk)
class RamDisk(disk.MountableDisk):
"""Linux specific setup of ram disk."""
def Mount(self, vm):
logging.info('Mounting and creating Ram Disk %s, %s',
self.mount_point, self.disk_size)
mnt_cmd = ('sudo mkdir -p {0};sudo mount -t tmpfs -o size={1}g tmpfs {0};'
'sudo chown -R $USER:$USER {0};').format(
self.mount_point, self.disk_size)
vm.RemoteHostCommand(mnt_cmd)
def _CreateScratchDiskFromDisks(self, disk_spec, disks):
"""Helper method to prepare data disks.
Given a list of BaseDisk objects, this will do most of the work creating,
attaching, striping, formatting, and mounting them. If multiple BaseDisk
objects are passed to this method, it will stripe them, combining them
into one 'logical' data disk (it will be treated as a single disk from a
    benchmark's perspective). This is intended to be called from within a
    cloud-specific VM's CreateScratchDisk method.
Args:
disk_spec: The BaseDiskSpec object corresponding to the disk.
disks: A list of the disk(s) to be created, attached, striped,
formatted, and mounted. If there is more than one disk in
the list, then they will be striped together.
"""
if len(disks) > 1:
# If the disk_spec called for a striped disk, create one.
disk_spec.device_path = '/dev/md%d' % len(self.scratch_disks)
data_disk = disk.StripedDisk(disk_spec, disks)
else:
data_disk = disks[0]
self.scratch_disks.append(data_disk)
if data_disk.disk_type != disk.LOCAL:
data_disk.Create()
data_disk.Attach(self)
if data_disk.is_striped:
device_paths = [d.GetDevicePath() for d in data_disk.disks]
self.StripeDisks(device_paths, data_disk.GetDevicePath())
if disk_spec.mount_point:
if isinstance(data_disk, disk.MountableDisk):
data_disk.Mount(self)
else:
self.FormatDisk(data_disk.GetDevicePath(), disk_spec.disk_type)
self.MountDisk(data_disk.GetDevicePath(), disk_spec.mount_point,
disk_spec.disk_type, data_disk.mount_options,
data_disk.fstab_options)
def StripeDisks(self, devices, striped_device):
"""Raids disks together using mdadm.
Args:
devices: A list of device paths that should be striped together.
striped_device: The path to the device that will be created.
"""
self.Install('mdadm')
stripe_cmd = ('yes | sudo mdadm --create %s --level=stripe --raid-devices='
'%s %s' % (striped_device, len(devices), ' '.join(devices)))
self.RemoteHostCommand(stripe_cmd)
# Save the RAID layout on the disk
cmd = ('sudo mdadm --detail --scan | ' +
'sudo tee -a /etc/mdadm/mdadm.conf')
self.RemoteHostCommand(cmd)
# Make the disk available during reboot
cmd = 'sudo update-initramfs -u'
self.RemoteHostCommand(cmd)
# Automatically mount the disk after reboot
cmd = ('echo \'/dev/md0 /mnt/md0 ext4 defaults,nofail'
',discard 0 0\' | sudo tee -a /etc/fstab')
self.RemoteHostCommand(cmd)
def BurnCpu(self, burn_cpu_threads=None, burn_cpu_seconds=None):
"""Burns vm cpu for some amount of time and dirty cache.
Args:
burn_cpu_threads: Number of threads to burn cpu.
burn_cpu_seconds: Amount of time in seconds to burn cpu.
"""
burn_cpu_threads = burn_cpu_threads or FLAGS.burn_cpu_threads
burn_cpu_seconds = burn_cpu_seconds or FLAGS.burn_cpu_seconds
if burn_cpu_seconds:
self.Install('sysbench')
end_time = time.time() + burn_cpu_seconds
self.RemoteCommand(
'nohup sysbench --num-threads=%s --test=cpu --cpu-max-prime=10000000 '
'run 1> /dev/null 2> /dev/null &' % burn_cpu_threads)
if time.time() < end_time:
time.sleep(end_time - time.time())
self.RemoteCommand('pkill -9 sysbench')
def SetSmpAffinity(self):
"""Set SMP IRQ affinity."""
if self._smp_affinity_script:
self.PushDataFile(self._smp_affinity_script)
self.RemoteCommand('sudo bash %s' % self._smp_affinity_script)
else:
raise NotImplementedError()
def SetReadAhead(self, num_sectors, devices):
"""Set read-ahead value for block devices.
Args:
num_sectors: int. Number of sectors of read ahead.
devices: list of strings. A list of block devices.
"""
self.RemoteCommand(
'sudo blockdev --setra {0} {1}; sudo blockdev --setfra {0} {1};'.format(
num_sectors, ' '.join(devices)))
def GetSha256sum(self, path, filename):
"""Gets the sha256sum hash for a filename in a path on the VM.
Args:
path: string; Path on the VM.
filename: string; Name of the file in the path.
Returns:
string; The sha256sum hash.
"""
stdout, _ = self.RemoteCommand(
'sha256sum %s' % posixpath.join(path, filename))
sha256sum, _ = stdout.split()
return sha256sum
def _GetSmbService(self):
"""Returns the SmbService created in the benchmark spec.
Before calling this method check that the disk.disk_type is equal to
disk.SMB or else an exception will be raised.
Returns:
The smb_service.BaseSmbService service for this cloud.
Raises:
CreationError: If no SMB service was created.
"""
smb = getattr(context.GetThreadBenchmarkSpec(), 'smb_service')
if smb is None:
raise errors.Resource.CreationError('No SMB Service created')
return smb
def AppendKernelCommandLine(self, command_line, reboot=True):
"""Appends the provided command-line to the VM and reboots by default.
    This method should be overridden by the desired Linux flavor to be useful.
Most (all?) Linux flavors modify the kernel command line by updating the
GRUB configuration files and rebooting.
Args:
command_line: The string to append to the kernel command line.
reboot: Whether or not to reboot to have the change take effect.
"""
raise NotImplementedError(
'Kernel command-line appending for given Linux flavor not implemented.')
def _DoAppendKernelCommandLine(self):
"""If the flag is set, attempts to append the provided kernel command line.
In addition, to consolidate reboots during VM prepare, this method sets the
needs reboot bit instead of immediately rebooting.
"""
if FLAGS.disable_smt and self.CheckLsCpu().threads_per_core != 1:
FLAGS.append_kernel_command_line = ' '.join(
(FLAGS.append_kernel_command_line,
'nosmt')) if FLAGS.append_kernel_command_line else 'nosmt'
if FLAGS.append_kernel_command_line:
self.AppendKernelCommandLine(
FLAGS.append_kernel_command_line, reboot=False)
self._needs_reboot = True
@abc.abstractmethod
def InstallPackages(self, packages: str) -> None:
"""Installs packages using the OS's package manager."""
pass
def _IsSmtEnabled(self):
"""Whether simultaneous multithreading (SMT) is enabled on the vm.
Looks for the "nosmt" attribute in the booted linux kernel command line
parameters.
Returns:
Whether SMT is enabled on the vm.
"""
return not bool(re.search(r'\bnosmt\b', self.kernel_command_line))
@property
def cpu_vulnerabilities(self) -> CpuVulnerabilities:
"""Returns a CpuVulnerabilities of CPU vulnerabilities.
Output of "grep . .../cpu/vulnerabilities/*" looks like this:
/sys/devices/system/cpu/vulnerabilities/itlb_multihit:KVM: Vulnerable
/sys/devices/system/cpu/vulnerabilities/l1tf:Mitigation: PTE Inversion
Which gets turned into
CpuVulnerabilities(vulnerabilities={'itlb_multihit': 'KVM'},
mitigations= {'l1tf': 'PTE Inversion'})
"""
text, _ = self.RemoteCommand(
'sudo grep . /sys/devices/system/cpu/vulnerabilities/*',
ignore_failure=True)
vuln = CpuVulnerabilities()
if not text:
logging.warning('No text response when getting CPU vulnerabilities')
return vuln
for line in text.splitlines():
vuln.AddLine(line)
return vuln
class ClearMixin(BaseLinuxMixin):
"""Class holding Clear Linux specific VM methods and attributes."""
OS_TYPE = os_types.CLEAR
BASE_OS_TYPE = os_types.CLEAR
PYTHON_2_PACKAGE = 'python-basic'
def OnStartup(self):
"""Eliminates the need to have a tty to run sudo commands."""
super(ClearMixin, self).OnStartup()
self.RemoteHostCommand('sudo swupd autoupdate --disable')
self.RemoteHostCommand('sudo mkdir -p /etc/sudoers.d')
self.RemoteHostCommand('echo \'Defaults:{0} !requiretty\' | '
'sudo tee /etc/sudoers.d/pkb'.format(self.user_name),
login_shell=True)
def PackageCleanup(self):
"""Cleans up all installed packages.
Performs the normal package cleanup, then deletes the file
added to the /etc/sudoers.d directory during startup.
"""
super(ClearMixin, self).PackageCleanup()
self.RemoteCommand('sudo rm /etc/sudoers.d/pkb')
def SnapshotPackages(self):
"""See base class."""
self.RemoteCommand('sudo swupd bundle-list > {0}/bundle_list'.format(
linux_packages.INSTALL_DIR))
def RestorePackages(self):
"""See base class."""
self.RemoteCommand(
'sudo swupd bundle-list | grep --fixed-strings --line-regexp --invert-match --file '
'{0}/bundle_list | xargs --no-run-if-empty sudo swupd bundle-remove'
.format(linux_packages.INSTALL_DIR),
ignore_failure=True)
def HasPackage(self, package):
"""Returns True iff the package is available for installation."""
return self.TryRemoteCommand(
'sudo swupd bundle-list --all | grep {0}'.format(package),
suppress_warning=True)
def InstallPackages(self, packages: str) -> None:
"""Installs packages using the swupd bundle manager."""
self.RemoteCommand('sudo swupd bundle-add {0}'.format(packages))
def Install(self, package_name):
"""Installs a PerfKit package on the VM."""
if not self.install_packages:
return
if package_name not in self._installed_packages:
package = linux_packages.PACKAGES[package_name]
if hasattr(package, 'SwupdInstall'):
package.SwupdInstall(self)
elif hasattr(package, 'Install'):
package.Install(self)
else:
raise KeyError(
'Package {0} has no install method for Clear Linux.'.format(
package_name))
self._installed_packages.add(package_name)
def Uninstall(self, package_name):
"""Uninstalls a PerfKit package on the VM."""
package = linux_packages.PACKAGES[package_name]
if hasattr(package, 'SwupdUninstall'):
package.SwupdUninstall(self)
elif hasattr(package, 'Uninstall'):
package.Uninstall(self)
def GetPathToConfig(self, package_name):
"""See base class."""
package = linux_packages.PACKAGES[package_name]
return package.SwupdGetPathToConfig(self)
def GetServiceName(self, package_name):
"""See base class."""
package = linux_packages.PACKAGES[package_name]
return package.SwupdGetServiceName(self)
def GetOsInfo(self):
"""See base class."""
stdout, _ = self.RemoteCommand('swupd info | grep Installed')
return 'Clear Linux build: {0}'.format(
regex_util.ExtractGroup(CLEAR_BUILD_REGEXP, stdout))
def SetupProxy(self):
"""Sets up proxy configuration variables for the cloud environment."""
super(ClearMixin, self).SetupProxy()
profile_file = '/etc/profile'
commands = []
if FLAGS.http_proxy:
commands.append("echo 'export http_proxy=%s' | sudo tee -a %s" % (
FLAGS.http_proxy, profile_file))
if FLAGS.https_proxy:
commands.append("echo 'https_proxy=%s' | sudo tee -a %s" % (
FLAGS.https_proxy, profile_file))
if FLAGS.ftp_proxy:
commands.append("echo 'ftp_proxy=%s' | sudo tee -a %s" % (
FLAGS.ftp_proxy, profile_file))
if FLAGS.no_proxy:
commands.append("echo 'export no_proxy=%s' | sudo tee -a %s" % (
FLAGS.no_proxy, profile_file))
if commands:
self.RemoteCommand(';'.join(commands))
def RemoteCommand(self, command, **kwargs):
"""Runs a command inside the container.
Args:
command: Arguments passed directly to RemoteHostCommandWithReturnCode.
**kwargs: Keyword arguments passed directly to
RemoteHostCommandWithReturnCode.
Returns:
A tuple of stdout and stderr from running the command.
"""
    # Source /etc/profile so proxy variables and PATH changes apply.
command = '. /etc/profile; %s' % (command)
return self.RemoteHostCommand(command, **kwargs)[:2]
class BaseContainerLinuxMixin(BaseLinuxMixin):
"""Class holding VM methods for minimal container-based OSes like Core OS.
  These operating systems support SSH like other Linux OSes, but have no
  package manager, so they cannot run Linux benchmarks without Docker.
Because they cannot install packages, they only support VM life cycle
benchmarks like cluster_boot.
"""
def InstallPackages(self, package_name):
raise NotImplementedError('Container OSes have no package managers.')
def HasPackage(self, package: str) -> bool:
return False
  # Install could theoretically be supported: a hermetic, architecture-
  # appropriate binary could be copied into the VM and run. However, because
  # curl, wget, and object store clients cannot be installed and may or may
  # not be present, copying the binary is non-trivial, so simply block the
  # attempt.
def Install(self, package_name):
raise NotImplementedError('Container OSes have no package managers.')
def Uninstall(self, package_name):
raise NotImplementedError('Container OSes have no package managers.')
def PrepareVMEnvironment(self):
# Don't try to install packages as normal, because it will fail.
pass
class BaseRhelMixin(BaseLinuxMixin):
"""Class holding RHEL/CentOS specific VM methods and attributes."""
# OS_TYPE = os_types.RHEL
BASE_OS_TYPE = os_types.RHEL
def OnStartup(self):
"""Eliminates the need to have a tty to run sudo commands."""
super(BaseRhelMixin, self).OnStartup()
self.RemoteHostCommand('echo \'Defaults:%s !requiretty\' | '
'sudo tee /etc/sudoers.d/pkb' % self.user_name,
login_shell=True)
if FLAGS.gce_hpc_tools:
self.InstallGcpHpcTools()
if _DISABLE_YUM_CRON.value:
# yum cron can stall causing yum commands to hang
self.RemoteHostCommand('sudo systemctl disable yum-cron.service',
ignore_failure=True)
def InstallGcpHpcTools(self):
"""Installs the GCP HPC tools."""
self.Install('gce_hpc_tools')
def InstallEpelRepo(self):
"""Installs the Extra Packages for Enterprise Linux repository."""
self.Install('epel_release')
def PackageCleanup(self):
"""Cleans up all installed packages.
Performs the normal package cleanup, then deletes the file
added to the /etc/sudoers.d directory during startup.
"""
super(BaseRhelMixin, self).PackageCleanup()
self.RemoteCommand('sudo rm /etc/sudoers.d/pkb')
def SnapshotPackages(self):
"""Grabs a snapshot of the currently installed packages."""
self.RemoteCommand('rpm -qa > %s/rpm_package_list'
% linux_packages.INSTALL_DIR)
def RestorePackages(self):
"""Restores the currently installed packages to those snapshotted."""
self.RemoteCommand(
'rpm -qa | grep --fixed-strings --line-regexp --invert-match --file '
'%s/rpm_package_list | xargs --no-run-if-empty sudo rpm -e' %
linux_packages.INSTALL_DIR,
ignore_failure=True)
def HasPackage(self, package):
"""Returns True iff the package is available for installation."""
return self.TryRemoteCommand('sudo yum info %s' % package,
suppress_warning=True)
# yum talks to the network on each request so transient issues may fix
# themselves on retry
@vm_util.Retry(max_retries=UPDATE_RETRIES)
def InstallPackages(self, packages):
"""Installs packages using the yum package manager."""
self.RemoteCommand('sudo yum install -y %s' % packages)
@vm_util.Retry()
def InstallPackageGroup(self, package_group):
"""Installs a 'package group' using the yum package manager."""
self.RemoteCommand('sudo yum groupinstall -y "%s"' % package_group)
def Install(self, package_name):
"""Installs a PerfKit package on the VM."""
if not self.install_packages:
return
if package_name not in self._installed_packages:
package = linux_packages.PACKAGES[package_name]
if hasattr(package, 'YumInstall'):
package.YumInstall(self)
elif hasattr(package, 'Install'):
package.Install(self)
else:
raise KeyError('Package %s has no install method for RHEL.' %
package_name)
self._installed_packages.add(package_name)
def Uninstall(self, package_name):
"""Uninstalls a PerfKit package on the VM."""
package = linux_packages.PACKAGES[package_name]
if hasattr(package, 'YumUninstall'):
package.YumUninstall(self)
elif hasattr(package, 'Uninstall'):
package.Uninstall(self)
def GetPathToConfig(self, package_name):
"""Returns the path to the config file for PerfKit packages.
This function is mostly useful when config files locations
don't match across distributions (such as mysql). Packages don't
need to implement it if this is not the case.
"""
package = linux_packages.PACKAGES[package_name]
return package.YumGetPathToConfig(self)
def GetServiceName(self, package_name):
"""Returns the service name of a PerfKit package.
This function is mostly useful when service names don't
match across distributions (such as mongodb). Packages don't
need to implement it if this is not the case.
"""
package = linux_packages.PACKAGES[package_name]
return package.YumGetServiceName(self)
def SetupProxy(self):
"""Sets up proxy configuration variables for the cloud environment."""
super(BaseRhelMixin, self).SetupProxy()
yum_proxy_file = '/etc/yum.conf'
if FLAGS.http_proxy:
self.RemoteCommand("echo -e 'proxy= %s' | sudo tee -a %s" % (
FLAGS.http_proxy, yum_proxy_file))
def AppendKernelCommandLine(self, command_line, reboot=True):
"""Appends the provided command-line to the VM and reboots by default."""
self.RemoteCommand(
r'echo GRUB_CMDLINE_LINUX_DEFAULT=\"\${GRUB_CMDLINE_LINUX_DEFAULT} %s\"'
' | sudo tee -a /etc/default/grub' % command_line)
self.RemoteCommand('sudo grub2-mkconfig -o /boot/grub2/grub.cfg')
self.RemoteCommand('sudo grub2-mkconfig -o /etc/grub2.cfg')
if reboot:
self.Reboot()
class AmazonLinux2Mixin(BaseRhelMixin):
"""Class holding Amazon Linux 2 VM methods and attributes."""
OS_TYPE = os_types.AMAZONLINUX2
class Rhel7Mixin(BaseRhelMixin):
"""Class holding RHEL 7 specific VM methods and attributes."""
OS_TYPE = os_types.RHEL7
class Rhel8Mixin(BaseRhelMixin):
"""Class holding RHEL 8 specific VM methods and attributes."""
OS_TYPE = os_types.RHEL8
class CentOs7Mixin(BaseRhelMixin):
"""Class holding CentOS 7 specific VM methods and attributes."""
OS_TYPE = os_types.CENTOS7
class CentOs8Mixin(BaseRhelMixin):
"""Class holding CentOS 8 specific VM methods and attributes."""
OS_TYPE = os_types.CENTOS8
class ContainerOptimizedOsMixin(BaseContainerLinuxMixin):
"""Class holding COS specific VM methods and attributes."""
OS_TYPE = os_types.COS
BASE_OS_TYPE = os_types.CORE_OS
def PrepareVMEnvironment(self):
super(ContainerOptimizedOsMixin, self).PrepareVMEnvironment()
# COS mounts /home and /tmp with -o noexec, which blocks running benchmark
# binaries.
# TODO(user): Support reboots
self.RemoteCommand('sudo mount -o remount,exec /home')
self.RemoteCommand('sudo mount -o remount,exec /tmp')
class CoreOsMixin(BaseContainerLinuxMixin):
"""Class holding CoreOS Container Linux specific VM methods and attributes."""
OS_TYPE = os_types.CORE_OS
BASE_OS_TYPE = os_types.CORE_OS
class BaseDebianMixin(BaseLinuxMixin):
"""Class holding Debian specific VM methods and attributes."""
OS_TYPE = 'base-only'
BASE_OS_TYPE = os_types.DEBIAN
def __init__(self, *args, **kwargs):
super(BaseDebianMixin, self).__init__(*args, **kwargs)
# Whether or not apt-get update has been called.
# We defer running apt-get update until the first request to install a
# package.
self._apt_updated = False
@vm_util.Retry(max_retries=UPDATE_RETRIES)
def AptUpdate(self):
"""Updates the package lists on VMs using apt."""
try:
# setting the timeout on the apt-get to 5 minutes because
# it is known to get stuck. In a normal update this
# takes less than 30 seconds.
self.RemoteCommand('sudo apt-get update', timeout=300)
except errors.VirtualMachine.RemoteCommandError as e:
# If there is a problem, remove the lists in order to get rid of
# "Hash Sum mismatch" errors (the files will be restored when
# apt-get update is run again).
self.RemoteCommand('sudo rm -r /var/lib/apt/lists/*')
raise e
def SnapshotPackages(self):
"""Grabs a snapshot of the currently installed packages."""
self.RemoteCommand(
'dpkg --get-selections > %s/dpkg_selections'
% linux_packages.INSTALL_DIR)
def RestorePackages(self):
"""Restores the currently installed packages to those snapshotted."""
self.RemoteCommand('sudo dpkg --clear-selections')
self.RemoteCommand(
'sudo dpkg --set-selections < %s/dpkg_selections'
% linux_packages.INSTALL_DIR)
self.RemoteCommand('sudo DEBIAN_FRONTEND=\'noninteractive\' '
'apt-get --purge -y dselect-upgrade')
def HasPackage(self, package):
"""Returns True iff the package is available for installation."""
return self.TryRemoteCommand('apt-get install --just-print %s' % package,
suppress_warning=True)
@vm_util.Retry()
def InstallPackages(self, packages):
"""Installs packages using the apt package manager."""
if not self._apt_updated:
self.AptUpdate()
self._apt_updated = True
try:
install_command = ('sudo DEBIAN_FRONTEND=\'noninteractive\' '
'/usr/bin/apt-get -y install %s' % (packages))
self.RemoteCommand(install_command)
except errors.VirtualMachine.RemoteCommandError as e:
      # TODO(user): Remove code below after Azure fixes their package
      # repository, or add code to recover the sources.list.
self.RemoteCommand(
'sudo sed -i.bk "s/azure.archive.ubuntu.com/archive.ubuntu.com/g" '
'/etc/apt/sources.list')
logging.info('Installing "%s" failed on %s. This may be transient. '
'Updating package list.', packages, self)
self.AptUpdate()
raise e
def Install(self, package_name):
"""Installs a PerfKit package on the VM."""
if not self.install_packages:
return
if not self._apt_updated:
self.AptUpdate()
self._apt_updated = True
if package_name not in self._installed_packages:
package = linux_packages.PACKAGES[package_name]
if hasattr(package, 'AptInstall'):
package.AptInstall(self)
elif hasattr(package, 'Install'):
package.Install(self)
else:
raise KeyError('Package %s has no install method for Debian.' %
package_name)
self._installed_packages.add(package_name)
def Uninstall(self, package_name):
"""Uninstalls a PerfKit package on the VM."""
package = linux_packages.PACKAGES[package_name]
if hasattr(package, 'AptUninstall'):
package.AptUninstall(self)
elif hasattr(package, 'Uninstall'):
package.Uninstall(self)
self._installed_packages.discard(package_name)
def GetPathToConfig(self, package_name):
"""Returns the path to the config file for PerfKit packages.
This function is mostly useful when config files locations
don't match across distributions (such as mysql). Packages don't
need to implement it if this is not the case.
Args:
package_name: the name of the package.
"""
package = linux_packages.PACKAGES[package_name]
return package.AptGetPathToConfig(self)
def GetServiceName(self, package_name):
"""Returns the service name of a PerfKit package.
This function is mostly useful when service names don't
match across distributions (such as mongodb). Packages don't
need to implement it if this is not the case.
Args:
package_name: the name of the package.
"""
package = linux_packages.PACKAGES[package_name]
return package.AptGetServiceName(self)
def SetupProxy(self):
"""Sets up proxy configuration variables for the cloud environment."""
super(BaseDebianMixin, self).SetupProxy()
apt_proxy_file = '/etc/apt/apt.conf'
commands = []
if FLAGS.http_proxy:
commands.append("echo -e 'Acquire::http::proxy \"%s\";' |"
'sudo tee -a %s' % (FLAGS.http_proxy, apt_proxy_file))
if FLAGS.https_proxy:
commands.append("echo -e 'Acquire::https::proxy \"%s\";' |"
'sudo tee -a %s' % (FLAGS.https_proxy, apt_proxy_file))
if commands:
self.RemoteCommand(';'.join(commands))
def IncreaseSSHConnection(self, target):
"""Increase maximum number of ssh connections on vm.
Args:
target: int. The max number of ssh connection.
"""
self.RemoteCommand(r'sudo sed -i -e "s/.*MaxStartups.*/MaxStartups {0}/" '
'/etc/ssh/sshd_config'.format(target))
self.RemoteCommand('sudo service ssh restart')
def AppendKernelCommandLine(self, command_line, reboot=True):
"""Appends the provided command-line to the VM and reboots by default."""
self.RemoteCommand(
r'echo GRUB_CMDLINE_LINUX_DEFAULT=\"\${GRUB_CMDLINE_LINUX_DEFAULT} %s\"'
r' | sudo tee -a /etc/default/grub' % command_line)
self.RemoteCommand('sudo update-grub')
if reboot:
self.Reboot()
class Debian9Mixin(BaseDebianMixin):
"""Class holding Debian9 specific VM methods and attributes."""
OS_TYPE = os_types.DEBIAN9
# https://packages.debian.org/stretch/python
PYTHON_2_PACKAGE = 'python'
class Debian10Mixin(BaseDebianMixin):
"""Class holding Debian 10 specific VM methods and attributes."""
OS_TYPE = os_types.DEBIAN10
class Debian11Mixin(BaseDebianMixin):
"""Class holding Debian 11 specific VM methods and attributes."""
OS_TYPE = os_types.DEBIAN11
def PrepareVMEnvironment(self):
# Missing in some images. Required by PrepareVMEnvironment to determine
# partitioning.
self.InstallPackages('fdisk')
super().PrepareVMEnvironment()
class BaseUbuntuMixin(BaseDebianMixin):
"""Class holding Ubuntu specific VM methods and attributes."""
def AppendKernelCommandLine(self, command_line, reboot=True):
"""Appends the provided command-line to the VM and reboots by default."""
self.RemoteCommand(
r'echo GRUB_CMDLINE_LINUX_DEFAULT=\"\${GRUB_CMDLINE_LINUX_DEFAULT} %s\"'
r' | sudo tee -a /etc/default/grub.d/50-cloudimg-settings.cfg' %
command_line)
self.RemoteCommand('sudo update-grub')
if reboot:
self.Reboot()
class Ubuntu1604Mixin(BaseUbuntuMixin, virtual_machine.DeprecatedOsMixin):
"""Class holding Ubuntu1604 specific VM methods and attributes."""
OS_TYPE = os_types.UBUNTU1604
PYTHON_2_PACKAGE = 'python'
END_OF_LIFE = '2021-05-01'
ALTERNATIVE_OS = os_types.UBUNTU1804
class Ubuntu1804Mixin(BaseUbuntuMixin):
"""Class holding Ubuntu1804 specific VM methods and attributes."""
OS_TYPE = os_types.UBUNTU1804
# https://packages.ubuntu.com/bionic/python
PYTHON_2_PACKAGE = 'python'
def UpdateEnvironmentPath(self):
"""Add /snap/bin to default search path for Ubuntu1804.
See https://bugs.launchpad.net/snappy/+bug/1659719.
"""
self.RemoteCommand(
r'sudo sed -i "1 i\export PATH=$PATH:/snap/bin" ~/.bashrc')
self.RemoteCommand(
r'sudo sed -i "1 i\export PATH=$PATH:/snap/bin" /etc/bash.bashrc')
# Inherit Ubuntu 18's idiosyncrasies.
# Note https://bugs.launchpad.net/snappy/+bug/1659719 is also marked as
# won't-fix in focal.
class Ubuntu2004Mixin(Ubuntu1804Mixin):
"""Class holding Ubuntu2004 specific VM methods and attributes."""
OS_TYPE = os_types.UBUNTU2004
# https://packages.ubuntu.com/focal/python2
PYTHON_2_PACKAGE = 'python2'
class Ubuntu1604Cuda9Mixin(Ubuntu1604Mixin):
"""Class holding NVIDIA CUDA specific VM methods and attributes."""
OS_TYPE = os_types.UBUNTU1604_CUDA9
class ContainerizedDebianMixin(BaseDebianMixin):
"""Class representing a Containerized Virtual Machine.
A Containerized Virtual Machine is a VM that runs remote commands
within a Docker Container.
Any call to RemoteCommand() will be run within the container
whereas any call to RemoteHostCommand() will be run in the VM itself.
"""
OS_TYPE = os_types.UBUNTU_CONTAINER
BASE_DOCKER_IMAGE = 'ubuntu:xenial'
def __init__(self, *args, **kwargs):
super(ContainerizedDebianMixin, self).__init__(*args, **kwargs)
self.docker_id = None
def _CheckDockerExists(self):
"""Returns whether docker is installed or not."""
resp, _ = self.RemoteHostCommand('command -v docker', ignore_failure=True,
suppress_warning=True)
    return resp.rstrip() != ''
def PrepareVMEnvironment(self):
"""Initializes docker before proceeding with preparation."""
if not self._CheckDockerExists():
self.Install('docker')
# We need to explicitly create VM_TMP_DIR in the host because
# otherwise it will be implicitly created by Docker in InitDocker()
# (because of the -v option) and owned by root instead of perfkit,
# causing permission problems.
self.RemoteHostCommand('mkdir -p %s' % vm_util.VM_TMP_DIR)
self.InitDocker()
# This will create the VM_TMP_DIR in the container.
# Has to be done after InitDocker() because it needs docker_id.
self._CreateVmTmpDir()
super(ContainerizedDebianMixin, self).PrepareVMEnvironment()
def InitDocker(self):
"""Initializes the docker container daemon."""
init_docker_cmd = ['sudo docker run -d '
'--rm '
'--net=host '
'--workdir=%s '
'-v %s:%s ' % (CONTAINER_WORK_DIR,
vm_util.VM_TMP_DIR,
CONTAINER_MOUNT_DIR)]
for sd in self.scratch_disks:
init_docker_cmd.append('-v %s:%s ' % (sd.mount_point, sd.mount_point))
init_docker_cmd.append('%s sleep infinity ' % self.BASE_DOCKER_IMAGE)
init_docker_cmd = ''.join(init_docker_cmd)
resp, _ = self.RemoteHostCommand(init_docker_cmd)
self.docker_id = resp.rstrip()
return self.docker_id
def RemoteCommand(self, command, **kwargs):
"""Runs a command inside the container.
Args:
command: A valid bash command.
**kwargs: Keyword arguments passed directly to RemoteHostCommand.
Returns:
A tuple of stdout and stderr from running the command.
"""
# Escapes bash sequences
command = command.replace("'", r"'\''")
logging.info('Docker running: %s', command)
command = "sudo docker exec %s bash -c '%s'" % (self.docker_id, command)
return self.RemoteHostCommand(command, **kwargs)
def ContainerCopy(self, file_name, container_path='', copy_to=True):
"""Copies a file to or from container_path to the host's vm_util.VM_TMP_DIR.
Args:
file_name: Name of the file in the host's vm_util.VM_TMP_DIR.
container_path: Optional path of where to copy file on container.
copy_to: True to copy to container, False to copy from container.
Raises:
RemoteExceptionError: If the source container_path is blank.
"""
if copy_to:
if container_path == '':
container_path = CONTAINER_WORK_DIR
# Everything in vm_util.VM_TMP_DIR is directly accessible
# both in the host and in the container
source_path = posixpath.join(CONTAINER_MOUNT_DIR, file_name)
command = 'cp %s %s' % (source_path, container_path)
self.RemoteCommand(command)
else:
if container_path == '':
raise errors.VirtualMachine.RemoteExceptionError('Cannot copy '
'from blank target')
destination_path = posixpath.join(CONTAINER_MOUNT_DIR, file_name)
command = 'cp %s %s' % (container_path, destination_path)
self.RemoteCommand(command)
@vm_util.Retry(
poll_interval=1, max_retries=3,
retryable_exceptions=(errors.VirtualMachine.RemoteCommandError,))
def RemoteCopy(self, file_path, remote_path='', copy_to=True):
"""Copies a file to or from the container in the remote VM.
Args:
file_path: Local path to file.
remote_path: Optional path of where to copy file inside the container.
copy_to: True to copy to VM, False to copy from VM.
"""
if copy_to:
file_name = os.path.basename(file_path)
tmp_path = posixpath.join(vm_util.VM_TMP_DIR, file_name)
self.RemoteHostCopy(file_path, tmp_path, copy_to)
self.ContainerCopy(file_name, remote_path, copy_to)
else:
file_name = posixpath.basename(remote_path)
tmp_path = posixpath.join(vm_util.VM_TMP_DIR, file_name)
self.ContainerCopy(file_name, remote_path, copy_to)
self.RemoteHostCopy(file_path, tmp_path, copy_to)
def MoveFile(self, target, source_path, remote_path=''):
"""Copies a file from one VM to a target VM.
Copies a file from a container in the source VM to a container
in the target VM.
Args:
target: The target ContainerizedVirtualMachine object.
source_path: The location of the file on the REMOTE machine.
remote_path: The destination of the file on the TARGET machine, default
is the root directory.
"""
file_name = posixpath.basename(source_path)
# Copies the file to vm_util.VM_TMP_DIR in source
self.ContainerCopy(file_name, source_path, copy_to=False)
# Moves the file to vm_util.VM_TMP_DIR in target
source_host_path = posixpath.join(vm_util.VM_TMP_DIR, file_name)
target_host_dir = vm_util.VM_TMP_DIR
self.MoveHostFile(target, source_host_path, target_host_dir)
# Copies the file to its final destination in the container
target.ContainerCopy(file_name, remote_path)
def SnapshotPackages(self):
"""Grabs a snapshot of the currently installed packages."""
pass
def PackageCleanup(self):
"""Cleans up all installed packages.
Stop the docker container launched with --rm.
"""
if self.docker_id:
self.RemoteHostCommand('docker stop %s' % self.docker_id)
class KernelRelease(object):
"""Holds the contents of the linux kernel version returned from uname -r."""
def __init__(self, uname):
"""KernelVersion Constructor.
Args:
uname: A string in the format of "uname -r" command
"""
# example format would be: "4.5.0-96-generic"
# or "3.10.0-514.26.2.el7.x86_64" for centos
# major.minor.Rest
# in this example, major = 4, minor = 5
major_string, minor_string, _ = uname.split('.', 2)
self.major = int(major_string)
self.minor = int(minor_string)
def AtLeast(self, major, minor):
"""Check If the kernel version meets a minimum bar.
The kernel version needs to be at least as high as the major.minor
specified in args.
Args:
major: The major number to test, as an integer
minor: The minor number to test, as an integer
Returns:
True if the kernel version is at least as high as major.minor,
False otherwise
"""
if self.major < major:
return False
if self.major > major:
return True
return self.minor >= minor
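# Usage sketch, reusing the example version from the constructor docstring:
#   kernel = KernelRelease('4.5.0-96-generic')  # major=4, minor=5
#   kernel.AtLeast(4, 4)  # True: 4.5 is at least 4.4
#   kernel.AtLeast(4, 6)  # False: 4.5 is below 4.6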
def _ParseTextProperties(text):
"""Parses raw text that has lines in "key:value" form.
When comes across an empty line will return a dict of the current values.
Args:
text: Text of lines in "key:value" form.
Yields:
Dict of [key,value] values for a section.
"""
current_data = {}
for line in (line.strip() for line in text.splitlines()):
if line:
m = _COLON_SEPARATED_RE.match(line)
if m:
current_data[m.group('key')] = m.group('value')
else:
logging.debug('Ignoring bad line "%s"', line)
else:
# Hit a section break
if current_data:
yield current_data
current_data = {}
if current_data:
yield current_data
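# Usage sketch, assuming _COLON_SEPARATED_RE captures "key" and "value"
# around the first colon:
#   list(_ParseTextProperties('a: 1\nb: 2\n\nc: 3'))
#   -> [{'a': '1', 'b': '2'}, {'c': '3'}]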
class LsCpuResults(object):
"""Holds the contents of the command lscpu."""
def __init__(self, lscpu):
"""LsCpuResults Constructor.
The lscpu command on Ubuntu 16.04 does *not* have the "--json" option for
json output, so keep on using the text format.
Args:
lscpu: A string in the format of "lscpu" command
Raises:
      ValueError: if the format of lscpu isn't what was expected for parsing
Example value of lscpu is:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 12
On-line CPU(s) list: 0-11
Thread(s) per core: 2
Core(s) per socket: 6
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 79
Stepping: 1
CPU MHz: 1202.484
BogoMIPS: 7184.10
Virtualization: VT-x
L1d cache: 32K
L1i cache: 32K
L2 cache: 256K
L3 cache: 15360K
NUMA node0 CPU(s): 0-11
"""
self.data = {}
for stanza in _ParseTextProperties(lscpu):
self.data.update(stanza)
def GetInt(key):
if key in self.data and self.data[key].isdigit():
return int(self.data[key])
raise ValueError('Could not find integer "{}" in {}'.format(
key, sorted(self.data)))
self.numa_node_count = GetInt('NUMA node(s)')
self.cores_per_socket = GetInt('Core(s) per socket')
self.socket_count = GetInt('Socket(s)')
self.threads_per_core = GetInt('Thread(s) per core')
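  # Usage sketch, fed with the example lscpu text from the docstring above:
  #   lscpu = LsCpuResults(lscpu_text)
  #   lscpu.socket_count -> 1, lscpu.cores_per_socket -> 6
  #   lscpu.threads_per_core -> 2, lscpu.numa_node_count -> 1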
class ProcCpuResults(object):
"""Parses /proc/cpuinfo text into grouped values.
Most of the cpuinfo is repeated per processor. Known ones that change per
processor are listed in _PER_CPU_KEYS and are processed separately to make
reporting easier.
Example metadata for metric='proccpu':
|bugs:spec_store_bypass spectre_v1 spectre_v2 swapgs|,
|cache size:25344 KB|
Example metadata for metric='proccpu_mapping':
|proc_0:apicid=0;core id=0;initial apicid=0;physical id=0|,
|proc_1:apicid=2;core id=1;initial apicid=2;physical id=0|
Attributes:
text: The /proc/cpuinfo text.
mappings: Dict of [processor id: dict of values that change with cpu]
attributes: Dict of /proc/cpuinfo entries that are not in mappings.
"""
# known attributes that vary with the processor id
_PER_CPU_KEYS = ['core id', 'initial apicid', 'apicid', 'physical id']
# attributes that should be sorted, for example turning the 'flags' value
# of "popcnt avx512bw" to "avx512bw popcnt"
_SORT_VALUES = ['flags', 'bugs']
def __init__(self, text):
self.mappings = {}
self.attributes = collections.defaultdict(set)
for stanza in _ParseTextProperties(text):
processor_id, single_values, multiple_values = self._ParseStanza(stanza)
if processor_id is None: # can be 0
continue
if processor_id in self.mappings:
logging.warning('Processor id %s seen twice in %s', processor_id, text)
continue
self.mappings[processor_id] = single_values
for key, value in multiple_values.items():
self.attributes[key].add(value)
def GetValues(self):
"""Dict of cpuinfo keys to its values.
Multiple values are joined by semicolons.
Returns:
Dict of [cpuinfo key:value string]
"""
cpuinfo = {
key: ';'.join(sorted(values))
for key, values in self.attributes.items()
}
cpuinfo['proccpu'] = ','.join(sorted(self.attributes.keys()))
return cpuinfo
def _ParseStanza(self, stanza):
"""Parses the cpuinfo section for an individual CPU.
Args:
stanza: Dict of the /proc/cpuinfo results for an individual CPU.
Returns:
Tuple of (processor_id, dict of values that are known to change with
each CPU, dict of other cpuinfo results).
"""
singles = {}
if 'processor' not in stanza:
return None, None, None
processor_id = int(stanza.pop('processor'))
for key in self._PER_CPU_KEYS:
if key in stanza:
singles[key] = stanza.pop(key)
for key in self._SORT_VALUES:
if key in stanza:
stanza[key] = ' '.join(sorted(stanza[key].split()))
return processor_id, singles, stanza
class JujuMixin(BaseDebianMixin):
"""Class to allow running Juju-deployed workloads.
Bootstraps a Juju environment using the manual provider:
https://jujucharms.com/docs/stable/config-manual
"""
# TODO: Add functionality to tear down and uninstall Juju
# (for pre-provisioned) machines + JujuUninstall for packages using charms.
OS_TYPE = os_types.JUJU
is_controller = False
# A reference to the juju controller, useful when operations occur against
  # a unit's VM but need to be performed from the controller.
controller = None
vm_group = None
machines = {}
units = []
installation_lock = threading.Lock()
environments_yaml = """
default: perfkit
environments:
perfkit:
type: manual
bootstrap-host: {0}
"""
def _Bootstrap(self):
"""Bootstrap a Juju environment."""
resp, _ = self.RemoteHostCommand('juju bootstrap')
def JujuAddMachine(self, unit):
"""Adds a manually-created virtual machine to Juju.
Args:
unit: An object representing the unit's BaseVirtualMachine.
"""
    resp, stderr = self.RemoteHostCommand('juju add-machine ssh:%s' %
                                          unit.internal_ip)
    # We don't know what the machine's going to be used for yet,
    # but track its placement for easier access later.
    # Juju reports "created machine %d", so take the trailing word.
    machine_id = stderr[stderr.rindex(' '):].strip()
    self.machines[machine_id] = unit
def JujuConfigureEnvironment(self):
"""Configure a bootstrapped Juju environment."""
if self.is_controller:
resp, _ = self.RemoteHostCommand('mkdir -p ~/.juju')
with vm_util.NamedTemporaryFile() as tf:
tf.write(self.environments_yaml.format(self.internal_ip))
tf.close()
self.PushFile(tf.name, '~/.juju/environments.yaml')
def JujuEnvironment(self):
"""Get the name of the current environment."""
output, _ = self.RemoteHostCommand('juju switch')
return output.strip()
def JujuRun(self, cmd):
"""Run a command on the virtual machine.
Args:
cmd: The command to run.
"""
output, _ = self.RemoteHostCommand(cmd)
return output.strip()
def JujuStatus(self, pattern=''):
"""Return the status of the Juju environment.
Args:
pattern: Optionally match machines/services with a pattern.
"""
output, _ = self.RemoteHostCommand('juju status %s --format=json' %
pattern)
return output.strip()
def JujuVersion(self):
"""Return the Juju version."""
output, _ = self.RemoteHostCommand('juju version')
return output.strip()
  def JujuSet(self, service, params=None):
    """Set the configuration options on a deployed service.
    Args:
      service: The name of the service.
      params: A list of key=value pairs.
    """
    params = params or []
output, _ = self.RemoteHostCommand(
'juju set %s %s' % (service, ' '.join(params)))
return output.strip()
@vm_util.Retry(poll_interval=30, timeout=3600)
def JujuWait(self):
"""Wait for all deployed services to be installed, configured, and idle."""
status = yaml.safe_load(self.JujuStatus())
    for service in status['services']:
      ss = status['services'][service]['service-status']['current']
      if ss == 'error':
        # The service has failed to deploy.
        debuglog = self.JujuRun('juju debug-log --limit 200')
        logging.warning(debuglog)
        raise errors.Juju.UnitErrorException(
            'Service %s is in an error state' % service)
      # Accept 'unknown' because the service may be waiting on a relation.
      if ss not in ['active', 'unknown']:
        raise errors.Juju.TimeoutException(
            'Service %s is not ready; status is %s' % (service, ss))
for unit in status['services'][service]['units']:
unit_data = status['services'][service]['units'][unit]
ag = unit_data['agent-state']
if ag != 'started':
raise errors.Juju.TimeoutException(
'Service %s is not ready; agent-state is %s' % (service, ag))
ws = unit_data['workload-status']['current']
if ws not in ['active', 'unknown']:
raise errors.Juju.TimeoutException(
'Service %s is not ready; workload-state is %s' % (service, ws))
def JujuDeploy(self, charm, vm_group):
"""Deploy (and scale) this service to the machines in its vm group.
Args:
charm: The charm to deploy, i.e., cs:trusty/ubuntu.
vm_group: The name of vm_group the unit(s) should be deployed to.
"""
# Find the already-deployed machines belonging to this vm_group
machines = []
for machine_id, unit in self.machines.items():
if unit.vm_group == vm_group:
machines.append(machine_id)
# Deploy the first machine
resp, _ = self.RemoteHostCommand(
'juju deploy %s --to %s' % (charm, machines.pop()))
# Get the name of the service
service = charm[charm.rindex('/') + 1:]
# Deploy to the remaining machine(s)
for machine in machines:
resp, _ = self.RemoteHostCommand(
'juju add-unit %s --to %s' % (service, machine))
def JujuRelate(self, service1, service2):
"""Create a relation between two services.
Args:
service1: The first service to relate.
service2: The second service to relate.
"""
resp, _ = self.RemoteHostCommand(
'juju add-relation %s %s' % (service1, service2))
def Install(self, package_name):
"""Installs a PerfKit package on the VM."""
package = linux_packages.PACKAGES[package_name]
try:
# Make sure another unit doesn't try
# to install the charm at the same time
with self.controller.installation_lock:
if package_name not in self.controller._installed_packages:
package.JujuInstall(self.controller, self.vm_group)
self.controller._installed_packages.add(package_name)
except AttributeError as e:
logging.warning('Failed to install package %s, falling back to Apt (%s)',
package_name, e)
if package_name not in self._installed_packages:
if hasattr(package, 'AptInstall'):
package.AptInstall(self)
elif hasattr(package, 'Install'):
package.Install(self)
else:
raise KeyError('Package %s has no install method for Juju machines.' %
package_name)
self._installed_packages.add(package_name)
def SetupPackageManager(self):
if self.is_controller:
resp, _ = self.RemoteHostCommand(
'sudo add-apt-repository ppa:juju/stable'
)
super(JujuMixin, self).SetupPackageManager()
def PrepareVMEnvironment(self):
"""Install and configure a Juju environment."""
super(JujuMixin, self).PrepareVMEnvironment()
if self.is_controller:
self.InstallPackages('juju')
self.JujuConfigureEnvironment()
self.AuthenticateVm()
self._Bootstrap()
# Install the Juju agent on the other VMs
for unit in self.units:
unit.controller = self
self.JujuAddMachine(unit)
class BaseLinuxVirtualMachine(BaseLinuxMixin,
virtual_machine.BaseVirtualMachine):
"""Linux VM for use with pytyping."""
|
import facebook
import requests
import urllib3
'''Demonstrate usage of Facebook graph API.
1. Create a Facebook developer account. See https://developers.facebook.com/docs/development/register.
2. Create a Facebook app. See https://developers.facebook.com/apps/.
- type: business
- display name: sql sith is all business
- app contact email: chris@databaseguy.com
- business account: no business manager account selected
3. Get your app id from the top of the "add products to your app" page
- mine was 502422597913056
4. Set up Messenger for your app and link it to one or more pages. Copy the access keys.
- People and Animals Against Butter Cow Vandals: EAAHI82B5yeABAKoiAt8CUHXHfQt8BtU0xNXw71JosTy2II6Ev3ZB8biVGmL8v0xTcVkHhkm8LytZAlt2BZAjTC9ZBiAGyHbtY6K8jfZAHFCwZAuRQGowZB4vHFT9ofjpEnIHsCktUvEc2ptOsLJMR1Rf6wfYNsEm6G6WbYNB6QpLcNo0vvdf3QJ
- Light Day: EAAHI82B5yeABALVls4akj4b6zPtYCLAWAvYJYtyuPDsczGkF4WPqpbhgjaWCRo1EXR97qCeWQ2fTyxQnMVxROoFiOi1tfgkTpTQLTtnwMm9UiQvF3fKBny2bG6r5FUkPlJCvRYvZAGFAZAAv0R5MoEQBuwmvC4ZAsbRJGm8LeIX5lSm9xqj
'''
# api = GraphAPI(app_id="id", app_secret="secret", oauth_flow=True)
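# Minimal sketch with the facebook-sdk package; GraphAPI takes an access
# token rather than an app id/secret, and the token below is a placeholder:
#   graph = facebook.GraphAPI(access_token='<PAGE_ACCESS_TOKEN>')
#   page = graph.get_object('me')
#   graph.put_object(parent_object='me', connection_name='feed',
#                    message='Hello from the Graph API')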
|
import os
import requests
from getpass import getpass
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Get api token for user providing username/password"
def handle(self, *args, **options):
username = os.environ.get("USERNAME", "analytics")
obtain_token_url = os.environ.get("OBTAIN_TOKEN_URL")
params = {"username": username, "password": getpass()}
r = requests.post(obtain_token_url, data=params)
token = r.json()["token"]
print("token: ", token)
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'expat',
'type': 'none',
'link_settings': {
'libraries': [
'-lexpat',
],
},
},
],
}
|
#!/usr/bin/env python
import sys
from setuptools import setup
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(
name='marathon',
version='0.8.2',
description='Marathon Client Library',
long_description="""Python interface to the Mesos Marathon REST API.""",
author='Mike Babineau',
author_email='michael.babineau@gmail.com',
install_requires=['requests>=2.0.0', 'sseclient'],
url='https://github.com/thefactory/marathon-python',
packages=['marathon', 'marathon.models'],
license='MIT',
platforms='Posix; MacOS X; Windows',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
**extra
)
|
words = ["donkey", "kaddu", "mote"]
with open("sample.txt") as f:
content = f.read()
for word in words:
content = content.replace(word, "$%^@$^#")
with open("sample.txt", "w") as f:
f.write(content)
|
import asyncio
import time
import logging
import zmq
import zmq.asyncio
from collections import namedtuple
from gabriel_protocol import gabriel_pb2
from gabriel_server import cognitive_engine
from gabriel_server import network_engine
from gabriel_server.websocket_server import WebsocketServer
FIVE_SECONDS = 5
logger = logging.getLogger(__name__)
Metadata = namedtuple('Metadata', ['frame_id', 'host', 'port'])
MetadataPayload = namedtuple('MetadataPayload', ['metadata', 'payload'])
def run(websocket_port, zmq_address, num_tokens, input_queue_maxsize,
timeout=FIVE_SECONDS):
context = zmq.asyncio.Context()
zmq_socket = context.socket(zmq.ROUTER)
zmq_socket.bind(zmq_address)
logger.info('Waiting for engines to connect')
server = _Server(websocket_port, num_tokens, zmq_socket, timeout,
input_queue_maxsize)
server.launch()
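# Usage sketch (hypothetical values): serve clients on port 9099 and accept
# engine connections on a local ZMQ ROUTER endpoint.
#   run(websocket_port=9099, zmq_address='tcp://*:5555', num_tokens=2,
#       input_queue_maxsize=60)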
class _Server(WebsocketServer):
def __init__(self, websocket_port, num_tokens, zmq_socket, timeout,
size_for_queues):
super().__init__(websocket_port, num_tokens)
self._zmq_socket = zmq_socket
self._engine_workers = {}
self._filter_infos = {}
self._from_engines = asyncio.Queue()
self._timeout = timeout
self._size_for_queues = size_for_queues
def launch(self):
        # We cannot use "while self.is_running" because these loops would
        # terminate before super().launch() is called.
async def receive_from_engine_worker_loop():
while True:
await self._receive_from_engine_worker_helper()
async def heartbeat_loop():
while True:
await asyncio.sleep(self._timeout)
await self._heartbeat_helper()
asyncio.ensure_future(receive_from_engine_worker_loop())
asyncio.ensure_future(heartbeat_loop())
super().launch()
async def _receive_from_engine_worker_helper(self):
address, _, payload = await self._zmq_socket.recv_multipart()
engine_worker = self._engine_workers.get(address)
if payload == network_engine.HEARTBEAT:
            if engine_worker is None:
logger.error('Heartbeat from unknown engine')
else:
                engine_worker.record_heartbeat()
return
to_server_runner = gabriel_pb2.ToServerRunner()
to_server_runner.ParseFromString(payload)
if engine_worker is None:
await self._add_engine_worker(address, to_server_runner)
return
if to_server_runner.HasField('welcome'):
logger.error('Engine sent duplicate welcome message')
return
result_wrapper = to_server_runner.result_wrapper
engine_worker_metadata = engine_worker.get_current_input_metadata()
assert result_wrapper.frame_id == engine_worker_metadata.frame_id
filter_info = self._filter_infos[result_wrapper.filter_passed]
latest_input = filter_info.get_latest_input()
if (latest_input is not None and
latest_input.metadata == engine_worker_metadata):
# Send response to client
await filter_info.respond_to_client(
engine_worker_metadata, result_wrapper, return_token=True)
await engine_worker.send_message_from_queue()
return
if len(result_wrapper.results) > 0:
await filter_info.respond_to_client(
engine_worker_metadata, result_wrapper, return_token=False)
if latest_input is None:
# There is no new input to give the worker
engine_worker.clear_current_input_metadata()
else:
# Give the worker the latest input
await engine_worker.send_payload(latest_input)
async def _add_engine_worker(self, address, to_server_runner):
if to_server_runner.HasField('welcome'):
filter_name = to_server_runner.welcome.filter_name
logger.info('New engine connected that consumes filter: %s',
filter_name)
elif to_server_runner.HasField('result_wrapper'):
            # The engine probably took too long on an input and was dropped.
            filter_name = to_server_runner.result_wrapper.filter_passed
            logger.warning('Result from unrecognized engine that consumes '
                           'frames from filter: %s', filter_name)
            logger.info('Adding engine back as if it were new')
filter_info = self._filter_infos.get(filter_name)
if filter_info is None:
logger.info('First engine for inputs that pass filter: '
'%s', filter_name)
filter_info = _FilterInfo(
filter_name, self._size_for_queues, self._from_engines)
self._filter_infos[filter_name] = filter_info
# Tell super() to accept inputs that have passed filter_name
self.add_filter_consumed(filter_name)
engine_worker = _EngineWorker(self._zmq_socket, filter_info, address)
self._engine_workers[address] = engine_worker
filter_info.add_engine_worker(engine_worker)
async def _heartbeat_helper(self):
current_time = time.time()
# We cannot directly iterate over items because we delete some entries
for address, engine_worker in list(self._engine_workers.items()):
if (current_time - engine_worker.get_last_sent()) < self._timeout:
continue
if ((not engine_worker.get_awaiting_heartbeat_response()) and
engine_worker.get_current_input_metadata() is None):
await engine_worker.send_heartbeat()
continue
filter_info = engine_worker.get_filter_info()
logger.info('Lost connection to engine worker that consumes items '
'from filter: %s', filter_info.get_name())
await engine_worker.drop()
del self._engine_workers[address]
if filter_info.has_no_engine_workers():
filter_name = filter_info.get_name()
logger.info('No remaining engines consume input from filter: '
'%s', filter_name)
del self._filter_infos[filter_name]
self.remove_filter_consumed(filter_name)
async def _send_to_engine(self, to_engine):
filter_name = to_engine.from_client.filter_passed
filter_info = self._filter_infos[filter_name]
return await filter_info.handle_new_to_engine(to_engine)
async def _recv_from_engine(self):
return await self._from_engines.get()
class _EngineWorker:
def __init__(self, zmq_socket, filter_info, address):
self._zmq_socket = zmq_socket
self._filter_info = filter_info
self._address = address
self._last_sent = 0
self._awaiting_heartbeat_response = False
self._current_input_metadata = None
def get_address(self):
return self._address
def get_filter_info(self):
return self._filter_info
def get_current_input_metadata(self):
return self._current_input_metadata
def clear_current_input_metadata(self):
self._current_input_metadata = None
    def record_heartbeat(self):
self._awaiting_heartbeat_response = False
def get_awaiting_heartbeat_response(self):
return self._awaiting_heartbeat_response
def get_last_sent(self):
return self._last_sent
async def send_heartbeat(self):
await self._send_helper(network_engine.HEARTBEAT)
self._awaiting_heartbeat_response = True
async def _send_helper(self, payload):
await self._zmq_socket.send_multipart([self._address, b'', payload])
self._last_sent = time.time()
async def drop(self):
latest_input = self._filter_info.get_latest_input()
if (latest_input is not None and
self._current_input_metadata == latest_input.metadata):
# Return token for frame engine was in the middle of processing
status = gabriel_pb2.ResultWrapper.Status.ENGINE_ERROR
metadata = self._current_input_metadata
filter_name = self._filter_info.get_name()
result_wrapper = cognitive_engine.error_result_wrapper(
metadata.frame_id, status, filter_name)
await self._filter_info.respond_to_client(
metadata, result_wrapper, return_token=True)
self._filter_info.remove_engine_worker(self)
async def send_payload(self, metadata_payload):
self._current_input_metadata = metadata_payload.metadata
await self._send_helper(metadata_payload.payload)
async def send_message_from_queue(self):
'''Send message from queue and update current input.
Current input will be set as None if there is nothing on the queue.'''
metadata_payload = self._filter_info.advance_unsent_queue()
if metadata_payload is None:
self._current_input_metadata = None
else:
await self.send_payload(metadata_payload)
class _FilterInfo:
def __init__(self, filter_name, fresh_inputs_queue_size, from_engines):
self._filter_name = filter_name
self._unsent_inputs = asyncio.Queue(maxsize=fresh_inputs_queue_size)
self._from_engines = from_engines
self._latest_input = None
self._engine_workers = set()
def get_name(self):
return self._filter_name
def add_engine_worker(self, engine_worker):
self._engine_workers.add(engine_worker)
def remove_engine_worker(self, engine_worker):
self._engine_workers.remove(engine_worker)
def has_no_engine_workers(self):
return len(self._engine_workers) == 0
def get_latest_input(self):
return self._latest_input
async def handle_new_to_engine(self, to_engine):
sent_to_engine = False
metadata = Metadata(frame_id=to_engine.from_client.frame_id,
host=to_engine.host, port=to_engine.port)
payload = to_engine.from_client.SerializeToString()
metadata_payload = MetadataPayload(metadata=metadata, payload=payload)
for engine_worker in self._engine_workers:
if engine_worker.get_current_input_metadata() is None:
await engine_worker.send_payload(metadata_payload)
sent_to_engine = True
if sent_to_engine:
self._latest_input = metadata_payload
return True
if self._unsent_inputs.full():
return False
self._unsent_inputs.put_nowait(metadata_payload)
return True
async def respond_to_client(self, metadata, result_wrapper, return_token):
from_engine = cognitive_engine.pack_from_engine(
metadata.host, metadata.port, result_wrapper, return_token)
await self._from_engines.put(from_engine)
if return_token:
self._latest_input = None
def advance_unsent_queue(self):
'''
Remove an item from the queue of unsent to_engine messages, and store
this as the latest input.
Return metadata_payload if there was an item pulled off the queue.
Return None otherwise.
'''
if self._unsent_inputs.empty():
# We do not need to update latest input, because respond_to_client
# has already cleared latest_input
return None
metadata_payload = self._unsent_inputs.get_nowait()
self._latest_input = metadata_payload
return metadata_payload
|
# -*- coding: utf-8 -*-
class Taiyang(object):
def __init__(self):
pass
class ShouTaiyang(Taiyang):
def __init__(self):
pass
class ZuTaiyang(Taiyang):
def __init__(self):
pass
|
"""
Register an iFrame front end panel.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/panel_iframe/
"""
import voluptuous as vol
from homeassistant.const import (CONF_ICON, CONF_URL)
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['frontend']
DOMAIN = 'panel_iframe'
CONF_TITLE = 'title'
CONF_RELATIVE_URL_ERROR_MSG = "Invalid relative URL. Absolute path required."
CONF_RELATIVE_URL_REGEX = r'\A/'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: cv.schema_with_slug_keys(
vol.Schema({
# pylint: disable=no-value-for-parameter
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Required(CONF_URL): vol.Any(
vol.Match(
CONF_RELATIVE_URL_REGEX,
msg=CONF_RELATIVE_URL_ERROR_MSG),
vol.Url()),
})
)
}, extra=vol.ALLOW_EXTRA)
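# A configuration.yaml entry accepted by CONFIG_SCHEMA above (values are
# illustrative):
#
#   panel_iframe:
#     router:
#       title: 'Router'
#       icon: mdi:router-wireless
#       url: 'http://192.168.1.1'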
async def async_setup(hass, config):
"""Set up the iFrame frontend panels."""
for url_path, info in config[DOMAIN].items():
await hass.components.frontend.async_register_built_in_panel(
'iframe', info.get(CONF_TITLE), info.get(CONF_ICON),
url_path, {'url': info[CONF_URL]})
return True
|
#!/usr/bin/env python
###################################################################
# uids_example_script.py : Demonstrate how to execute/skip particular tests.
# By default, if uids is not defined, everything is executed.
# If uids is defined, only the sections that match the uids logic will be
# executed.
###################################################################
# To get a logger for the script
import logging
# Needed for aetest script
from pyats import aetest
# Get your logger for your script
log = logging.getLogger(__name__)
###################################################################
### COMMON SETUP SECTION ###
###################################################################
# CommonSetup will also be skipped if it is not mentioned in the uids
class common_setup(aetest.CommonSetup):
""" Common Setup section """
@aetest.subsection
def common_setup_subsection(self):
""" Common Setup subsection """
log.info("Aetest Common Setup")
###################################################################
### TESTCASES SECTION ###
###################################################################
class my_looped_testcase(aetest.Testcase):
@aetest.setup
def empty_setup(self):
""" Testcase Setup section """
pass
    # Each loop iteration can be skipped individually; we have an example of
    # loop2 being skipped but not loop1.
@aetest.loop(['loop1', 'loop2'])
@aetest.test
def test_one(self, section):
log.info ("---------------TEST1 LOOP-------------------")
log.info ("Test Iteration in testcase %s section" % (section,))
    # We have an example uids pattern that skips ^loop2. This section will not
    # be skipped, as its name does not match that regex.
@aetest.test
def test_two_loop2(self):
log.info ("Test Iteration in testcase test_two_loop2 section")
class tc_two(aetest.Testcase):
""" This is user Testcases section """
@aetest.setup
def tc_two_setup(self):
pass
@aetest.cleanup
def tc_two_cleanup(self):
""" Testcase cleanup section """
pass
#####################################################################
#### COMMON CLEANUP SECTION ###
#####################################################################
# CommonCleanup will be skipped if not mentioned in the uids list to be executed
class common_cleanup(aetest.CommonCleanup):
""" Common Cleanup for Sample Test """
@aetest.subsection
def common_cleanup_subsection(self):
""" Common Cleanup Subsection """
log.info("Aetest Common Cleanup")
if __name__ == "__main__": # pragma: no cover
aetest.main()
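    # Usage sketch: restrict execution to particular section uids (assumes
    # the standard pyATS logic helpers):
    #   from pyats.datastructures.logic import Or
    #   aetest.main(uids=Or('common_setup', 'my_looped_testcase'))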
|
__VERSION__ = '0.9.4'
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import base64
import email
import hashlib
from typing import List
from dateutil.parser import parse
from typing import Dict, Tuple, Any, Optional, Union
from threading import Timer
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
INTEGRATION_NAME = 'CrowdStrike Falcon'
CLIENT_ID = demisto.params().get('client_id')
SECRET = demisto.params().get('secret')
# Remove trailing slash to prevent wrong URL path to service
SERVER = demisto.params()['url'][:-1] if (demisto.params()['url'] and demisto.params()['url'].endswith('/')) else \
demisto.params()['url']
# Should we use SSL
USE_SSL = not demisto.params().get('insecure', False)
# How far back in time to go on the first fetch of incidents
FETCH_TIME = demisto.params().get('fetch_time', '3 days')
BYTE_CREDS = '{name}:{password}'.format(name=CLIENT_ID, password=SECRET).encode('utf-8')
# Headers to be sent in requests
HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': 'Basic {}'.format(base64.b64encode(BYTE_CREDS).decode())
}
# Note: the true lifetime of a token is actually 30 minutes
TOKEN_LIFE_TIME = 28
INCIDENTS_PER_FETCH = int(demisto.params().get('incidents_per_fetch', 15))
# Remove proxy if not set to true in params
handle_proxy()
''' KEY DICTIONARY '''
DETECTIONS_BASE_KEY_MAP = {
'device.hostname': 'System',
'device.cid': 'CustomerID',
'hostinfo.domain': 'MachineDomain',
'detection_id': 'ID',
'created_timestamp': 'ProcessStartTime',
'max_severity': 'MaxSeverity',
'show_in_ui': 'ShowInUi',
'status': 'Status'
}
DETECTIONS_BEHAVIORS_KEY_MAP = {
'filename': 'FileName',
'scenario': 'Scenario',
'md5': 'MD5',
'sha256': 'SHA256',
'ioc_type': 'IOCType',
'ioc_value': 'IOCValue',
'cmdline': 'CommandLine',
'user_name': 'UserName',
'behavior_id': 'ID',
}
IOC_KEY_MAP = {
'type': 'Type',
'value': 'Value',
'policy': 'Policy',
'source': 'Source',
'share_level': 'ShareLevel',
'expiration': 'Expiration',
'description': 'Description',
'created_on': 'CreatedTime',
'created_by': 'CreatedBy',
'modified_on': 'ModifiedTime',
'modified_by': 'ModifiedBy',
'id': 'ID',
'platforms': 'Platforms',
'action': 'Action',
'severity': 'Severity',
'tags': 'Tags',
}
IOC_DEVICE_COUNT_MAP = {
'id': 'ID',
'type': 'Type',
'value': 'Value',
'device_count': 'DeviceCount'
}
SEARCH_DEVICE_KEY_MAP = {
'device_id': 'ID',
'external_ip': 'ExternalIP',
'local_ip': 'LocalIP',
'hostname': 'Hostname',
'os_version': 'OS',
'mac_address': 'MacAddress',
'first_seen': 'FirstSeen',
'last_seen': 'LastSeen',
'status': 'Status',
}
ENDPOINT_KEY_MAP = {
'device_id': 'ID',
'local_ip': 'IPAddress',
'os_version': 'OS',
'hostname': 'Hostname',
'status': 'Status',
}
''' SPLIT KEY DICTIONARY '''
"""
Pattern:
{
'Path': 'Path to item',
'NewKey': 'Value of output key',
'Delim': 'Delimiter char',
'Index': Split Array Index
}
"""
DETECTIONS_BEHAVIORS_SPLIT_KEY_MAP = [
{
'Path': 'parent_details.parent_process_graph_id',
'NewKey': 'SensorID',
'Delim': ':',
'Index': 1
},
{
'Path': 'parent_details.parent_process_graph_id',
'NewKey': 'ParentProcessID',
'Delim': ':',
'Index': 2
},
{
'Path': 'triggering_process_graph_id',
'NewKey': 'ProcessID',
'Delim': ':',
'Index': 2
},
]
HOST_GROUP_HEADERS = ['id', 'name', 'group_type', 'description', 'assignment_rule',
'created_by', 'created_timestamp',
'modified_by', 'modified_timestamp']
STATUS_TEXT_TO_NUM = {'New': "20",
'Reopened': "25",
'In Progress': "30",
'Closed': "40"}
STATUS_NUM_TO_TEXT = {20: 'New',
25: 'Reopened',
30: 'In Progress',
40: 'Closed'}
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None, files=None, headers=HEADERS, safe=False,
get_token_flag=True, no_json=False, json=None, status_code=None):
"""
    A wrapper around the requests library to send our requests and handle
    responses and errors more gracefully.
:param json: JSON body
    :type json: ``dict`` or ``list``
:type method: ``str``
:param method: HTTP method for the request.
:type url_suffix: ``str``
:param url_suffix: The suffix of the URL (endpoint)
:type params: ``dict``
:param params: The URL params to be passed.
:type data: ``str``
:param data: The body data of the request.
:type headers: ``dict``
:param headers: Request headers
:type safe: ``bool``
:param safe: If set to true will return None in case of http error
:type get_token_flag: ``bool``
:param get_token_flag: If set to True will call get_token()
:type no_json: ``bool``
:param no_json: If set to true will not parse the content and will return the raw response object for successful
response
:type status_code: ``int``
    :param status_code: An additional HTTP status code to accept as OK.
:return: Returns the http request response json
:rtype: ``dict``
"""
if get_token_flag:
token = get_token()
headers['Authorization'] = 'Bearer {}'.format(token)
url = SERVER + url_suffix
try:
res = requests.request(
method,
url,
verify=USE_SSL,
params=params,
data=data,
headers=headers,
files=files,
json=json,
)
except requests.exceptions.RequestException as e:
return_error(f'Error in connection to the server. Please make sure you entered the URL correctly.'
f' Exception is {str(e)}.')
try:
valid_status_codes = {200, 201, 202, 204}
# Handling a case when we want to return an entry for 404 status code.
if status_code:
valid_status_codes.add(status_code)
if res.status_code not in valid_status_codes:
res_json = res.json()
reason = res.reason
resources = res_json.get('resources', {})
if resources:
if isinstance(resources, list):
reason += f'\n{str(resources)}'
else:
for host_id, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message')
reason += f'\nHost ID {host_id} - {error_message}'
elif res_json.get('errors'):
errors = res_json.get('errors', [])
for error in errors:
reason += f"\n{error.get('message')}"
err_msg = 'Error in API call to CrowdStrike Falcon: code: {code} - reason: {reason}'.format(
code=res.status_code,
reason=reason
)
# try to create a new token
if res.status_code == 403 and get_token_flag:
LOG(err_msg)
token = get_token(new_token=True)
headers['Authorization'] = 'Bearer {}'.format(token)
return http_request(
method=method,
url_suffix=url_suffix,
params=params,
data=data,
headers=headers,
files=files,
json=json,
safe=safe,
get_token_flag=False,
status_code=status_code,
no_json=no_json,
)
elif safe:
return None
return_error(err_msg)
return res if no_json else res.json()
except ValueError as exception:
raise ValueError(
f'Failed to parse json object from response: {exception} - {res.content}') # type: ignore[str-bytes-safe]
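# Usage illustration (hypothetical filter value): a call such as
#   http_request('GET', '/devices/queries/devices/v1', params={'filter': "hostname:'host1'"})
# returns the parsed JSON body on success, while passing no_json=True returns the
# raw requests.Response object instead.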
def create_entry_object(contents: Union[List[Any], Dict[str, Any]] = {}, ec: Union[List[Any], Dict[str, Any]] = None,
hr: str = ''):
"""
Creates an entry object
:type contents: ``dict``
:param contents: Raw response to output
:type ec: ``dict``
:param ec: Entry context of the entry object
:type hr: ``str``
:param hr: Human readable
:return: Entry object
:rtype: ``dict``
"""
return {
'Type': entryTypes['note'],
'Contents': contents,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr,
'EntryContext': ec
}
def detection_to_incident(detection):
"""
    Creates an incident from a detection.
:type detection: ``dict``
:param detection: Single detection object
:return: Incident representation of a detection
:rtype ``dict``
"""
incident = {
'name': 'Detection ID: ' + str(detection.get('detection_id')),
'occurred': str(detection.get('created_timestamp')),
'rawJSON': json.dumps(detection),
'severity': severity_string_to_int(detection.get('max_severity_displayname'))
}
return incident
def incident_to_incident_context(incident):
"""
    Creates an incident context of an incident.
    :type incident: ``dict``
    :param incident: Single incident object
    :return: Incident context representation of an incident
:rtype ``dict``
"""
incident_id = str(incident.get('incident_id'))
incident_context = {
'name': f'Incident ID: {incident_id}',
'occurred': str(incident.get('start')),
'rawJSON': json.dumps(incident)
}
return incident_context
def severity_string_to_int(severity):
"""
Converts a severity string to DBot score representation
:type severity: ``str``
:param severity: String representation of a severity
:return: DBot score representation of the severity
:rtype ``int``
"""
if severity in ('Critical', 'High'):
return 3
elif severity in ('Medium', 'Low'):
return 2
return 0
def get_trasnformed_dict(old_dict, transformation_dict):
"""
    Returns a dictionary whose values are taken from old_dict, keyed by the corresponding new keys from transformation_dict
:type old_dict: ``dict``
:param old_dict: Old dictionary to pull values from
:type transformation_dict: ``dict``
:param transformation_dict: Transformation dictionary that contains oldkeys:newkeys
    :return: Transformed dictionary (according to transformation_dict values)
:rtype ``dict``
"""
new_dict = {}
for k in list(old_dict.keys()):
if k in transformation_dict:
new_dict[transformation_dict[k]] = old_dict[k]
return new_dict
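# Illustration (hypothetical input): get_trasnformed_dict({'device_id': 'abc', 'extra': 1}, ENDPOINT_KEY_MAP)
# returns {'ID': 'abc'} - keys that are absent from the transformation dict are dropped.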
def extract_transformed_dict_with_split(old_dict, transformation_dict_arr):
"""
Extracts new values out of old_dict using a json structure of:
{'Path': 'Path to item', 'NewKey': 'Value of output key', 'Delim': 'Delimiter char', 'Index': Split Array Index}
"""
new_dict = {}
for trans_dict in transformation_dict_arr:
try:
val = demisto.get(old_dict, trans_dict['Path'])
if 'split' in dir(val):
i = trans_dict['Index']
new_dict[trans_dict['NewKey']] = val.split(trans_dict['Delim'])[i]
except Exception as ex:
LOG('Error {exception} with: {tdict}'.format(exception=ex, tdict=trans_dict))
return new_dict
def get_passed_mins(start_time, end_time_str):
"""
Returns the time passed in mins
:param start_time: Start time in datetime
:param end_time_str: End time in str
:return: The passed mins in int
"""
time_delta = start_time - datetime.fromtimestamp(end_time_str)
return time_delta.seconds / 60
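# Illustration (hypothetical values): with start_time = datetime(2021, 1, 1, 12, 30)
# and end_time_str = datetime(2021, 1, 1, 12, 0).timestamp(), get_passed_mins
# returns 30.0.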
def handle_response_errors(raw_res: dict, err_msg: str = None):
"""
Raise exception if raw_res is empty or contains errors
"""
if not err_msg:
err_msg = "The server was unable to return a result, please run the command again."
if not raw_res:
raise DemistoException(err_msg)
if raw_res.get('errors'):
raise DemistoException(raw_res.get('errors'))
return
''' COMMAND SPECIFIC FUNCTIONS '''
def init_rtr_single_session(host_id: str) -> str:
"""
    Start a session with a single host.
    :param host_id: Host agent ID to initialize an RTR session on.
:return: The session ID to execute the command on
"""
endpoint_url = '/real-time-response/entities/sessions/v1'
body = json.dumps({
'device_id': host_id
})
response = http_request('POST', endpoint_url, data=body)
resources = response.get('resources')
if resources and isinstance(resources, list) and isinstance(resources[0], dict):
session_id = resources[0].get('session_id')
if isinstance(session_id, str):
return session_id
raise ValueError('No session id found in the response')
def init_rtr_batch_session(host_ids: list) -> str:
"""
Start a session with one or more hosts
    :param host_ids: List of host agent IDs to initialize an RTR session on.
:return: The session batch ID to execute the command on
"""
endpoint_url = '/real-time-response/combined/batch-init-session/v1'
body = json.dumps({
'host_ids': host_ids
})
response = http_request('POST', endpoint_url, data=body)
return response.get('batch_id')
def refresh_session(host_id: str) -> Dict:
"""
Refresh a session timeout on a single host.
:param host_id: Host agent ID to run RTR command on.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/refresh-session/v1'
body = json.dumps({
'device_id': host_id
})
response = http_request('POST', endpoint_url, data=body)
return response
def batch_refresh_session(batch_id: str) -> None:
"""
    Batch refresh an RTR session on multiple hosts.
:param batch_id: Batch ID to execute the command on.
"""
demisto.debug('Starting session refresh')
endpoint_url = '/real-time-response/combined/batch-refresh-session/v1'
body = json.dumps({
'batch_id': batch_id
})
response = http_request('POST', endpoint_url, data=body)
demisto.debug(f'Refresh session response: {response}')
demisto.debug('Finished session refresh')
def run_batch_read_cmd(batch_id: str, command_type: str, full_command: str) -> Dict:
"""
Sends RTR command scope with read access
:param batch_id: Batch ID to execute the command on.
:param command_type: Read-only command type we are going to execute, for example: ls or cd.
:param full_command: Full command string for the command.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/combined/batch-command/v1'
body = json.dumps({
'base_command': command_type,
'batch_id': batch_id,
'command_string': full_command
})
response = http_request('POST', endpoint_url, data=body)
return response
def run_batch_write_cmd(batch_id: str, command_type: str, full_command: str) -> Dict:
"""
Sends RTR command scope with write access
:param batch_id: Batch ID to execute the command on.
    :param command_type: Active-Responder command type we are going to execute, for example: get or cp.
:param full_command: Full command string for the command.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/combined/batch-active-responder-command/v1'
body = json.dumps({
'base_command': command_type,
'batch_id': batch_id,
'command_string': full_command
})
response = http_request('POST', endpoint_url, data=body)
return response
def run_batch_admin_cmd(batch_id: str, command_type: str, full_command: str, timeout: int = 30) -> Dict:
"""
    Sends RTR command scope with admin access
:param batch_id: Batch ID to execute the command on.
    :param command_type: Admin command type we are going to execute, for example: put or runscript.
:param full_command: Full command string for the command.
:param timeout: Timeout for how long to wait for the request in seconds.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/combined/batch-admin-command/v1'
params = {
'timeout': timeout
}
body = json.dumps({
'base_command': command_type,
'batch_id': batch_id,
'command_string': full_command
})
response = http_request('POST', endpoint_url, data=body, params=params)
return response
def run_batch_get_cmd(host_ids: list, file_path: str, optional_hosts: list = None, timeout: int = None,
timeout_duration: str = None) -> Dict:
"""
Batch executes `get` command across hosts to retrieve files.
After this call is made `/real-time-response/combined/batch-get-command/v1` is used to query for the results.
    :param host_ids: List of host agent IDs to run the RTR command on.
:param file_path: Full path to the file that is to be retrieved from each host in the batch.
:param optional_hosts: List of a subset of hosts we want to run the command on.
If this list is supplied, only these hosts will receive the command.
:param timeout: Timeout for how long to wait for the request in seconds
    :param timeout_duration: Timeout duration for how long to wait for the request in duration syntax
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/combined/batch-get-command/v1'
batch_id = init_rtr_batch_session(host_ids)
body = assign_params(batch_id=batch_id, file_path=file_path, optional_hosts=optional_hosts)
params = assign_params(timeout=timeout, timeout_duration=timeout_duration)
response = http_request('POST', endpoint_url, data=json.dumps(body), params=params)
return response
def status_get_cmd(request_id: str, timeout: int = None, timeout_duration: str = None) -> Dict:
"""
Retrieves the status of the specified batch get command. Will return successful files when they are finished processing.
:param request_id: ID to the request of `get` command.
:param timeout: Timeout for how long to wait for the request in seconds
    :param timeout_duration: Timeout duration for how long to wait for the request in duration syntax
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/combined/batch-get-command/v1'
params = assign_params(timeout=timeout, timeout_duration=timeout_duration, batch_get_cmd_req_id=request_id)
response = http_request('GET', endpoint_url, params=params)
return response
def run_single_read_cmd(host_id: str, command_type: str, full_command: str) -> Dict:
"""
Sends RTR command scope with read access
:param host_id: Host agent ID to run RTR command on.
    :param command_type: Read-only command type we are going to execute, for example: ls or cd.
:param full_command: Full command string for the command.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/command/v1'
session_id = init_rtr_single_session(host_id)
body = json.dumps({
'base_command': command_type,
'command_string': full_command,
'session_id': session_id
})
response = http_request('POST', endpoint_url, data=body)
return response
def run_single_write_cmd(host_id: str, command_type: str, full_command: str) -> Dict:
"""
Sends RTR command scope with write access
:param host_id: Host agent ID to run RTR command on.
:param command_type: Active-Responder command type we are going to execute, for example: get or cp.
:param full_command: Full command string for the command.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/active-responder-command/v1'
session_id = init_rtr_single_session(host_id)
body = json.dumps({
'base_command': command_type,
'command_string': full_command,
'session_id': session_id
})
response = http_request('POST', endpoint_url, data=body)
return response
def run_single_admin_cmd(host_id: str, command_type: str, full_command: str) -> Dict:
"""
Sends RTR command scope with admin access
:param host_id: Host agent ID to run RTR command on.
    :param command_type: Admin command type we are going to execute, for example: put or runscript.
:param full_command: Full command string for the command.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/admin-command/v1'
session_id = init_rtr_single_session(host_id)
body = json.dumps({
'base_command': command_type,
'command_string': full_command,
'session_id': session_id
})
response = http_request('POST', endpoint_url, data=body)
return response
def status_read_cmd(request_id: str, sequence_id: Optional[int]) -> Dict:
"""
Get status of an executed command with read access on a single host.
:param request_id: Cloud Request ID of the executed command to query
:param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences
"""
endpoint_url = '/real-time-response/entities/command/v1'
params = {
'cloud_request_id': request_id,
'sequence_id': sequence_id or 0
}
response = http_request('GET', endpoint_url, params=params)
return response
def status_write_cmd(request_id: str, sequence_id: Optional[int]) -> Dict:
"""
Get status of an executed command with write access on a single host.
:param request_id: Cloud Request ID of the executed command to query
:param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences
"""
endpoint_url = '/real-time-response/entities/active-responder-command/v1'
params = {
'cloud_request_id': request_id,
'sequence_id': sequence_id or 0
}
response = http_request('GET', endpoint_url, params=params)
return response
def status_admin_cmd(request_id: str, sequence_id: Optional[int]) -> Dict:
"""
Get status of an executed command with admin access on a single host.
:param request_id: Cloud Request ID of the executed command to query
:param sequence_id: Sequence ID that we want to retrieve. Command responses are chunked across sequences
"""
endpoint_url = '/real-time-response/entities/admin-command/v1'
params = {
'cloud_request_id': request_id,
'sequence_id': sequence_id or 0
}
response = http_request('GET', endpoint_url, params=params)
return response
def list_host_files(host_id: str) -> Dict:
"""
Get a list of files for the specified RTR session on a host.
:param host_id: Host agent ID to run RTR command on.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/file/v1'
session_id = init_rtr_single_session(host_id)
params = {
'session_id': session_id
}
response = http_request('GET', endpoint_url, params=params)
return response
def upload_script(name: str, permission_type: str, content: str, entry_id: str) -> Dict:
"""
Uploads a script by either given content or file
:param name: Script name to upload
:param permission_type: Permissions type of script to upload
:param content: PowerShell script content
:param entry_id: Script file to upload
:return: Response JSON which contains errors (if exist) and how many resources were affected
"""
endpoint_url = '/real-time-response/entities/scripts/v1'
body: Dict[str, Tuple[Any, Any]] = {
'name': (None, name),
'permission_type': (None, permission_type)
}
temp_file = None
try:
if content:
body['content'] = (None, content)
else: # entry_id was provided
file_ = demisto.getFilePath(entry_id)
file_name = file_.get('name') # pylint: disable=E1101
temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101
body['file'] = (file_name, temp_file)
headers = {
'Authorization': HEADERS['Authorization'],
'Accept': 'application/json'
}
response = http_request('POST', endpoint_url, files=body, headers=headers)
return response
finally:
if temp_file:
temp_file.close()
def get_script(script_id: list) -> Dict:
"""
Retrieves a script given its ID
:param script_id: ID of script to get
:return: Response JSON which contains errors (if exist) and retrieved resource
"""
endpoint_url = '/real-time-response/entities/scripts/v1'
params = {
'ids': script_id
}
response = http_request('GET', endpoint_url, params=params)
return response
def delete_script(script_id: str) -> Dict:
"""
Deletes a script given its ID
:param script_id: ID of script to delete
:return: Response JSON which contains errors (if exist) and how many resources were affected
"""
endpoint_url = '/real-time-response/entities/scripts/v1'
params = {
'ids': script_id
}
response = http_request('DELETE', endpoint_url, params=params)
return response
def list_scripts() -> Dict:
"""
Retrieves list of scripts
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/scripts/v1'
response = http_request('GET', endpoint_url)
return response
def get_extracted_file(host_id: str, sha256: str, filename: str = None):
"""
Get RTR extracted file contents for specified session and sha256.
:param host_id: The host agent ID to initialize the RTR session on.
:param sha256: Extracted SHA256
:param filename: Filename to use for the archive name and the file within the archive.
"""
endpoint_url = '/real-time-response/entities/extracted-file-contents/v1'
session_id = init_rtr_single_session(host_id)
params = {
'session_id': session_id,
'sha256': sha256
}
if filename:
params['filename'] = filename
response = http_request('GET', endpoint_url, params=params, no_json=True)
return response
def upload_file(entry_id: str, description: str) -> Tuple:
"""
Uploads a file given entry ID
:param entry_id: The entry ID of the file to upload
:param description: String description of file to upload
:return: Response JSON which contains errors (if exist) and how many resources were affected and the file name
"""
endpoint_url = '/real-time-response/entities/put-files/v1'
temp_file = None
try:
file_ = demisto.getFilePath(entry_id)
file_name = file_.get('name') # pylint: disable=E1101
temp_file = open(file_.get('path'), 'rb') # pylint: disable=E1101
body = {
'name': (None, file_name),
'description': (None, description),
'file': (file_name, temp_file)
}
headers = {
'Authorization': HEADERS['Authorization'],
'Accept': 'application/json'
}
response = http_request('POST', endpoint_url, files=body, headers=headers)
return response, file_name
finally:
if temp_file:
temp_file.close()
def delete_file(file_id: str) -> Dict:
"""
Delete a put-file based on the ID given
:param file_id: ID of file to delete
:return: Response JSON which contains errors (if exist) and how many resources were affected
"""
endpoint_url = '/real-time-response/entities/put-files/v1'
params = {
'ids': file_id
}
response = http_request('DELETE', endpoint_url, params=params)
return response
def get_file(file_id: list) -> Dict:
"""
Get put-files based on the ID's given
:param file_id: ID of file to get
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/put-files/v1'
params = {
'ids': file_id
}
response = http_request('GET', endpoint_url, params=params)
return response
def list_files() -> Dict:
"""
    Get a list of put-file IDs that are available to the user for the put command.
:return: Response JSON which contains errors (if exist) and retrieved resources
"""
endpoint_url = '/real-time-response/entities/put-files/v1'
response = http_request('GET', endpoint_url)
return response
def get_token(new_token=False):
"""
    Returns a cached token, or retrieves a new one from the server if the cached token has expired
:param new_token: If set to True will generate a new token regardless of time passed
:rtype: ``str``
:return: Token
"""
now = datetime.now()
ctx = demisto.getIntegrationContext()
if ctx and not new_token:
passed_mins = get_passed_mins(now, ctx.get('time'))
if passed_mins >= TOKEN_LIFE_TIME:
# token expired
auth_token = get_token_request()
demisto.setIntegrationContext({'auth_token': auth_token, 'time': date_to_timestamp(now) / 1000})
else:
# token hasn't expired
auth_token = ctx.get('auth_token')
else:
# there is no token
auth_token = get_token_request()
demisto.setIntegrationContext({'auth_token': auth_token, 'time': date_to_timestamp(now) / 1000})
return auth_token
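# Token caching sketch: the integration context holds
# {'auth_token': <token>, 'time': <epoch seconds>}; a cached token is reused until
# TOKEN_LIFE_TIME (28) minutes have passed, keeping a safety margin below the
# server-side 30-minute expiry noted above.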
def get_token_request():
"""
Sends token request
:rtype ``str``
:return: Access token
"""
body = {
'client_id': CLIENT_ID,
'client_secret': SECRET
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
token_res = http_request('POST', '/oauth2/token', data=body, headers=headers, safe=True,
get_token_flag=False)
if not token_res:
err_msg = 'Authorization Error: User has no authorization to create a token. Please make sure you entered the' \
' credentials correctly.'
raise Exception(err_msg)
return token_res.get('access_token')
def get_detections(last_behavior_time=None, behavior_id=None, filter_arg=None):
"""
    Sends a detections request. Only the highest-priority argument supplied is used:
    filter_arg > behavior_id > last_behavior_time
:param last_behavior_time: 3rd priority. The last behavior time of results will be greater than this value
:param behavior_id: 2nd priority. The result will only contain the detections with matching behavior id
:param filter_arg: 1st priority. The result will be filtered using this argument.
:return: Response json of the get detection endpoint (IDs of the detections)
"""
endpoint_url = '/detects/queries/detects/v1'
params = {
'sort': 'first_behavior.asc'
}
if filter_arg:
params['filter'] = filter_arg
elif behavior_id:
params['filter'] = "behaviors.behavior_id:'{0}'".format(behavior_id)
elif last_behavior_time:
params['filter'] = "first_behavior:>'{0}'".format(last_behavior_time)
response = http_request('GET', endpoint_url, params)
return response
def get_fetch_detections(last_created_timestamp=None, filter_arg=None, offset: int = 0):
""" Sends detection request, based on the created_timestamp field. Used for fetch-incidents
Args:
        last_created_timestamp: The results will have a created timestamp greater than this value.
        filter_arg: The result will be filtered using this argument.
        offset: The offset to start fetching from.
Returns:
Response json of the get detection endpoint (IDs of the detections)
"""
endpoint_url = '/detects/queries/detects/v1'
params = {
'sort': 'first_behavior.asc',
'offset': offset,
'limit': INCIDENTS_PER_FETCH
}
if filter_arg:
params['filter'] = filter_arg
elif last_created_timestamp:
params['filter'] = "created_timestamp:>'{0}'".format(last_created_timestamp)
response = http_request('GET', endpoint_url, params)
return response
def get_detections_entities(detections_ids):
"""
Sends detection entities request
:param detections_ids: IDs of the requested detections.
:return: Response json of the get detection entities endpoint (detection objects)
"""
ids_json = {'ids': detections_ids}
if detections_ids:
response = http_request(
'POST',
'/detects/entities/summaries/GET/v1',
data=json.dumps(ids_json)
)
return response
return detections_ids
def get_incidents_ids(last_created_timestamp=None, filter_arg=None, offset: int = 0):
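    """
    Sends an incidents query request, sorted by incident start time. Used for fetch-incidents
    :param last_created_timestamp: The results will have a start time greater than this value
    :param filter_arg: The result will be filtered using this argument
    :param offset: The offset to start fetching from
    :return: Response json of the incidents query endpoint (IDs of the incidents)
    """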
get_incidents_endpoint = '/incidents/queries/incidents/v1'
params = {
'sort': 'start.asc',
'offset': offset,
'limit': INCIDENTS_PER_FETCH
}
if filter_arg:
params['filter'] = filter_arg
elif last_created_timestamp:
params['filter'] = "start:>'{0}'".format(last_created_timestamp)
response = http_request('GET', get_incidents_endpoint, params)
return response
def get_incidents_entities(incidents_ids):
ids_json = {'ids': incidents_ids}
response = http_request(
'POST',
'/incidents/entities/incidents/GET/v1',
data=json.dumps(ids_json)
)
return response
def upload_ioc(ioc_type, value, policy=None, expiration_days=None,
share_level=None, description=None, source=None):
"""
Create a new IOC (or replace an existing one)
"""
payload = assign_params(
type=ioc_type,
value=value,
policy=policy,
share_level=share_level,
expiration_days=expiration_days,
source=source,
description=description,
)
return http_request('POST', '/indicators/entities/iocs/v1', json=[payload])
def update_ioc(ioc_type, value, policy=None, expiration_days=None,
share_level=None, description=None, source=None):
"""
Update an existing IOC
"""
body = assign_params(
type=ioc_type,
value=value,
policy=policy,
share_level=share_level,
expiration_days=expiration_days,
source=source,
description=description,
)
params = assign_params(
type=ioc_type,
value=value
)
return http_request('PATCH', '/indicators/entities/iocs/v1', json=body, params=params)
def search_iocs(types=None, values=None, policies=None, sources=None, expiration_from=None,
expiration_to=None, limit=None, share_levels=None, ids=None, sort=None, offset=None):
"""
:param types: A list of indicator types. Separate multiple types by comma.
:param values: Comma-separated list of indicator values
:param policies: Comma-separated list of indicator policies
:param sources: Comma-separated list of IOC sources
:param expiration_from: Start of date range to search (YYYY-MM-DD format).
:param expiration_to: End of date range to search (YYYY-MM-DD format).
:param share_levels: A list of share levels. Only red is supported.
    :param limit: The maximum number of records to return. The minimum is 1 and the maximum is 500. Default is 50.
    :param sort: The order of the results.
    :param offset: The offset to begin the list from
    :param ids: A list of IOC IDs to retrieve directly; when provided, the other search arguments are ignored.
"""
if not ids:
payload = assign_params(
types=argToList(types),
values=argToList(values),
policies=argToList(policies),
sources=argToList(sources),
share_levels=argToList(share_levels),
sort=sort,
offset=offset,
limit=limit or '50',
)
if expiration_from:
payload['from.expiration_timestamp'] = expiration_from
if expiration_to:
payload['to.expiration_timestamp'] = expiration_to
ids = http_request('GET', '/indicators/queries/iocs/v1', payload).get('resources')
if not ids:
return None
else:
ids = str(ids)
payload = {
'ids': ids
}
return http_request('GET', '/indicators/entities/iocs/v1', params=payload)
def enrich_ioc_dict_with_ids(ioc_dict):
"""
Enriches the provided ioc_dict with IOC ID
    :param ioc_dict: IOC dict transformed using IOC_KEY_MAP
:return: ioc_dict with its ID key:value updated
"""
for ioc in ioc_dict:
ioc['ID'] = '{type}:{val}'.format(type=ioc.get('Type'), val=ioc.get('Value'))
return ioc_dict
def delete_ioc(ioc_type, value):
"""
Delete an IOC
"""
payload = assign_params(
type=ioc_type,
value=value
)
return http_request('DELETE', '/indicators/entities/iocs/v1', payload)
def search_custom_iocs(
types: Optional[Union[list, str]] = None,
values: Optional[Union[list, str]] = None,
sources: Optional[Union[list, str]] = None,
expiration: Optional[str] = None,
limit: str = '50',
sort: Optional[str] = None,
offset: Optional[str] = None,
) -> dict:
"""
:param types: A list of indicator types. Separate multiple types by comma.
:param values: Comma-separated list of indicator values
:param sources: Comma-separated list of IOC sources
:param expiration: The date on which the indicator will become inactive. (YYYY-MM-DD format).
    :param limit: The maximum number of records to return. The minimum is 1 and the maximum is 500. Default is 50.
    :param sort: The order of the results.
:param offset: The offset to begin the list from
"""
filter_list = []
if types:
filter_list.append(f'type:{types}')
if values:
filter_list.append(f'value:{values}')
if sources:
filter_list.append(f'source:{sources}')
if expiration:
filter_list.append(f'expiration:"{expiration}"')
params = {
'filter': '+'.join(filter_list),
'sort': sort,
'offset': offset,
'limit': limit,
}
return http_request('GET', '/iocs/combined/indicator/v1', params=params)
def get_custom_ioc(ioc_id: str) -> dict:
params = {'ids': ioc_id}
return http_request('GET', '/iocs/entities/indicators/v1', params=params)
def upload_custom_ioc(
ioc_type: str,
value: str,
action: str,
platforms: str,
severity: Optional[str] = None,
source: Optional[str] = None,
description: Optional[str] = None,
expiration: Optional[str] = None,
applied_globally: Optional[bool] = None,
host_groups: Optional[List[str]] = None,
) -> dict:
"""
Create a new IOC (or replace an existing one)
"""
payload = {
'indicators': [assign_params(
type=ioc_type,
value=value,
action=action,
platforms=platforms,
severity=severity,
source=source,
description=description,
expiration=expiration,
applied_globally=applied_globally,
host_groups=host_groups,
)]
}
return http_request('POST', '/iocs/entities/indicators/v1', json=payload)
def update_custom_ioc(
ioc_id: str,
action: Optional[str] = None,
platforms: Optional[str] = None,
severity: Optional[str] = None,
source: Optional[str] = None,
description: Optional[str] = None,
expiration: Optional[str] = None,
) -> dict:
"""
Update an IOC
"""
payload = {
'indicators': [{
'id': ioc_id,
} | assign_params(
action=action,
platforms=platforms,
severity=severity,
source=source,
description=description,
expiration=expiration,
)]
}
return http_request('PATCH', '/iocs/entities/indicators/v1', json=payload)
def delete_custom_ioc(ids: str) -> dict:
"""
Delete an IOC
"""
params = {'ids': ids}
return http_request('DELETE', '/iocs/entities/indicators/v1', params=params)
def get_ioc_device_count(ioc_type, value):
"""
Gets the devices that encountered the IOC
"""
payload = assign_params(
type=ioc_type,
value=value
)
response = http_request('GET', '/indicators/aggregates/devices-count/v1', payload, status_code=404)
errors = response.get('errors', [])
for error in errors:
if error.get('code') == 404:
return f'No results found for {ioc_type} - {value}'
return response
def get_process_details(ids):
"""
Get given processes details
"""
payload = assign_params(ids=ids)
return http_request('GET', '/processes/entities/processes/v1', payload)
def get_proccesses_ran_on(ioc_type, value, device_id):
"""
    Get IDs of processes that ran on the given device_id and encountered the IOC
"""
payload = assign_params(
type=ioc_type,
value=value,
device_id=device_id
)
return http_request('GET', '/indicators/queries/processes/v1', payload)
def search_device():
"""
    Searches for devices using the arguments provided by the command execution. Returns None
    if no device was found
:return: Search device response json
"""
args = demisto.args()
input_arg_dict = {
'device_id': str(args.get('ids', '')).split(','),
'status': str(args.get('status', '')).split(','),
'hostname': str(args.get('hostname', '')).split(','),
'platform_name': str(args.get('platform_name', '')).split(','),
'site_name': str(args.get('site_name', '')).split(',')
}
url_filter = '{}'.format(str(args.get('filter', '')))
for k, arg in input_arg_dict.items():
if arg:
if type(arg) is list:
arg_filter = ''
for arg_elem in arg:
if arg_elem:
first_arg = '{filter},{inp_arg}'.format(filter=arg_filter, inp_arg=k) if arg_filter else k
arg_filter = "{first}:'{second}'".format(first=first_arg, second=arg_elem)
if arg_filter:
url_filter = "{url_filter}{arg_filter}".format(url_filter=url_filter + '+' if url_filter else '',
arg_filter=arg_filter)
else:
                # All args should be lists; this is a fallback
url_filter = "{url_filter}+{inp_arg}:'{arg_val}'".format(url_filter=url_filter, inp_arg=k, arg_val=arg)
raw_res = http_request('GET', '/devices/queries/devices/v1', params={'filter': url_filter})
device_ids = raw_res.get('resources')
if not device_ids:
return None
return http_request('GET', '/devices/entities/devices/v1', params={'ids': device_ids})
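# Illustration (hypothetical args): {'hostname': 'host1,host2', 'status': 'normal'}
# builds the FQL filter "hostname:'host1',hostname:'host2'+status:'normal'" -
# comma-separated values are OR-ed per field and separate fields are AND-ed with '+'.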
def behavior_to_entry_context(behavior):
"""
Transforms a behavior to entry context representation
:param behavior: Behavior dict in the format of crowdstrike's API response
:return: Behavior in entry context representation
"""
raw_entry = get_trasnformed_dict(behavior, DETECTIONS_BEHAVIORS_KEY_MAP)
raw_entry.update(extract_transformed_dict_with_split(behavior, DETECTIONS_BEHAVIORS_SPLIT_KEY_MAP))
return raw_entry
def get_username_uuid(username: str):
"""
    Obtain a CrowdStrike user's UUID by email.
:param username: Username to get UUID of.
:return: The user UUID
"""
response = http_request('GET', '/users/queries/user-uuids-by-email/v1', params={'uid': username})
resources: list = response.get('resources', [])
if not resources:
raise ValueError(f'User {username} was not found')
return resources[0]
def resolve_detection(ids, status, assigned_to_uuid, show_in_ui, comment):
"""
Sends a resolve detection request
:param ids: Single or multiple ids in an array string format
:param status: New status of the detection
:param assigned_to_uuid: uuid to assign the detection to
:param show_in_ui: Boolean flag in string format (true/false)
:param comment: Optional comment to add to the detection
:return: Resolve detection response json
"""
payload = {
'ids': ids
}
if status:
payload['status'] = status
if assigned_to_uuid:
payload['assigned_to_uuid'] = assigned_to_uuid
if show_in_ui:
payload['show_in_ui'] = show_in_ui
if comment:
payload['comment'] = comment
    # Serialize show_in_ui as a real boolean rather than a quoted string
data = json.dumps(payload).replace('"show_in_ui": "false"', '"show_in_ui": false').replace('"show_in_ui": "true"',
'"show_in_ui": true')
return http_request('PATCH', '/detects/entities/detects/v2', data=data)
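# Serialization illustration: json.dumps renders show_in_ui='false' as
# '"show_in_ui": "false"'; the replace above rewrites it to '"show_in_ui": false'
# so the API receives a real boolean.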
def contain_host(ids):
"""
Contains host(s) with matching ids
    :param ids: IDs of hosts to contain
:return: Contain host response json
"""
payload = {
'ids': ids
}
data = json.dumps(payload)
params = {
'action_name': 'contain'
}
return http_request('POST', '/devices/entities/devices-actions/v2', data=data, params=params)
def lift_host_containment(ids):
"""
Lifts off containment from host(s) with matchind ids
:param ids: IDs of host to lift off containment from
:return: Lift off containment response json
"""
payload = {
'ids': ids
}
data = json.dumps(payload)
params = {
'action_name': 'lift_containment'
}
return http_request('POST', '/devices/entities/devices-actions/v2', data=data, params=params)
def timestamp_length_equalization(timestamp1, timestamp2):
"""
Makes sure the timestamps are of the same length.
Args:
timestamp1: First timestamp to compare.
timestamp2: Second timestamp to compare.
Returns:
the two timestamps in the same length (the longer one)
"""
diff_len = len(str(timestamp1)) - len(str(timestamp2))
# no difference in length
if diff_len == 0:
return int(timestamp1), int(timestamp2)
# length of timestamp1 > timestamp2
if diff_len > 0:
ten_times = pow(10, diff_len)
timestamp2 = int(timestamp2) * ten_times
# length of timestamp2 > timestamp1
else:
ten_times = pow(10, diff_len * -1)
timestamp1 = int(timestamp1) * ten_times
return int(timestamp1), int(timestamp2)
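# Illustration: timestamp_length_equalization(1609459200, 1609459200123) pads the
# shorter, seconds-resolution value with zeros and returns
# (1609459200000, 1609459200123) so the two can be compared numerically.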
def change_host_group(is_post: bool,
host_group_id: Optional[str] = None,
name: Optional[str] = None,
group_type: Optional[str] = None,
description: Optional[str] = None,
assignment_rule: Optional[str] = None) -> Dict:
method = 'POST' if is_post else 'PATCH'
data = {'resources': [{
'id': host_group_id,
"name": name,
"description": description,
"group_type": group_type,
"assignment_rule": assignment_rule
}]}
response = http_request(method=method,
url_suffix='/devices/entities/host-groups/v1',
json=data)
return response
def change_host_group_members(action_name: str,
host_group_id: str,
host_ids: List[str]) -> Dict:
allowed_actions = {'add-hosts', 'remove-hosts'}
if action_name not in allowed_actions:
raise DemistoException(f'CrowdStrike Falcon error: action name should be in {allowed_actions}')
data = {'action_parameters': [{'name': 'filter',
'value': f"(device_id:{str(host_ids)})"}],
'ids': [host_group_id]}
response = http_request(method='POST',
url_suffix='/devices/entities/host-group-actions/v1',
params={'action_name': action_name},
json=data)
return response
def host_group_members(filter: Optional[str],
host_group_id: Optional[str],
limit: Optional[str],
offset: Optional[str]):
params = {'id': host_group_id,
'filter': filter,
'offset': offset,
'limit': limit}
response = http_request(method='GET',
url_suffix='/devices/combined/host-group-members/v1',
params=params)
return response
def resolve_incident(ids: List[str], status: str):
if status not in STATUS_TEXT_TO_NUM:
raise DemistoException(f'CrowdStrike Falcon Error: '
f'Status given is {status} and it is not in {STATUS_TEXT_TO_NUM.keys()}')
data = {
"action_parameters": [
{
"name": "update_status",
"value": STATUS_TEXT_TO_NUM[status]
}
],
"ids": ids
}
http_request(method='POST',
url_suffix='/incidents/entities/incident-actions/v1',
json=data)
def list_host_groups(filter: Optional[str], limit: Optional[str], offset: Optional[str]) -> Dict:
params = {'filter': filter,
'offset': offset,
'limit': limit}
response = http_request(method='GET',
url_suffix='/devices/combined/host-groups/v1',
params=params)
return response
def delete_host_groups(host_group_ids: List[str]) -> Dict:
params = {'ids': host_group_ids}
response = http_request(method='DELETE',
url_suffix='/devices/entities/host-groups/v1',
params=params)
return response
''' COMMANDS FUNCTIONS '''
def get_fetch_times_and_offset(incident_type):
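    """
    Reads the last run for the given incident type and returns the last fetch time,
    the current offset, the previous fetch time and the last fetch time as a
    millisecond timestamp. Falls back to FETCH_TIME when no last run exists.
    """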
last_run = demisto.getLastRun()
last_fetch_time = last_run.get(f'first_behavior_{incident_type}_time')
offset = last_run.get(f'{incident_type}_offset', 0)
if not last_fetch_time:
last_fetch_time, _ = parse_date_range(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
prev_fetch = last_fetch_time
last_fetch_timestamp = int(parse(last_fetch_time).timestamp() * 1000)
return last_fetch_time, offset, prev_fetch, last_fetch_timestamp
def fetch_incidents():
incidents = [] # type:List
current_fetch_info = demisto.getLastRun()
fetch_incidents_or_detections = demisto.params().get('fetch_incidents_or_detections')
if 'Detections' in fetch_incidents_or_detections:
incident_type = 'detection'
last_fetch_time, offset, prev_fetch, last_fetch_timestamp = get_fetch_times_and_offset(incident_type)
fetch_query = demisto.params().get('fetch_query')
if fetch_query:
fetch_query = "created_timestamp:>'{time}'+{query}".format(time=last_fetch_time, query=fetch_query)
detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query, offset=offset), 'resources')
else:
detections_ids = demisto.get(get_fetch_detections(last_created_timestamp=last_fetch_time, offset=offset),
'resources')
if detections_ids:
raw_res = get_detections_entities(detections_ids)
if "resources" in raw_res:
for detection in demisto.get(raw_res, "resources"):
detection['incident_type'] = incident_type
incident = detection_to_incident(detection)
incident_date = incident['occurred']
incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)
# make sure that the two timestamps are in the same length
if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
incident_date_timestamp, last_fetch_timestamp)
# Update last run and add incident if the incident is newer than last fetch
if incident_date_timestamp > last_fetch_timestamp:
last_fetch_time = incident_date
last_fetch_timestamp = incident_date_timestamp
incidents.append(incident)
if len(incidents) == INCIDENTS_PER_FETCH:
current_fetch_info['first_behavior_detection_time'] = prev_fetch
current_fetch_info['detection_offset'] = offset + INCIDENTS_PER_FETCH
else:
current_fetch_info['first_behavior_detection_time'] = last_fetch_time
current_fetch_info['detection_offset'] = 0
if 'Incidents' in fetch_incidents_or_detections:
incident_type = 'incident'
last_fetch_time, offset, prev_fetch, last_fetch_timestamp = get_fetch_times_and_offset(incident_type)
last_run = demisto.getLastRun()
last_incident_fetched = last_run.get('last_fetched_incident')
new_last_incident_fetched = ''
fetch_query = demisto.params().get('incidents_fetch_query')
if fetch_query:
fetch_query = "start:>'{time}'+{query}".format(time=last_fetch_time, query=fetch_query)
incidents_ids = demisto.get(get_incidents_ids(filter_arg=fetch_query, offset=offset), 'resources')
else:
incidents_ids = demisto.get(get_incidents_ids(last_created_timestamp=last_fetch_time, offset=offset),
'resources')
if incidents_ids:
raw_res = get_incidents_entities(incidents_ids)
if "resources" in raw_res:
for incident in demisto.get(raw_res, "resources"):
incident['incident_type'] = incident_type
incident_to_context = incident_to_incident_context(incident)
incident_date = incident_to_context['occurred']
incident_date_timestamp = int(parse(incident_date).timestamp() * 1000)
# make sure that the two timestamps are in the same length
if len(str(incident_date_timestamp)) != len(str(last_fetch_timestamp)):
incident_date_timestamp, last_fetch_timestamp = timestamp_length_equalization(
incident_date_timestamp, last_fetch_timestamp)
# Update last run and add incident if the incident is newer than last fetch
if incident_date_timestamp > last_fetch_timestamp:
last_fetch_time = incident_date
last_fetch_timestamp = incident_date_timestamp
new_last_incident_fetched = incident.get('incident_id')
if last_incident_fetched != incident.get('incident_id'):
incidents.append(incident_to_context)
if len(incidents) == INCIDENTS_PER_FETCH:
current_fetch_info['first_behavior_incident_time'] = prev_fetch
current_fetch_info['incident_offset'] = offset + INCIDENTS_PER_FETCH
current_fetch_info['last_fetched_incident'] = new_last_incident_fetched
else:
current_fetch_info['first_behavior_incident_time'] = last_fetch_time
current_fetch_info['incident_offset'] = 0
current_fetch_info['last_fetched_incident'] = new_last_incident_fetched
demisto.setLastRun(current_fetch_info)
return incidents
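# Pagination note: when a fetch fills exactly INCIDENTS_PER_FETCH results, the
# previous fetch time is kept and the offset advances so the next run continues
# the same time window; otherwise the offset resets to 0 and the window moves
# forward to the newest fetched timestamp.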
def upload_ioc_command(ioc_type=None, value=None, policy=None, expiration_days=None,
share_level=None, description=None, source=None):
"""
:param ioc_type: The type of the indicator:
    :param policy: The policy to enact when the value is detected on a host.
:param share_level: The level at which the indicator will be shared.
:param expiration_days: This represents the days the indicator should be valid for.
:param source: The source where this indicator originated.
:param description: A meaningful description of the indicator.
:param value: The string representation of the indicator.
"""
raw_res = upload_ioc(ioc_type, value, policy, expiration_days, share_level, description, source)
handle_response_errors(raw_res)
iocs = search_iocs(ids=f"{ioc_type}:{value}").get('resources')
if not iocs:
raise DemistoException("Failed to create IOC. Please try again.")
ec = [get_trasnformed_dict(iocs[0], IOC_KEY_MAP)]
enrich_ioc_dict_with_ids(ec)
return create_entry_object(contents=raw_res, ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Custom IOC was created successfully', ec))
def update_ioc_command(ioc_type=None, value=None, policy=None, expiration_days=None,
share_level=None, description=None, source=None):
"""
:param ioc_type: The type of the indicator:
    :param policy: The policy to enact when the value is detected on a host.
:param share_level: The level at which the indicator will be shared.
:param expiration_days: This represents the days the indicator should be valid for.
:param source: The source where this indicator originated.
:param description: A meaningful description of the indicator.
:param value: The string representation of the indicator.
"""
raw_res = update_ioc(ioc_type, value, policy, expiration_days, share_level, description, source)
handle_response_errors(raw_res)
iocs = search_iocs(ids=f"{ioc_type}:{value}").get('resources')
ec = [get_trasnformed_dict(iocs[0], IOC_KEY_MAP)]
enrich_ioc_dict_with_ids(ec)
return create_entry_object(contents=raw_res, ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
                               hr=tableToMarkdown('Custom IOC was updated successfully', ec))
def search_iocs_command(types=None, values=None, policies=None, sources=None, from_expiration_date=None,
to_expiration_date=None, share_levels=None, limit=None, sort=None, offset=None):
"""
:param types: A list of indicator types. Separate multiple types by comma.
:param values: Comma-separated list of indicator values
:param policies: Comma-separated list of indicator policies
:param sources: Comma-separated list of IOC sources
:param from_expiration_date: Start of date range to search (YYYY-MM-DD format).
:param to_expiration_date: End of date range to search (YYYY-MM-DD format).
:param share_levels: A list of share levels. Only red is supported.
    :param limit: The maximum number of records to return. The minimum is 1 and the maximum is 500. Default is 50.
    :param sort: The order of the results.
:param offset: The offset to begin the list from
"""
raw_res = search_iocs(types=types, values=values, policies=policies, sources=sources, sort=sort, offset=offset,
expiration_from=from_expiration_date, expiration_to=to_expiration_date,
share_levels=share_levels, limit=limit)
if not raw_res:
return create_entry_object(hr='Could not find any Indicators of Compromise.')
handle_response_errors(raw_res)
iocs = raw_res.get('resources')
ec = [get_trasnformed_dict(ioc, IOC_KEY_MAP) for ioc in iocs]
enrich_ioc_dict_with_ids(ec)
return create_entry_object(contents=raw_res, ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Indicators of Compromise', ec))
def get_ioc_command(ioc_type: str, value: str):
"""
:param ioc_type: The type of the indicator
:param value: The IOC value to retrieve
"""
raw_res = search_iocs(ids=f"{ioc_type}:{value}")
handle_response_errors(raw_res, 'Could not find any Indicators of Compromise.')
iocs = raw_res.get('resources')
ec = [get_trasnformed_dict(ioc, IOC_KEY_MAP) for ioc in iocs]
enrich_ioc_dict_with_ids(ec)
return create_entry_object(contents=raw_res, ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Indicator of Compromise', ec))
def delete_ioc_command(ioc_type, value):
"""
:param ioc_type: The type of the indicator
:param value: The IOC value to delete
"""
raw_res = delete_ioc(ioc_type, value)
handle_response_errors(raw_res, "The server has not confirmed deletion, please manually confirm deletion.")
ids = f"{ioc_type}:{value}"
return create_entry_object(contents=raw_res, hr=f"Custom IOC {ids} was successfully deleted.")
def search_custom_iocs_command(
types: Optional[Union[list, str]] = None,
values: Optional[Union[list, str]] = None,
sources: Optional[Union[list, str]] = None,
expiration: Optional[str] = None,
limit: str = '50',
sort: Optional[str] = None,
offset: Optional[str] = None,
) -> dict:
"""
:param types: A list of indicator types. Separate multiple types by comma.
:param values: Comma-separated list of indicator values
:param sources: Comma-separated list of IOC sources
:param expiration: The date on which the indicator will become inactive. (YYYY-MM-DD format).
    :param limit: The maximum number of records to return. The minimum is 1 and the maximum is 500. Default is 50.
    :param sort: The order of the results.
:param offset: The offset to begin the list from
"""
raw_res = search_custom_iocs(
types=argToList(types),
values=argToList(values),
sources=argToList(sources),
sort=sort,
offset=offset,
expiration=expiration,
limit=limit,
)
iocs = raw_res.get('resources')
if not iocs:
return create_entry_object(hr='Could not find any Indicators of Compromise.')
handle_response_errors(raw_res)
ec = [get_trasnformed_dict(ioc, IOC_KEY_MAP) for ioc in iocs]
return create_entry_object(
contents=raw_res,
ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Indicators of Compromise', ec),
)
def get_custom_ioc_command(
ioc_type: Optional[str] = None,
value: Optional[str] = None,
ioc_id: Optional[str] = None,
) -> dict:
"""
:param ioc_type: IOC type
:param value: IOC value
:param ioc_id: IOC ID
"""
if not ioc_id and not (ioc_type and value):
raise ValueError('Either ioc_id or ioc_type and value must be provided.')
if ioc_id:
raw_res = get_custom_ioc(ioc_id)
else:
raw_res = search_custom_iocs(
types=argToList(ioc_type),
values=argToList(value),
)
iocs = raw_res.get('resources')
handle_response_errors(raw_res)
if not iocs:
return create_entry_object(hr='Could not find any Indicators of Compromise.')
ec = [get_trasnformed_dict(ioc, IOC_KEY_MAP) for ioc in iocs]
return create_entry_object(
contents=raw_res,
ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Indicator of Compromise', ec),
)
def upload_custom_ioc_command(
ioc_type: str,
value: str,
action: str,
platforms: str,
severity: Optional[str] = None,
source: Optional[str] = None,
description: Optional[str] = None,
expiration: Optional[str] = None,
applied_globally: Optional[bool] = None,
host_groups: Optional[List[str]] = None,
) -> dict:
"""
:param ioc_type: The type of the indicator.
:param value: The string representation of the indicator.
:param action: Action to take when a host observes the custom IOC.
:param platforms: The platforms that the indicator applies to.
:param severity: The severity level to apply to this indicator.
:param source: The source where this indicator originated.
:param description: A meaningful description of the indicator.
:param expiration: The date on which the indicator will become inactive.
:param applied_globally: Whether the indicator is applied globally.
:param host_groups: List of host group IDs that the indicator applies to.
"""
if action in {'prevent', 'detect'} and not severity:
raise ValueError(f'Severity is required for action {action}.')
raw_res = upload_custom_ioc(
ioc_type,
value,
action,
argToList(platforms),
severity,
source,
description,
expiration,
argToBoolean(applied_globally) if applied_globally else None,
argToList(host_groups),
)
handle_response_errors(raw_res)
iocs = raw_res.get('resources', [])
ec = [get_trasnformed_dict(iocs[0], IOC_KEY_MAP)]
return create_entry_object(
contents=raw_res,
ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Custom IOC was created successfully', ec),
)
def update_custom_ioc_command(
ioc_id: str,
action: Optional[str] = None,
platforms: Optional[str] = None,
severity: Optional[str] = None,
source: Optional[str] = None,
description: Optional[str] = None,
expiration: Optional[str] = None,
) -> dict:
"""
:param ioc_id: The ID of the indicator to update.
:param action: Action to take when a host observes the custom IOC.
:param platforms: The platforms that the indicator applies to.
:param severity: The severity level to apply to this indicator.
:param source: The source where this indicator originated.
:param description: A meaningful description of the indicator.
:param expiration: The date on which the indicator will become inactive.
"""
raw_res = update_custom_ioc(
ioc_id,
action,
argToList(platforms),
severity,
source,
description,
expiration,
)
handle_response_errors(raw_res)
iocs = raw_res.get('resources', [])
ec = [get_trasnformed_dict(iocs[0], IOC_KEY_MAP)]
return create_entry_object(
contents=raw_res,
ec={'CrowdStrike.IOC(val.ID === obj.ID)': ec},
hr=tableToMarkdown('Custom IOC was updated successfully', ec),
)
def delete_custom_ioc_command(ioc_id: str) -> dict:
"""
:param ioc_id: The ID of indicator to delete.
"""
raw_res = delete_custom_ioc(ioc_id)
handle_response_errors(raw_res, "The server has not confirmed deletion, please manually confirm deletion.")
return create_entry_object(contents=raw_res, hr=f"Custom IOC {ioc_id} was successfully deleted.")
def get_ioc_device_count_command(ioc_type: str, value: str):
"""
:param ioc_type: The type of the indicator
:param value: The IOC value
"""
raw_res = get_ioc_device_count(ioc_type, value)
if 'No results found for' in raw_res:
return raw_res
else:
handle_response_errors(raw_res)
device_count_res = raw_res.get('resources')
ioc_id = f"{ioc_type}:{value}"
if not device_count_res:
return create_entry_object(raw_res, hr=f"Could not find any devices the IOC **{ioc_id}** was detected in.")
context = [get_trasnformed_dict(device_count, IOC_DEVICE_COUNT_MAP) for device_count in device_count_res]
hr = f'Indicator of Compromise **{ioc_id}** device count: **{device_count_res[0].get("device_count")}**'
return create_entry_object(contents=raw_res, ec={'CrowdStrike.IOC(val.ID === obj.ID)': context}, hr=hr)
def get_process_details_command(ids: str):
"""
    :param ids: Process IDs
"""
ids = argToList(ids)
raw_res = get_process_details(ids)
handle_response_errors(raw_res)
proc = raw_res.get('resources')
if not proc:
return create_entry_object(raw_res, hr="Could not find any searched processes.")
proc_hr_ids = str(ids)[1:-1].replace('\'', '')
title = f"Details for process{'es' if len(ids) > 1 else ''}: {proc_hr_ids}."
return create_entry_object(contents=raw_res, hr=tableToMarkdown(title, proc),
ec={'CrowdStrike.Process(val.process_id === obj.process_id)': proc})
def get_proccesses_ran_on_command(ioc_type, value, device_id):
"""
:param device_id: Device id the IOC ran on
:param ioc_type: The type of the indicator
:param value: The IOC value
"""
raw_res = get_proccesses_ran_on(ioc_type, value, device_id)
handle_response_errors(raw_res)
proc_ids = raw_res.get('resources')
ioc_id = f"{ioc_type}:{value}"
if not proc_ids:
return create_entry_object(raw_res, hr=f"Could not find any processes associated with the IOC **{ioc_id}**.")
context = {'ID': ioc_id, 'Type': ioc_type, 'Value': value, 'Process': {'DeviceID': device_id, 'ID': proc_ids}}
hr = tableToMarkdown(f"Processes with custom IOC {ioc_id} on device {device_id}.", proc_ids, headers="Process ID")
return create_entry_object(contents=raw_res, hr=hr, ec={'CrowdStrike.IOC(val.ID === obj.ID)': context})
def search_device_command():
"""
Searches for a device
:return: EntryObject of search device command
"""
raw_res = search_device()
if not raw_res:
return create_entry_object(hr='Could not find any devices.')
devices = raw_res.get('resources')
command_results = []
for single_device in devices:
status, is_isolated = generate_status_fields(single_device.get('status'))
endpoint = Common.Endpoint(
id=single_device.get('device_id'),
hostname=single_device.get('hostname'),
ip_address=single_device.get('local_ip'),
os=single_device.get('platform_name'),
os_version=single_device.get('os_version'),
status=status,
is_isolated=is_isolated,
mac_address=single_device.get('mac_address'),
vendor=INTEGRATION_NAME)
entry = get_trasnformed_dict(single_device, SEARCH_DEVICE_KEY_MAP)
headers = ['ID', 'Hostname', 'OS', 'MacAddress', 'LocalIP', 'ExternalIP', 'FirstSeen', 'LastSeen', 'Status']
command_results.append(CommandResults(
outputs_prefix='CrowdStrike.Device',
outputs_key_field='ID',
outputs=entry,
readable_output=tableToMarkdown('Devices', entry, headers=headers, headerTransform=pascalToSpace),
raw_response=raw_res,
indicator=endpoint,
))
return command_results
def search_device_by_ip(raw_res, ip_address):
devices = raw_res.get('resources')
filtered_devices = []
for single_device in devices:
if single_device.get('local_ip') == ip_address:
filtered_devices.append(single_device)
if filtered_devices:
raw_res['resources'] = filtered_devices
else:
raw_res = None
return raw_res
def generate_status_fields(endpoint_status):
status = ''
is_isolated = ''
if endpoint_status.lower() == 'normal':
status = 'Online'
elif endpoint_status == 'containment_pending':
is_isolated = 'Pending isolation'
elif endpoint_status == 'contained':
is_isolated = 'Yes'
elif endpoint_status == 'lift_containment_pending':
is_isolated = 'Pending unisolation'
else:
raise DemistoException(f'Error: Unknown endpoint status was given: {endpoint_status}')
return status, is_isolated
def generate_endpoint_by_contex_standard(devices):
standard_endpoints = []
for single_device in devices:
status, is_isolated = generate_status_fields(single_device.get('status'))
endpoint = Common.Endpoint(
id=single_device.get('device_id'),
hostname=single_device.get('hostname'),
ip_address=single_device.get('local_ip'),
os=single_device.get('platform_name'),
os_version=single_device.get('os_version'),
status=status,
is_isolated=is_isolated,
mac_address=single_device.get('mac_address'),
vendor=INTEGRATION_NAME)
standard_endpoints.append(endpoint)
return standard_endpoints
def get_endpoint_command():
args = demisto.args()
    if 'id' in args:
args['ids'] = args.get('id', '')
# handles the search by id or by hostname
raw_res = search_device()
    if (ip := args.get('ip')) and raw_res:
# there is no option to filter by ip in an api call, therefore we would filter the devices in the code
raw_res = search_device_by_ip(raw_res, ip)
if not ip and not args.get('id') and not args.get('hostname'):
# in order not to return all the devices
return create_entry_object(hr='Please add a filter argument - ip, hostname or id.')
if not raw_res:
return create_entry_object(hr='Could not find any devices.')
devices = raw_res.get('resources')
    standard_endpoints = generate_endpoint_by_context_standard(devices)
command_results = []
for endpoint in standard_endpoints:
endpoint_context = endpoint.to_context().get(Common.Endpoint.CONTEXT_PATH)
hr = tableToMarkdown('CrowdStrike Falcon Endpoint', endpoint_context)
command_results.append(CommandResults(
readable_output=hr,
raw_response=raw_res,
indicator=endpoint
))
return command_results
def get_behavior_command():
"""
Gets a behavior by ID
:return: EntryObject of get behavior command
"""
behavior_id = demisto.args().get('behavior_id')
detections_ids = demisto.get(get_detections(behavior_id=behavior_id), 'resources')
raw_res = get_detections_entities(detections_ids)
entries = []
if "resources" in raw_res:
for resource in demisto.get(raw_res, "resources"):
for behavior in demisto.get(resource, 'behaviors'):
entries.append(behavior_to_entry_context(behavior))
hr = tableToMarkdown('Behavior ID: {}'.format(behavior_id), entries, headerTransform=pascalToSpace)
# no dt since behavior vary by more than their ID
ec = {'CrowdStrike.Behavior': entries}
return create_entry_object(contents=raw_res, ec=ec, hr=hr)
def search_detections_command():
"""
Searches for a detection
:return: EntryObject of search detections command
"""
d_args = demisto.args()
detections_ids = argToList(d_args.get('ids'))
if not detections_ids:
filter_arg = d_args.get('filter')
if not filter_arg:
return_error('Command Error: Please provide at least one argument.')
detections_ids = get_detections(filter_arg=filter_arg).get('resources')
raw_res = get_detections_entities(detections_ids)
entries = []
headers = ['ID', 'Status', 'System', 'ProcessStartTime', 'CustomerID', 'MaxSeverity']
if "resources" in raw_res:
for detection in demisto.get(raw_res, "resources"):
detection_entry = {}
for path, new_key in DETECTIONS_BASE_KEY_MAP.items():
detection_entry[new_key] = demisto.get(detection, path)
behaviors = []
for behavior in demisto.get(detection, 'behaviors'):
behaviors.append(behavior_to_entry_context(behavior))
detection_entry['Behavior'] = behaviors
entries.append(detection_entry)
hr = tableToMarkdown('Detections Found:', entries, headers=headers, removeNull=True, headerTransform=pascalToSpace)
ec = {'CrowdStrike.Detection(val.ID === obj.ID)': entries}
return create_entry_object(contents=raw_res, ec=ec, hr=hr)
def resolve_detection_command():
"""
Resolves single or multiple detections
:return: EntryObject of resolve detection command
"""
args = demisto.args()
ids = argToList(args.get('ids'))
username = args.get('username')
assigned_to_uuid = args.get('assigned_to_uuid')
comment = args.get('comment')
if username and assigned_to_uuid:
raise ValueError('Only one of the arguments assigned_to_uuid or username should be provided, not both.')
if username:
assigned_to_uuid = get_username_uuid(username)
status = args.get('status')
show_in_ui = args.get('show_in_ui')
if not (username or assigned_to_uuid or comment or status or show_in_ui):
raise DemistoException("Please provide at least one argument to resolve the detection with.")
raw_res = resolve_detection(ids, status, assigned_to_uuid, show_in_ui, comment)
args.pop('ids')
hr = "Detection {0} updated\n".format(str(ids)[1:-1])
hr += 'With the following values:\n'
for k, arg in args.items():
hr += '\t{name}:{val}\n'.format(name=k, val=arg)
return create_entry_object(contents=raw_res, hr=hr)
def contain_host_command():
"""
Contains hosts with user arg ids
:return: EntryObject of contain host command
"""
ids = argToList(demisto.args().get('ids'))
raw_res = contain_host(ids)
hr = "Host {} contained".format(str(ids)[1:-1])
return create_entry_object(contents=raw_res, hr=hr)
def lift_host_containment_command():
"""
Lifts containment off a host
:return: EntryObject of lift host containment
"""
ids = argToList(demisto.args().get('ids'))
raw_res = lift_host_containment(ids)
hr = "Containment has been lift off host {}".format(str(ids)[1:-1])
return create_entry_object(contents=raw_res, hr=hr)
def run_command():
args = demisto.args()
host_ids = argToList(args.get('host_ids'))
command_type = args.get('command_type')
full_command = args.get('full_command')
scope = args.get('scope', 'read')
target = args.get('target', 'batch')
output = []
if target == 'batch':
batch_id = init_rtr_batch_session(host_ids)
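        # Refresh the batch session after 5 minutes so it does not expire while
        # a long-running command is still executing; the timer is cancelled in
        # the finally block once the command returns.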
timer = Timer(300, batch_refresh_session, kwargs={'batch_id': batch_id})
timer.start()
try:
if scope == 'read':
response = run_batch_read_cmd(batch_id, command_type, full_command)
elif scope == 'write':
response = run_batch_write_cmd(batch_id, command_type, full_command)
else: # scope = admin
response = run_batch_admin_cmd(batch_id, command_type, full_command)
finally:
timer.cancel()
resources: dict = response.get('combined', {}).get('resources', {})
for _, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
output.append({
'HostID': resource.get('aid'),
'SessionID': resource.get('session_id'),
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr'),
'BaseCommand': resource.get('base_command'),
'Command': full_command
})
human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
entry_context_batch = {
'CrowdStrike': {
'Command': output
}
}
return create_entry_object(contents=response, ec=entry_context_batch, hr=human_readable)
else: # target = 'single'
responses = []
for host_id in host_ids:
if scope == 'read':
response1 = run_single_read_cmd(host_id, command_type, full_command)
elif scope == 'write':
response1 = run_single_write_cmd(host_id, command_type, full_command)
else: # scope = admin
response1 = run_single_admin_cmd(host_id, command_type, full_command)
responses.append(response1)
for resource in response1.get('resources', []):
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
output.append({
'HostID': host_id,
'TaskID': resource.get('cloud_request_id'),
'SessionID': resource.get('session_id'),
'BaseCommand': command_type,
'Command': full_command,
'Complete': False,
'NextSequenceID': 0
})
human_readable = tableToMarkdown(f'Command {full_command} results', output, removeNull=True)
entry_context_single = {
'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
}
return create_entry_object(contents=responses, ec=entry_context_single, hr=human_readable)
def upload_script_command():
args = demisto.args()
name = args.get('name')
permission_type = args.get('permission_type', 'private')
content = args.get('content')
entry_id = args.get('entry_id')
if content and entry_id:
raise ValueError('Only one of the arguments entry_id or content should be provided, not both.')
elif not content and not entry_id:
raise ValueError('One of the arguments entry_id or content must be provided, none given.')
response = upload_script(name, permission_type, content, entry_id)
return create_entry_object(contents=response, hr='The script was uploaded successfully')
def get_script_command():
script_id = argToList(demisto.args().get('script_id'))
response = get_script(script_id)
resources: list = response.get('resources', [])
if resources and isinstance(resources, list):
resource = resources[0]
script = {
'ID': resource.get('id'),
'CreatedBy': resource.get('created_by'),
'CreatedTime': resource.get('created_timestamp'),
'Description': resource.get('description'),
'ModifiedBy': resource.get('modified_by'),
'ModifiedTime': resource.get('modified_timestamp'),
'Name': resource.get('name'),
'Permission': resource.get('permission_type'),
'SHA256': resource.get('sha256'),
'RunAttemptCount': resource.get('run_attempt_count'),
'RunSuccessCount': resource.get('run_success_count'),
'WriteAccess': resource.get('write_access')
}
human_readable = tableToMarkdown(f'CrowdStrike Falcon script {script_id}', script)
entry_context = {
'CrowdStrike.Script(val.ID === obj.ID)': script
}
script_content = resource.get('content')
if script_content:
demisto.results(
fileResult(
f"{resource.get('name', 'script')}.ps1",
script_content
)
)
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
else:
return 'No script found.'
def delete_script_command():
script_id = demisto.args().get('script_id')
response = delete_script(script_id)
return create_entry_object(contents=response, hr=f'Script {script_id} was deleted successfully')
def list_scripts_command():
response = list_scripts()
resources: list = response.get('resources', [])
scripts = []
for resource in resources:
scripts.append({
'ID': resource.get('id'),
'CreatedBy': resource.get('created_by'),
'CreatedTime': resource.get('created_timestamp'),
'Description': resource.get('description'),
'ModifiedBy': resource.get('modified_by'),
'ModifiedTime': resource.get('modified_timestamp'),
'Name': resource.get('name'),
'Permission': resource.get('permission_type'),
'SHA256': resource.get('sha256'),
'RunAttemptCount': resource.get('run_attempt_count'),
'RunSuccessCount': resource.get('run_success_count'),
'Platform': resource.get('platform'),
'WriteAccess': resource.get('write_access')
})
human_readable = tableToMarkdown('CrowdStrike Falcon scripts', scripts)
entry_context = {
'CrowdStrike.Script(val.ID === obj.ID)': scripts
}
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
def upload_file_command():
entry_id = demisto.args().get('entry_id')
description = demisto.args().get('description', 'File uploaded from Demisto')
    response, _ = upload_file(entry_id, description)
return create_entry_object(contents=response, hr='File was uploaded successfully')
def delete_file_command():
file_id = demisto.args().get('file_id')
response = delete_file(file_id)
return create_entry_object(contents=response, hr=f'File {file_id} was deleted successfully')
def get_file_command():
file_id = argToList(demisto.args().get('file_id'))
response = get_file(file_id)
resources: list = response.get('resources', [])
if resources and isinstance(resources, list):
# will always be a list of one resource
resource = resources[0]
file_ = {
'ID': resource.get('id'),
'CreatedBy': resource.get('created_by'),
'CreatedTime': resource.get('created_timestamp'),
'Description': resource.get('description'),
'Type': resource.get('file_type'),
'ModifiedBy': resource.get('modified_by'),
'ModifiedTime': resource.get('modified_timestamp'),
'Name': resource.get('name'),
'Permission': resource.get('permission_type'),
'SHA256': resource.get('sha256'),
}
file_standard_context = {
'Type': resource.get('file_type'),
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
}
human_readable = tableToMarkdown(f'CrowdStrike Falcon file {file_id}', file_)
entry_context = {
'CrowdStrike.File(val.ID === obj.ID)': file_,
outputPaths['file']: file_standard_context
}
file_content = resource.get('content')
if file_content:
demisto.results(
fileResult(
resource.get('name'),
file_content
)
)
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
else:
return 'No file found.'
def list_files_command():
response = list_files()
resources: list = response.get('resources', [])
files_output = []
file_standard_context = []
for resource in resources:
files_output.append({
'ID': resource.get('id'),
'CreatedBy': resource.get('created_by'),
'CreatedTime': resource.get('created_timestamp'),
'Description': resource.get('description'),
'Type': resource.get('file_type'),
'ModifiedBy': resource.get('modified_by'),
'ModifiedTime': resource.get('modified_timestamp'),
'Name': resource.get('name'),
'Permission': resource.get('permission_type'),
'SHA256': resource.get('sha256'),
})
file_standard_context.append({
'Type': resource.get('file_type'),
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
})
human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
entry_context = {
'CrowdStrike.File(val.ID === obj.ID)': files_output,
outputPaths['file']: file_standard_context
}
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
def run_script_command():
args = demisto.args()
script_name = args.get('script_name')
raw = args.get('raw')
host_ids = argToList(args.get('host_ids'))
try:
timeout = int(args.get('timeout', 30))
except ValueError as e:
demisto.error(str(e))
raise ValueError('Timeout argument should be an integer, for example: 30')
if script_name and raw:
raise ValueError('Only one of the arguments script_name or raw should be provided, not both.')
elif not script_name and not raw:
raise ValueError('One of the arguments script_name or raw must be provided, none given.')
elif script_name:
full_command = f'runscript -CloudFile={script_name}'
elif raw:
full_command = f'runscript -Raw=```{raw}```'
full_command += f' -Timeout={timeout}'
command_type = 'runscript'
batch_id = init_rtr_batch_session(host_ids)
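    # As in run_command, schedule a one-shot session refresh so the RTR batch
    # session survives long-running scripts; cancelled in the finally block.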
timer = Timer(300, batch_refresh_session, kwargs={'batch_id': batch_id})
timer.start()
try:
response = run_batch_admin_cmd(batch_id, command_type, full_command, timeout)
finally:
timer.cancel()
resources: dict = response.get('combined', {}).get('resources', {})
output = []
for _, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
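        # Strip the backticks that wrapped raw scripts so they do not break the
        # markdown rendering of the results table below.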
full_command = full_command.replace('`', '')
output.append({
'HostID': resource.get('aid'),
'SessionID': resource.get('session_id'),
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr'),
'BaseCommand': resource.get('base_command'),
'Command': full_command
})
human_readable = tableToMarkdown(f'Command {full_command} results', output)
entry_context = {
'CrowdStrike': {
'Command': output
}
}
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
def run_get_command():
args = demisto.args()
host_ids = argToList(args.get('host_ids'))
file_path = args.get('file_path')
optional_hosts = argToList(args.get('optional_hosts'))
timeout = args.get('timeout')
timeout_duration = args.get('timeout_duration')
timeout = timeout and int(timeout)
response = run_batch_get_cmd(host_ids, file_path, optional_hosts, timeout, timeout_duration)
resources: dict = response.get('combined', {}).get('resources', {})
output = []
for _, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not get command\n{errors}'
return_error(error_message)
output.append({
'HostID': resource.get('aid'),
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr'),
'BaseCommand': resource.get('base_command'),
'TaskID': resource.get('task_id'),
'GetRequestID': response.get('batch_get_cmd_req_id'),
'Complete': resource.get('complete') or False,
'FilePath': file_path
})
    human_readable = tableToMarkdown(f'Get command was requested for file {file_path}', output)
entry_context = {
'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
}
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
def status_get_command():
args = demisto.args()
request_ids = argToList(args.get('request_ids'))
timeout = args.get('timeout')
timeout_duration = args.get('timeout_duration')
timeout = timeout and int(timeout)
responses = []
files_output = []
file_standard_context = []
for request_id in request_ids:
response = status_get_cmd(request_id, timeout, timeout_duration)
responses.append(response)
resources: dict = response.get('resources', {})
for _, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not get command\n{errors}'
return_error(error_message)
files_output.append({
'ID': resource.get('id'),
'TaskID': resource.get('cloud_request_id'),
'CreatedAt': resource.get('created_at'),
'DeletedAt': resource.get('deleted_at'),
'UpdatedAt': resource.get('updated_at'),
'Name': resource.get('name'),
'Size': resource.get('size'),
'SHA256': resource.get('sha256')
})
file_standard_context.append({
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
})
human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
entry_context = {
'CrowdStrike.File(val.ID === obj.ID || val.TaskID === obj.TaskID)': files_output,
outputPaths['file']: file_standard_context
}
if len(responses) == 1:
return create_entry_object(contents=responses[0], ec=entry_context, hr=human_readable)
    else:
        return create_entry_object(contents=responses, ec=entry_context, hr=human_readable)
def status_command():
args = demisto.args()
request_id = args.get('request_id')
sequence_id = args.get('sequence_id')
scope = args.get('scope', 'read')
sequence_id = None if sequence_id is None else int(sequence_id)
if scope == 'read':
response = status_read_cmd(request_id, sequence_id)
elif scope == 'write':
response = status_write_cmd(request_id, sequence_id)
else: # scope = admin
response = status_admin_cmd(request_id, sequence_id)
resources: list = response.get('resources', [])
output = []
for resource in resources:
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
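        # stdout is returned in sequenced chunks; surface NextSequenceID so
        # callers can request the next chunk of output.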
sequence_id = int(resource.get('sequence_id', 0))
output.append({
'Complete': resource.get('complete') or False,
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr'),
'BaseCommand': resource.get('base_command'),
'TaskID': resource.get('task_id'),
'SequenceID': sequence_id,
'NextSequenceID': sequence_id + 1
})
human_readable = tableToMarkdown('Command status results', output, removeNull=True)
entry_context = {
'CrowdStrike.Command(val.TaskID === obj.TaskID)': output
}
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
def get_extracted_file_command():
args = demisto.args()
host_id = args.get('host_id')
sha256 = args.get('sha256')
filename = args.get('filename')
response = get_extracted_file(host_id, sha256, filename)
# save an extracted file
content_type = response.headers.get('Content-Type', '').lower()
if content_type == 'application/x-7z-compressed':
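        # The filename travels in the Content-Disposition header; parse it with
        # the email module's header parser.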
content_disposition = response.headers.get('Content-Disposition', '').lower()
if content_disposition:
filename = email.message_from_string(f'Content-Disposition: {content_disposition}\n\n').get_filename()
if not filename:
sha256 = sha256 or hashlib.sha256(response.content).hexdigest()
filename = sha256.lower() + '.7z'
return fileResult(filename, response.content)
    return_error('The extracted file is missing from the response')
def list_host_files_command():
args = demisto.args()
host_id = args.get('host_id')
response = list_host_files(host_id)
resources: list = response.get('resources', [])
files_output = []
file_standard_context = []
command_output = []
for resource in resources:
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
command_output.append({
'HostID': host_id,
'TaskID': resource.get('cloud_request_id'),
'SessionID': resource.get('session_id')
})
files_output.append({
'ID': resource.get('id'),
'CreatedAt': resource.get('created_at'),
'DeletedAt': resource.get('deleted_at'),
'UpdatedAt': resource.get('updated_at'),
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
'Stdout': resource.get('stdout'),
'Stderr': resource.get('stderr')
})
file_standard_context.append({
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
})
if files_output:
human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
else:
        human_readable = 'No results found'
entry_context = {
'CrowdStrike.Command(val.TaskID === obj.TaskID)': command_output,
'CrowdStrike.File(val.ID === obj.ID)': files_output,
outputPaths['file']: file_standard_context
}
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
def refresh_session_command():
args = demisto.args()
host_id = args.get('host_id')
response = refresh_session(host_id)
resources: list = response.get('resources', [])
session_id = None
for resource in resources:
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not run command\n{errors}'
return_error(error_message)
session_id = resource.get('session_id')
return create_entry_object(contents=response, hr=f'CrowdStrike Session Refreshed: {session_id}')
def build_error_message(raw_res):
if raw_res.get('errors'):
error_data = raw_res.get('errors')[0]
else:
error_data = {"code": 'None', "message": 'something got wrong, please try again'}
error_code = error_data.get('code')
error_message = error_data.get('message')
return f'Error: error code: {error_code}, error_message: {error_message}.'
def validate_response(raw_res):
    return 'resources' in raw_res
def get_indicator_device_id():
args = demisto.args()
ioc_type = args.get('type')
ioc_value = args.get('value')
params = assign_params(
type=ioc_type,
value=ioc_value
)
raw_res = http_request('GET', '/indicators/queries/devices/v1', params=params, status_code=404)
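    # The API answers 404 when no device has encountered the IOC, so that status
    # code is accepted and treated as an empty result rather than an error.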
errors = raw_res.get('errors', [])
for error in errors:
if error.get('code') == 404:
return f'No results found for {ioc_type} - {ioc_value}'
context_output = ''
if validate_response(raw_res):
context_output = raw_res.get('resources')
else:
error_message = build_error_message(raw_res)
return_error(error_message)
ioc_id = f"{ioc_type}:{ioc_value}"
readable_output = tableToMarkdown(f"Devices that encountered the IOC {ioc_id}", context_output, headers='Device ID')
return CommandResults(
readable_output=readable_output,
outputs_prefix='CrowdStrike.DeviceID',
outputs_key_field='DeviceID',
outputs=context_output,
raw_response=raw_res
)
def detections_to_human_readable(detections):
detections_readable_outputs = []
for detection in detections:
readable_output = assign_params(status=detection.get('status'),
max_severity=detection.get('max_severity_displayname'),
detection_id=detection.get('detection_id'),
created_time=detection.get('created_timestamp'))
detections_readable_outputs.append(readable_output)
headers = ['detection_id', 'created_time', 'status', 'max_severity']
human_readable = tableToMarkdown('CrowdStrike Detections', detections_readable_outputs, headers, removeNull=True)
return human_readable
def list_detection_summaries_command():
args = demisto.args()
fetch_query = args.get('fetch_query')
args_ids = args.get('ids')
if args_ids:
detections_ids = argToList(args_ids)
    elif fetch_query:
        detections_ids = demisto.get(get_fetch_detections(filter_arg=fetch_query), 'resources')
else:
detections_ids = demisto.get(get_fetch_detections(), 'resources')
detections_response_data = get_detections_entities(detections_ids)
    detections = detections_response_data.get('resources', [])
detections_human_readable = detections_to_human_readable(detections)
return CommandResults(
readable_output=detections_human_readable,
outputs_prefix='CrowdStrike.Detections',
outputs_key_field='detection_id',
outputs=detections
)
def incidents_to_human_readable(incidents):
incidents_readable_outputs = []
for incident in incidents:
readable_output = assign_params(description=incident.get('description'), state=incident.get('state'),
name=incident.get('name'), tags=incident.get('tags'),
incident_id=incident.get('incident_id'), created_time=incident.get('created'),
status=STATUS_NUM_TO_TEXT.get(incident.get('status')))
incidents_readable_outputs.append(readable_output)
headers = ['incident_id', 'created_time', 'name', 'description', 'status', 'state', 'tags']
human_readable = tableToMarkdown('CrowdStrike Incidents', incidents_readable_outputs, headers, removeNull=True)
return human_readable
def list_incident_summaries_command():
args = demisto.args()
fetch_query = args.get('fetch_query')
args_ids = args.get('ids')
if args_ids:
ids = argToList(args_ids)
else:
        if fetch_query:
            incidents_ids = get_incidents_ids(filter_arg=fetch_query)
else:
incidents_ids = get_incidents_ids()
handle_response_errors(incidents_ids)
ids = incidents_ids.get('resources')
if not ids:
return CommandResults(readable_output='No incidents were found.')
incidents_response_data = get_incidents_entities(ids)
    incidents = incidents_response_data.get('resources', [])
incidents_human_readable = incidents_to_human_readable(incidents)
return CommandResults(
readable_output=incidents_human_readable,
outputs_prefix='CrowdStrike.Incidents',
outputs_key_field='incident_id',
outputs=incidents
)
def create_host_group_command(name: str,
group_type: str = None,
description: str = None,
assignment_rule: str = None) -> CommandResults:
response = change_host_group(is_post=True,
name=name,
group_type=group_type,
description=description,
assignment_rule=assignment_rule)
host_groups = response.get('resources')
return CommandResults(outputs_prefix='CrowdStrike.HostGroup',
outputs_key_field='id',
outputs=host_groups,
readable_output=tableToMarkdown('Host Groups', host_groups, headers=HOST_GROUP_HEADERS),
raw_response=response)
def update_host_group_command(host_group_id: str,
name: Optional[str] = None,
description: Optional[str] = None,
assignment_rule: Optional[str] = None) -> CommandResults:
response = change_host_group(is_post=False,
host_group_id=host_group_id,
name=name,
description=description,
assignment_rule=assignment_rule)
host_groups = response.get('resources')
return CommandResults(outputs_prefix='CrowdStrike.HostGroup',
outputs_key_field='id',
outputs=host_groups,
readable_output=tableToMarkdown('Host Groups', host_groups, headers=HOST_GROUP_HEADERS),
raw_response=response)
def list_host_group_members_command(host_group_id: Optional[str] = None,
filter: Optional[str] = None,
offset: Optional[str] = None,
limit: Optional[str] = None) -> CommandResults:
response = host_group_members(filter, host_group_id, limit, offset)
devices = response.get('resources')
if not devices:
        return CommandResults(readable_output='No hosts were found',
raw_response=response)
headers = list(SEARCH_DEVICE_KEY_MAP.values())
outputs = [get_trasnformed_dict(single_device, SEARCH_DEVICE_KEY_MAP) for single_device in devices]
return CommandResults(
outputs_prefix='CrowdStrike.Device',
outputs_key_field='ID',
outputs=outputs,
readable_output=tableToMarkdown('Devices', outputs, headers=headers, headerTransform=pascalToSpace),
raw_response=response
)
def add_host_group_members_command(host_group_id: str, host_ids: List[str]) -> CommandResults:
response = change_host_group_members(action_name='add-hosts',
host_group_id=host_group_id,
host_ids=host_ids)
host_groups = response.get('resources')
return CommandResults(outputs_prefix='CrowdStrike.HostGroup',
outputs_key_field='id',
outputs=host_groups,
readable_output=tableToMarkdown('Host Groups', host_groups, headers=HOST_GROUP_HEADERS),
raw_response=response)
def remove_host_group_members_command(host_group_id: str, host_ids: List[str]) -> CommandResults:
response = change_host_group_members(action_name='remove-hosts',
host_group_id=host_group_id,
host_ids=host_ids)
host_groups = response.get('resources')
return CommandResults(outputs_prefix='CrowdStrike.HostGroup',
outputs_key_field='id',
outputs=host_groups,
readable_output=tableToMarkdown('Host Groups', host_groups, headers=HOST_GROUP_HEADERS),
raw_response=response)
def resolve_incident_command(ids: List[str], status: str):
resolve_incident(ids, status)
readable = '\n'.join([f'{incident_id} changed successfully to {status}' for incident_id in ids])
return CommandResults(readable_output=readable)
def list_host_groups_command(filter: Optional[str] = None, offset: Optional[str] = None, limit: Optional[str] = None) \
-> CommandResults:
response = list_host_groups(filter, limit, offset)
host_groups = response.get('resources')
return CommandResults(outputs_prefix='CrowdStrike.HostGroup',
outputs_key_field='id',
outputs=host_groups,
readable_output=tableToMarkdown('Host Groups', host_groups, headers=HOST_GROUP_HEADERS),
raw_response=response)
def delete_host_groups_command(host_group_ids: List[str]) -> CommandResults:
response = delete_host_groups(host_group_ids)
deleted_ids = response.get('resources')
    readable = '\n'.join([f'Host group {host_group_id} deleted successfully' for host_group_id in deleted_ids]) \
        if deleted_ids else f'Host groups {host_group_ids} were not deleted'
return CommandResults(readable_output=readable,
raw_response=response)
def test_module():
try:
get_token(new_token=True)
except ValueError:
        return 'Connection Error: The URL or the API key you entered is probably incorrect, please try again.'
if demisto.params().get('isFetch'):
try:
fetch_incidents()
except ValueError:
            return 'Error: Something is wrong with the filters you entered for fetching incidents, please try again.'
return 'ok'
''' COMMANDS MANAGER / SWITCH PANEL '''
LOG('Command being called is {}'.format(demisto.command()))
def main():
command = demisto.command()
args = demisto.args()
try:
if command == 'test-module':
result = test_module()
return_results(result)
elif command == 'fetch-incidents':
demisto.incidents(fetch_incidents())
elif command in ('cs-device-ran-on', 'cs-falcon-device-ran-on'):
return_results(get_indicator_device_id())
        elif command == 'cs-falcon-search-device':
return_results(search_device_command())
elif command == 'cs-falcon-get-behavior':
demisto.results(get_behavior_command())
elif command == 'cs-falcon-search-detection':
demisto.results(search_detections_command())
elif command == 'cs-falcon-resolve-detection':
demisto.results(resolve_detection_command())
elif command == 'cs-falcon-contain-host':
demisto.results(contain_host_command())
elif command == 'cs-falcon-lift-host-containment':
demisto.results(lift_host_containment_command())
elif command == 'cs-falcon-run-command':
demisto.results(run_command())
elif command == 'cs-falcon-upload-script':
demisto.results(upload_script_command())
elif command == 'cs-falcon-get-script':
demisto.results(get_script_command())
elif command == 'cs-falcon-delete-script':
demisto.results(delete_script_command())
elif command == 'cs-falcon-list-scripts':
demisto.results(list_scripts_command())
elif command == 'cs-falcon-upload-file':
demisto.results(upload_file_command())
elif command == 'cs-falcon-delete-file':
demisto.results(delete_file_command())
elif command == 'cs-falcon-get-file':
demisto.results(get_file_command())
elif command == 'cs-falcon-list-files':
demisto.results(list_files_command())
elif command == 'cs-falcon-run-script':
demisto.results(run_script_command())
elif command == 'cs-falcon-run-get-command':
demisto.results(run_get_command())
elif command == 'cs-falcon-status-get-command':
demisto.results(status_get_command())
elif command == 'cs-falcon-status-command':
demisto.results(status_command())
elif command == 'cs-falcon-get-extracted-file':
demisto.results(get_extracted_file_command())
elif command == 'cs-falcon-list-host-files':
demisto.results(list_host_files_command())
elif command == 'cs-falcon-refresh-session':
demisto.results(refresh_session_command())
elif command == 'cs-falcon-list-detection-summaries':
return_results(list_detection_summaries_command())
elif command == 'cs-falcon-list-incident-summaries':
return_results(list_incident_summaries_command())
elif command == 'cs-falcon-search-iocs':
return_results(search_iocs_command(**args))
elif command == 'cs-falcon-get-ioc':
return_results(get_ioc_command(ioc_type=args.get('type'), value=args.get('value')))
elif command == 'cs-falcon-upload-ioc':
return_results(upload_ioc_command(**args))
elif command == 'cs-falcon-update-ioc':
return_results(update_ioc_command(**args))
elif command == 'cs-falcon-delete-ioc':
return_results(delete_ioc_command(ioc_type=args.get('type'), value=args.get('value')))
elif command == 'cs-falcon-search-custom-iocs':
return_results(search_custom_iocs_command(**args))
elif command == 'cs-falcon-get-custom-ioc':
return_results(get_custom_ioc_command(
ioc_type=args.get('type'), value=args.get('value'), ioc_id=args.get('ioc_id')))
elif command == 'cs-falcon-upload-custom-ioc':
return_results(upload_custom_ioc_command(**args))
elif command == 'cs-falcon-update-custom-ioc':
return_results(update_custom_ioc_command(**args))
elif command == 'cs-falcon-delete-custom-ioc':
return_results(delete_custom_ioc_command(ioc_id=args.get('ioc_id')))
elif command == 'cs-falcon-device-count-ioc':
return_results(get_ioc_device_count_command(ioc_type=args.get('type'), value=args.get('value')))
elif command == 'cs-falcon-process-details':
return_results(get_process_details_command(**args))
elif command == 'cs-falcon-processes-ran-on':
return_results(
get_proccesses_ran_on_command(
ioc_type=args.get('type'),
value=args.get('value'),
device_id=args.get('device_id')
)
)
elif command == 'endpoint':
return_results(get_endpoint_command())
elif command == 'cs-falcon-create-host-group':
return_results(create_host_group_command(**args))
elif command == 'cs-falcon-update-host-group':
return_results(update_host_group_command(**args))
elif command == 'cs-falcon-list-host-groups':
return_results(list_host_groups_command(**args))
elif command == 'cs-falcon-delete-host-groups':
return_results(delete_host_groups_command(host_group_ids=argToList(args.get('host_group_id'))))
elif command == 'cs-falcon-list-host-group-members':
return_results(list_host_group_members_command(**args))
elif command == 'cs-falcon-add-host-group-members':
return_results(add_host_group_members_command(host_group_id=args.get('host_group_id'),
host_ids=argToList(args.get('host_ids'))))
elif command == 'cs-falcon-remove-host-group-members':
return_results(remove_host_group_members_command(host_group_id=args.get('host_group_id'),
host_ids=argToList(args.get('host_ids'))))
elif command == 'cs-falcon-resolve-incident':
return_results(resolve_incident_command(status=args.get('status'),
ids=argToList(args.get('ids'))))
else:
raise NotImplementedError(f'CrowdStrike Falcon error: '
f'command {command} is not implemented')
except Exception as e:
return_error(str(e))
if __name__ in ('__main__', 'builtin', 'builtins'):
main()
|
import pyblish.api
class CollectAnimatedOutputs(pyblish.api.InstancePlugin):
"""Collect transform animated nodes
    This only collects and extracts animated transform nodes;
    shape nodes are not included.
"""
order = pyblish.api.CollectorOrder + 0.2
label = "Collect Animated Outputs"
hosts = ["maya"]
families = [
"reveries.animation",
]
def process(self, instance):
import maya.cmds as cmds
from reveries.maya import lib, pipeline
variant = instance.data["subset"][len("animation"):].lower()
members = instance[:]
# Re-Create instances
context = instance.context
context.remove(instance)
source_data = instance.data
ANIM_SET = "ControlSet"
out_cache = dict()
if variant == "default":
# Collect animatable nodes from ControlSet of loaded subset
out_sets = list()
for node in cmds.ls(members, type="transform"):
try:
# Must be containerized subset group node
pipeline.get_container_from_group(node)
except AssertionError:
continue
namespace = lib.get_ns(node)
out_sets += cmds.ls("%s:*%s" % (namespace, ANIM_SET),
sets=True)
for node in out_sets:
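                # Derive the output name from the set name, e.g.
                # 'char01:BodyControlSet' -> 'Body'; an empty remainder
                # falls back to 'Default'.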
name = node.rsplit(":", 1)[-1][:-len(ANIM_SET)] or "Default"
namespace = lib.get_ns(node)
animatables = cmds.ls(cmds.sets(node, query=True),
type="transform")
key = (namespace, name)
self.log.info("%s, %s" % key)
if not animatables:
self.log.warning("No animatable (e.g. controllers) been "
"found in '%s', skipping.." % node)
continue
out_cache[key] = animatables
else:
# Collect animatable nodes from instance member
for node in cmds.ls(members, type="transform"):
namespace = lib.get_ns(node)
try:
# Must be containerized
pipeline.get_container_from_namespace(namespace)
except RuntimeError:
continue
key = (namespace, variant)
if key not in out_cache:
self.log.info("%s, %s" % key)
out_cache[key] = list()
out_cache[key].append(node)
for (namespace, name), animatables in sorted(out_cache.items()):
container = pipeline.get_container_from_namespace(namespace)
asset_id = cmds.getAttr(container + ".assetId")
fixed_namespace = namespace[1:] # Remove root ":"
# For filesystem, remove other ":" if the namespace is nested
fixed_namespace = fixed_namespace.replace(":", "._.")
subset = ".".join(["animation",
fixed_namespace,
name])
instance = context.create_instance(subset)
instance.data.update(source_data)
instance.data["subset"] = subset
instance[:] = animatables
instance.data["outAnim"] = animatables
instance.data["animatedNamespace"] = namespace
instance.data["animatedAssetId"] = asset_id
            # (NOTE) These animatable nodes are registered so their
            #        AvalonUUID existence can be validated, although
            #        AvalonUUID is currently not required on load.
instance.data["requireAvalonUUID"] = animatables
|
# Copyright (C) 2017-2018 GIG Technology NV and Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from .generated import daemon_pb2_grpc as stubs
from .generated import daemon_pb2 as model
from .metadata import Metadata
class TestMetadataClient(unittest.TestCase):
def setUp(self):
with mock.patch.object(stubs, 'MetadataServiceStub') as m:
m.side_effect = mock.MagicMock()
self.client = Metadata(None)
def test_created(self):
self.assertIsNotNone(self.client)
self.assertIsInstance(self.client._stub, mock.MagicMock)
def test_set(self):
key, total_size, chunks, creation_epoch, last_write_epoch =\
b'key', mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock.MagicMock()
self.client.set(key, total_size, chunks, creation_epoch, last_write_epoch)
self.client._stub.SetMetadata.assert_called_once_with(
model.SetMetadataRequest(
metadata=model.Metadata(
key=key,
creationEpoch=creation_epoch,
lastWriteEpoch=last_write_epoch,
totalSize=total_size,
chunks=chunks,
)
)
)
def test_get(self):
key = b'key'
obj = mock.MagicMock()
self.client._stub.GetMetadata.return_value = obj
result = self.client.get(key)
self.client._stub.GetMetadata.assert_called_once_with(
model.GetMetadataRequest(
key=key,
)
)
self.assertEqual(obj.metadata, result)
def test_delete(self):
key = b'key'
self.client.delete(key)
self.client._stub.DeleteMetadata.assert_called_once_with(
model.DeleteMetadataRequest(
key=key,
)
)
def test_list_keys(self):
self.client.list_keys()
self.client._stub.ListKeys.assert_called_once_with(
model.ListMetadataKeysRequest()
)
|
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest.ip_messaging import TwilioIpMessagingClient
# Your Account Sid and Auth Token from twilio.com/user/account
account = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
token = "your_auth_token"
client = TwilioIpMessagingClient(account, token)
credential = client.credentials.get("CRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
print(credential)
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# alias because we already had an option named argparse
import argparse as argparse_mod
import collections
import copy
import json
import os
import re
import sys
import warnings
import appdirs
from keystoneauth1 import adapter
from keystoneauth1 import loading
import yaml
from openstack import _log
from openstack.config import _util
from openstack.config import cloud_region
from openstack.config import defaults
from openstack.config import vendors
from openstack import exceptions
APPDIRS = appdirs.AppDirs('openstack', 'OpenStack', multipath='/etc')
CONFIG_HOME = APPDIRS.user_config_dir
CACHE_PATH = APPDIRS.user_cache_dir
UNIX_CONFIG_HOME = os.path.join(
os.path.expanduser(os.path.join('~', '.config')), 'openstack')
UNIX_SITE_CONFIG_HOME = '/etc/openstack'
SITE_CONFIG_HOME = APPDIRS.site_config_dir
CONFIG_SEARCH_PATH = [
os.getcwd(),
CONFIG_HOME, UNIX_CONFIG_HOME,
SITE_CONFIG_HOME, UNIX_SITE_CONFIG_HOME
]
YAML_SUFFIXES = ('.yaml', '.yml')
JSON_SUFFIXES = ('.json',)
CONFIG_FILES = [
os.path.join(d, 'clouds' + s)
for d in CONFIG_SEARCH_PATH
for s in YAML_SUFFIXES + JSON_SUFFIXES
]
SECURE_FILES = [
os.path.join(d, 'secure' + s)
for d in CONFIG_SEARCH_PATH
for s in YAML_SUFFIXES + JSON_SUFFIXES
]
VENDOR_FILES = [
os.path.join(d, 'clouds-public' + s)
for d in CONFIG_SEARCH_PATH
for s in YAML_SUFFIXES + JSON_SUFFIXES
]
BOOL_KEYS = ('insecure', 'cache')
FORMAT_EXCLUSIONS = frozenset(['password'])
def get_boolean(value):
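    # Loose boolean coercion for config values: None -> False, real bools pass
    # through, and only the string 'true' (any casing) is truthy.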
if value is None:
return False
    if isinstance(value, bool):
return value
if value.lower() == 'true':
return True
return False
def _auth_update(old_dict, new_dict_source):
"""Like dict.update, except handling the nested dict called auth."""
new_dict = copy.deepcopy(new_dict_source)
for (k, v) in new_dict.items():
if k == 'auth':
if k in old_dict:
old_dict[k].update(v)
else:
old_dict[k] = v.copy()
else:
old_dict[k] = v
return old_dict
def _fix_argv(argv):
# Transform any _ characters in arg names to - so that we don't
# have to throw billions of compat argparse arguments around all
# over the place.
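    # e.g. '--os-auth_url=http://x' is rewritten to '--os-auth-url=http://x'.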
processed = collections.defaultdict(list)
for index in range(0, len(argv)):
# If the value starts with '--' and has '-' or '_' in it, then
# it's worth looking at it
if re.match('^--.*(_|-)+.*', argv[index]):
split_args = argv[index].split('=')
orig = split_args[0]
new = orig.replace('_', '-')
if orig != new:
split_args[0] = new
argv[index] = "=".join(split_args)
# Save both for later so we can throw an error about dupes
processed[new].append(orig)
overlap = []
for new, old in processed.items():
if len(old) > 1:
overlap.extend(old)
if overlap:
raise exceptions.ConfigException(
"The following options were given: '{options}' which contain"
" duplicates except that one has _ and one has -. There is"
" no sane way for us to know what you're doing. Remove the"
" duplicate option and try again".format(
options=','.join(overlap)))
class OpenStackConfig(object):
# These two attribute are to allow os-client-config to plumb in its
# local versions for backwards compat.
# They should not be used by anyone else.
_cloud_region_class = cloud_region.CloudRegion
_defaults_module = defaults
def __init__(self, config_files=None, vendor_files=None,
override_defaults=None, force_ipv4=None,
envvar_prefix=None, secure_files=None,
pw_func=None, session_constructor=None,
app_name=None, app_version=None,
load_yaml_config=True, load_envvars=True):
self.log = _log.setup_logging('openstack.config')
self._session_constructor = session_constructor
self._app_name = app_name
self._app_version = app_version
self._load_envvars = load_envvars
if load_yaml_config:
self._config_files = config_files or CONFIG_FILES
self._secure_files = secure_files or SECURE_FILES
self._vendor_files = vendor_files or VENDOR_FILES
else:
self._config_files = []
self._secure_files = []
self._vendor_files = []
config_file_override = self._get_envvar('OS_CLIENT_CONFIG_FILE')
if config_file_override:
self._config_files.insert(0, config_file_override)
secure_file_override = self._get_envvar('OS_CLIENT_SECURE_FILE')
if secure_file_override:
self._secure_files.insert(0, secure_file_override)
self.defaults = self._defaults_module.get_defaults()
if override_defaults:
self.defaults.update(override_defaults)
# First, use a config file if it exists where expected
self.config_filename, self.cloud_config = self._load_config_file()
_, secure_config = self._load_secure_file()
if secure_config:
self.cloud_config = _util.merge_clouds(
self.cloud_config, secure_config)
if not self.cloud_config:
self.cloud_config = {'clouds': {}}
if 'clouds' not in self.cloud_config:
self.cloud_config['clouds'] = {}
# Save the other config
self.extra_config = copy.deepcopy(self.cloud_config)
self.extra_config.pop('clouds', None)
# Grab ipv6 preference settings from env
client_config = self.cloud_config.get('client', {})
if force_ipv4 is not None:
# If it's passed in to the constructor, honor it.
self.force_ipv4 = force_ipv4
else:
# Get the backwards compat value
prefer_ipv6 = get_boolean(
self._get_envvar(
'OS_PREFER_IPV6', client_config.get(
'prefer_ipv6', client_config.get(
'prefer-ipv6', True))))
force_ipv4 = get_boolean(
self._get_envvar(
'OS_FORCE_IPV4', client_config.get(
'force_ipv4', client_config.get(
'broken-ipv6', False))))
self.force_ipv4 = force_ipv4
if not prefer_ipv6:
# this will only be false if someone set it explicitly
# honor their wishes
self.force_ipv4 = True
# Next, process environment variables and add them to the mix
self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars')
if self.envvar_key in self.cloud_config['clouds']:
raise exceptions.ConfigException(
'"{0}" defines a cloud named "{1}", but'
' OS_CLOUD_NAME is also set to "{1}". Please rename'
' either your environment based cloud, or one of your'
' file-based clouds.'.format(self.config_filename,
self.envvar_key))
self.default_cloud = self._get_envvar('OS_CLOUD')
if load_envvars:
envvars = self._get_os_environ(envvar_prefix=envvar_prefix)
if envvars:
self.cloud_config['clouds'][self.envvar_key] = envvars
if not self.default_cloud:
self.default_cloud = self.envvar_key
if not self.default_cloud and self.cloud_config['clouds']:
if len(self.cloud_config['clouds'].keys()) == 1:
# If there is only one cloud just use it. This matches envvars
# behavior and allows for much less typing.
# TODO(mordred) allow someone to mark a cloud as "default" in
# clouds.yaml.
# The next/iter thing is for python3 compat where dict.keys
# returns an iterator but in python2 it's a list.
self.default_cloud = next(iter(
self.cloud_config['clouds'].keys()))
# Finally, fall through and make a cloud that starts with defaults
# because we need somewhere to put arguments, and there are neither
# config files or env vars
if not self.cloud_config['clouds']:
self.cloud_config = dict(
clouds=dict(defaults=dict(self.defaults)))
self.default_cloud = 'defaults'
self._cache_expiration_time = 0
self._cache_path = CACHE_PATH
self._cache_class = 'dogpile.cache.null'
self._cache_arguments = {}
self._cache_expirations = {}
if 'cache' in self.cloud_config:
cache_settings = _util.normalize_keys(self.cloud_config['cache'])
# expiration_time used to be 'max_age' but the dogpile setting
# is expiration_time. Support max_age for backwards compat.
self._cache_expiration_time = cache_settings.get(
'expiration_time', cache_settings.get(
'max_age', self._cache_expiration_time))
            # If cache class is given, use that. If not, but if cache time
            # is given, default to memory. Otherwise, default to nothing.
if self._cache_expiration_time:
self._cache_class = 'dogpile.cache.memory'
self._cache_class = self.cloud_config['cache'].get(
'class', self._cache_class)
self._cache_path = os.path.expanduser(
cache_settings.get('path', self._cache_path))
self._cache_arguments = cache_settings.get(
'arguments', self._cache_arguments)
self._cache_expirations = cache_settings.get(
'expiration', self._cache_expirations)
# Flag location to hold the peeked value of an argparse timeout value
self._argv_timeout = False
# Save the password callback
# password = self._pw_callback(prompt="Password: ")
self._pw_callback = pw_func
def _get_os_environ(self, envvar_prefix=None):
ret = self._defaults_module.get_defaults()
if not envvar_prefix:
            # With no prefix supplied, the startswith checks below test for
            # 'OS_' twice, so the extra prefix check is a no-op.
envvar_prefix = 'OS_'
environkeys = [k for k in os.environ.keys()
if (k.startswith('OS_') or k.startswith(envvar_prefix))
and not k.startswith('OS_TEST') # infra CI var
and not k.startswith('OS_STD') # oslotest var
and not k.startswith('OS_LOG') # oslotest var
]
for k in environkeys:
newkey = k.split('_', 1)[-1].lower()
ret[newkey] = os.environ[k]
# If the only environ keys are selectors or behavior modification,
# don't return anything
selectors = set([
'OS_CLOUD', 'OS_REGION_NAME',
'OS_CLIENT_CONFIG_FILE', 'OS_CLIENT_SECURE_FILE', 'OS_CLOUD_NAME'])
if set(environkeys) - selectors:
return ret
return None
def _get_envvar(self, key, default=None):
if not self._load_envvars:
return default
return os.environ.get(key, default)
def get_extra_config(self, key, defaults=None):
"""Fetch an arbitrary extra chunk of config, laying in defaults.
:param string key: name of the config section to fetch
:param dict defaults: (optional) default values to merge under the
found config
"""
defaults = _util.normalize_keys(defaults or {})
if not key:
return defaults
return _util.merge_clouds(
defaults,
_util.normalize_keys(self.cloud_config.get(key, {})))
def _load_config_file(self):
return self._load_yaml_json_file(self._config_files)
def _load_secure_file(self):
return self._load_yaml_json_file(self._secure_files)
def _load_vendor_file(self):
return self._load_yaml_json_file(self._vendor_files)
def _load_yaml_json_file(self, filelist):
for path in filelist:
if os.path.exists(path):
with open(path, 'r') as f:
if path.endswith('json'):
return path, json.load(f)
else:
return path, yaml.safe_load(f)
return (None, {})
def _expand_region_name(self, region_name):
return {'name': region_name, 'values': {}}
def _expand_regions(self, regions):
ret = []
for region in regions:
if isinstance(region, dict):
ret.append(copy.deepcopy(region))
else:
ret.append(self._expand_region_name(region))
return ret
def _get_regions(self, cloud):
if cloud not in self.cloud_config['clouds']:
return [self._expand_region_name('')]
regions = self._get_known_regions(cloud)
if not regions:
# We don't know of any regions use a workable default.
regions = [self._expand_region_name('')]
return regions
def _get_known_regions(self, cloud):
config = _util.normalize_keys(self.cloud_config['clouds'][cloud])
if 'regions' in config:
return self._expand_regions(config['regions'])
elif 'region_name' in config:
if isinstance(config['region_name'], list):
regions = config['region_name']
else:
regions = config['region_name'].split(',')
if len(regions) > 1:
warnings.warn(
"Comma separated lists in region_name are deprecated."
" Please use a yaml list in the regions"
" parameter in {0} instead.".format(self.config_filename))
return self._expand_regions(regions)
else:
# crappit. we don't have a region defined.
new_cloud = dict()
our_cloud = self.cloud_config['clouds'].get(cloud, dict())
self._expand_vendor_profile(cloud, new_cloud, our_cloud)
if 'regions' in new_cloud and new_cloud['regions']:
return self._expand_regions(new_cloud['regions'])
elif 'region_name' in new_cloud and new_cloud['region_name']:
return [self._expand_region_name(new_cloud['region_name'])]
def _get_region(self, cloud=None, region_name=''):
if region_name is None:
region_name = ''
if not cloud:
return self._expand_region_name(region_name)
regions = self._get_known_regions(cloud)
if not regions:
return self._expand_region_name(region_name)
if not region_name:
return regions[0]
for region in regions:
if region['name'] == region_name:
return region
raise exceptions.ConfigException(
'Region {region_name} is not a valid region name for cloud'
' {cloud}. Valid choices are {region_list}. Please note that'
' region names are case sensitive.'.format(
region_name=region_name,
region_list=','.join([r['name'] for r in regions]),
cloud=cloud))
def get_cloud_names(self):
return self.cloud_config['clouds'].keys()
def _get_base_cloud_config(self, name, profile=None):
cloud = dict()
# Only validate cloud name if one was given
if name and name not in self.cloud_config['clouds']:
raise exceptions.ConfigException(
"Cloud {name} was not found.".format(
name=name))
our_cloud = self.cloud_config['clouds'].get(name, dict())
if profile:
our_cloud['profile'] = profile
# Get the defaults
cloud.update(self.defaults)
self._expand_vendor_profile(name, cloud, our_cloud)
if 'auth' not in cloud:
cloud['auth'] = dict()
_auth_update(cloud, our_cloud)
if 'cloud' in cloud:
del cloud['cloud']
return cloud
def _expand_vendor_profile(self, name, cloud, our_cloud):
# Expand a profile if it exists. 'cloud' is an old confusing name
# for this.
profile_name = our_cloud.get('profile', our_cloud.get('cloud', None))
if profile_name and profile_name != self.envvar_key:
if 'cloud' in our_cloud:
warnings.warn(
"{0} use the keyword 'cloud' to reference a known "
"vendor profile. This has been deprecated in favor of the "
"'profile' keyword.".format(self.config_filename))
vendor_filename, vendor_file = self._load_vendor_file()
if vendor_file and profile_name in vendor_file['public-clouds']:
_auth_update(cloud, vendor_file['public-clouds'][profile_name])
else:
profile_data = vendors.get_profile(profile_name)
if profile_data:
status = profile_data.pop('status', 'active')
message = profile_data.pop('message', '')
if status == 'deprecated':
warnings.warn(
"{profile_name} is deprecated: {message}".format(
profile_name=profile_name, message=message))
elif status == 'shutdown':
raise exceptions.ConfigException(
"{profile_name} references a cloud that no longer"
" exists: {message}".format(
profile_name=profile_name, message=message))
_auth_update(cloud, profile_data)
else:
# Can't find the requested vendor config, go about business
warnings.warn("Couldn't find the vendor profile '{0}', for"
" the cloud '{1}'".format(profile_name,
name))
def _project_scoped(self, cloud):
return ('project_id' in cloud or 'project_name' in cloud
or 'project_id' in cloud['auth']
or 'project_name' in cloud['auth'])
def _validate_networks(self, networks, key):
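        # At most one network entry may be flagged with the given key
        # (e.g. nat_destination); a second flagged entry is a config error.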
value = None
for net in networks:
if value and net[key]:
raise exceptions.ConfigException(
"Duplicate network entries for {key}: {net1} and {net2}."
" Only one network can be flagged with {key}".format(
key=key,
net1=value['name'],
net2=net['name']))
if not value and net[key]:
value = net
def _fix_backwards_networks(self, cloud):
# Leave the external_network and internal_network keys in the
# dict because consuming code might be expecting them.
networks = []
# Normalize existing network entries
for net in cloud.get('networks', []):
name = net.get('name')
if not name:
raise exceptions.ConfigException(
'Entry in network list is missing required field "name".')
network = dict(
name=name,
routes_externally=get_boolean(net.get('routes_externally')),
nat_source=get_boolean(net.get('nat_source')),
nat_destination=get_boolean(net.get('nat_destination')),
default_interface=get_boolean(net.get('default_interface')),
)
# routes_ipv4_externally defaults to the value of routes_externally
network['routes_ipv4_externally'] = get_boolean(
net.get(
'routes_ipv4_externally', network['routes_externally']))
# routes_ipv6_externally defaults to the value of routes_externally
network['routes_ipv6_externally'] = get_boolean(
net.get(
'routes_ipv6_externally', network['routes_externally']))
networks.append(network)
for key in ('external_network', 'internal_network'):
external = key.startswith('external')
if key in cloud and 'networks' in cloud:
raise exceptions.ConfigException(
"Both {key} and networks were specified in the config."
" Please remove {key} from the config and use the network"
" list to configure network behavior.".format(key=key))
if key in cloud:
warnings.warn(
"{key} is deprecated. Please replace with an entry in"
" a dict inside of the networks list with name: {name}"
" and routes_externally: {external}".format(
key=key, name=cloud[key], external=external))
networks.append(dict(
name=cloud[key],
routes_externally=external,
nat_destination=not external,
default_interface=external))
# Validate that we don't have duplicates
self._validate_networks(networks, 'nat_destination')
self._validate_networks(networks, 'default_interface')
cloud['networks'] = networks
return cloud
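    # Editor's note (illustrative, hypothetical network name): a legacy
    # entry such as external_network: public is translated by the loop
    # above into the networks-list entry
    #   {'name': 'public', 'routes_externally': True,
    #    'nat_destination': False, 'default_interface': True}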
def _handle_domain_id(self, cloud):
# Allow people to just specify domain once if it's the same
mappings = {
'domain_id': ('user_domain_id', 'project_domain_id'),
'domain_name': ('user_domain_name', 'project_domain_name'),
}
for target_key, possible_values in mappings.items():
if not self._project_scoped(cloud):
if target_key in cloud and target_key not in cloud['auth']:
cloud['auth'][target_key] = cloud.pop(target_key)
continue
for key in possible_values:
if target_key in cloud['auth'] and key not in cloud['auth']:
cloud['auth'][key] = cloud['auth'][target_key]
cloud.pop(target_key, None)
cloud['auth'].pop(target_key, None)
return cloud
def _fix_backwards_project(self, cloud):
# Do the lists backwards so that project_name is the ultimate winner
# Also handle moving domain names into auth so that domain mapping
# is easier
mappings = {
'domain_id': ('domain_id', 'domain-id'),
'domain_name': ('domain_name', 'domain-name'),
'user_domain_id': ('user_domain_id', 'user-domain-id'),
'user_domain_name': ('user_domain_name', 'user-domain-name'),
'project_domain_id': ('project_domain_id', 'project-domain-id'),
'project_domain_name': (
'project_domain_name', 'project-domain-name'),
'token': ('auth-token', 'auth_token', 'token'),
}
if cloud.get('auth_type', None) == 'v2password':
            # If v2password is explicitly requested, this is to deal with old
# clouds. That's fine - we need to map settings in the opposite
# direction
mappings['tenant_id'] = (
'project_id', 'project-id', 'tenant_id', 'tenant-id')
mappings['tenant_name'] = (
'project_name', 'project-name', 'tenant_name', 'tenant-name')
else:
mappings['project_id'] = (
'tenant_id', 'tenant-id', 'project_id', 'project-id')
mappings['project_name'] = (
'tenant_name', 'tenant-name', 'project_name', 'project-name')
for target_key, possible_values in mappings.items():
target = None
for key in possible_values:
if key in cloud:
target = str(cloud[key])
del cloud[key]
if key in cloud['auth']:
target = str(cloud['auth'][key])
del cloud['auth'][key]
if target:
cloud['auth'][target_key] = target
return cloud
def _fix_backwards_auth_plugin(self, cloud):
# Do the lists backwards so that auth_type is the ultimate winner
mappings = {
'auth_type': ('auth_plugin', 'auth_type'),
}
for target_key, possible_values in mappings.items():
target = None
for key in possible_values:
if key in cloud:
target = cloud[key]
del cloud[key]
cloud[target_key] = target
# Because we force alignment to v3 nouns, we want to force
# use of the auth plugin that can do auto-selection and dealing
# with that based on auth parameters. v2password is basically
# completely broken
return cloud
def register_argparse_arguments(self, parser, argv, service_keys=None):
"""Register all of the common argparse options needed.
Given an argparse parser, register the keystoneauth Session arguments,
the keystoneauth Auth Plugin Options and os-cloud. Also, peek in the
argv to see if all of the auth plugin options should be registered
or merely the ones already configured.
        :param argparse.ArgumentParser parser: parser to attach argparse options to
:param argv: the arguments provided to the application
:param string service_keys: Service or list of services this argparse
should be specialized for, if known.
The first item in the list will be used
as the default value for service_type
(optional)
        :raises exceptions.ConfigException: if an invalid auth-type is requested
"""
if service_keys is None:
service_keys = []
# Fix argv in place - mapping any keys with embedded _ in them to -
_fix_argv(argv)
local_parser = argparse_mod.ArgumentParser(add_help=False)
for p in (parser, local_parser):
p.add_argument(
'--os-cloud',
metavar='<name>',
default=self._get_envvar('OS_CLOUD', None),
help='Named cloud to connect to')
# we need to peek to see if timeout was actually passed, since
# the keystoneauth declaration of it has a default, which means
# we have no clue if the value we get is from the ksa default
        # or from the user passing it explicitly. We'll stash it for later
local_parser.add_argument('--timeout', metavar='<timeout>')
# We need for get_one to be able to peek at whether a token
# was passed so that we can swap the default from password to
# token if it was. And we need to also peek for --os-auth-token
# for novaclient backwards compat
local_parser.add_argument('--os-token')
local_parser.add_argument('--os-auth-token')
# Peek into the future and see if we have an auth-type set in
# config AND a cloud set, so that we know which command line
# arguments to register and show to the user (the user may want
# to say something like:
# openstack --os-cloud=foo --os-oidctoken=bar
# although I think that user is the cause of my personal pain
options, _args = local_parser.parse_known_args(argv)
if options.timeout:
self._argv_timeout = True
# validate = False because we're not _actually_ loading here
# we're only peeking, so it's the wrong time to assert that
# the rest of the arguments given are invalid for the plugin
        # chosen (for instance, --help may be requested, so that the
        # user can see what options they may want to give)
cloud_region = self.get_one(argparse=options, validate=False)
default_auth_type = cloud_region.config['auth_type']
try:
loading.register_auth_argparse_arguments(
parser, argv, default=default_auth_type)
except Exception:
            # Hiding the keystoneauth exception because we're not actually
# loading the auth plugin at this point, so the error message
# from it doesn't actually make sense to os-client-config users
options, _args = parser.parse_known_args(argv)
plugin_names = loading.get_available_plugin_names()
raise exceptions.ConfigException(
"An invalid auth-type was specified: {auth_type}."
" Valid choices are: {plugin_names}.".format(
auth_type=options.os_auth_type,
plugin_names=",".join(plugin_names)))
if service_keys:
primary_service = service_keys[0]
else:
primary_service = None
loading.register_session_argparse_arguments(parser)
adapter.register_adapter_argparse_arguments(
parser, service_type=primary_service)
for service_key in service_keys:
# legacy clients have un-prefixed api-version options
            parser.add_argument(
                '--{service_key}-api-version'.format(
                    service_key=service_key.replace('_', '-')),
                help=argparse_mod.SUPPRESS)
adapter.register_service_adapter_argparse_arguments(
parser, service_type=service_key)
# Backwards compat options for legacy clients
parser.add_argument('--http-timeout', help=argparse_mod.SUPPRESS)
parser.add_argument('--os-endpoint-type', help=argparse_mod.SUPPRESS)
parser.add_argument('--endpoint-type', help=argparse_mod.SUPPRESS)
def _fix_backwards_interface(self, cloud):
new_cloud = {}
for key in cloud.keys():
if key.endswith('endpoint_type'):
target_key = key.replace('endpoint_type', 'interface')
else:
target_key = key
new_cloud[target_key] = cloud[key]
return new_cloud
def _fix_backwards_api_timeout(self, cloud):
new_cloud = {}
# requests can only have one timeout, which means that in a single
# cloud there is no point in different timeout values. However,
# for some reason many of the legacy clients decided to shove their
        # service name into the arg name for reasons surpassing sanity. If
# we find any values that are not api_timeout, overwrite api_timeout
# with the value
service_timeout = None
for key in cloud.keys():
if key.endswith('timeout') and not (
key == 'timeout' or key == 'api_timeout'):
service_timeout = cloud[key]
else:
new_cloud[key] = cloud[key]
if service_timeout is not None:
new_cloud['api_timeout'] = service_timeout
# The common argparse arg from keystoneauth is called timeout, but
# os-client-config expects it to be called api_timeout
if self._argv_timeout:
if 'timeout' in new_cloud and new_cloud['timeout']:
new_cloud['api_timeout'] = new_cloud.pop('timeout')
return new_cloud
def get_all(self):
clouds = []
for cloud in self.get_cloud_names():
for region in self._get_regions(cloud):
if region:
clouds.append(self.get_one(
cloud, region_name=region['name']))
return clouds
# TODO(mordred) Backwards compat for OSC transition
get_all_clouds = get_all
def _fix_args(self, args=None, argparse=None):
"""Massage the passed-in options
Replace - with _ and strip os_ prefixes.
Convert an argparse Namespace object to a dict, removing values
that are either None or ''.
"""
if not args:
args = {}
if argparse:
# Convert the passed-in Namespace
o_dict = vars(argparse)
parsed_args = dict()
for k in o_dict:
if o_dict[k] is not None and o_dict[k] != '':
parsed_args[k] = o_dict[k]
args.update(parsed_args)
os_args = dict()
new_args = dict()
for (key, val) in iter(args.items()):
if type(args[key]) == dict:
# dive into the auth dict
new_args[key] = self._fix_args(args[key])
continue
key = key.replace('-', '_')
if key.startswith('os_'):
os_args[key[3:]] = val
else:
new_args[key] = val
new_args.update(os_args)
return new_args
def _find_winning_auth_value(self, opt, config):
opt_name = opt.name.replace('-', '_')
if opt_name in config:
return config[opt_name]
else:
deprecated = getattr(opt, 'deprecated', getattr(
opt, 'deprecated_opts', []))
for d_opt in deprecated:
d_opt_name = d_opt.name.replace('-', '_')
if d_opt_name in config:
return config[d_opt_name]
def auth_config_hook(self, config):
"""Allow examination of config values before loading auth plugin
OpenStackClient will override this to perform additional checks
on auth_type.
"""
return config
def _get_auth_loader(self, config):
# Use the 'none' plugin for variants of None specified,
# since it does not look up endpoints or tokens but rather
# does a passthrough. This is useful for things like Ironic
# that have a keystoneless operational mode, but means we're
# still dealing with a keystoneauth Session object, so all the
# _other_ things (SSL arg handling, timeout) all work consistently
if config['auth_type'] in (None, "None", ''):
config['auth_type'] = 'none'
elif config['auth_type'] == 'token_endpoint':
            # Humans have been trained to use a thing called token_endpoint.
            # That it does not exist in keystoneauth is irrelevant; it not
            # doing what they want causes them sorrow.
config['auth_type'] = 'admin_token'
return loading.get_plugin_loader(config['auth_type'])
def _validate_auth(self, config, loader):
# May throw a keystoneauth1.exceptions.NoMatchingPlugin
plugin_options = loader.get_options()
for p_opt in plugin_options:
# if it's in config.auth, win, kill it from config dict
# if it's in config and not in config.auth, move it
# deprecated loses to current
# provided beats default, deprecated or not
winning_value = self._find_winning_auth_value(
p_opt,
config['auth'],
)
if not winning_value:
winning_value = self._find_winning_auth_value(
p_opt,
config,
)
config = self._clean_up_after_ourselves(
config,
p_opt,
winning_value,
)
if winning_value:
# Prefer the plugin configuration dest value if the value's key
# is marked as deprecated.
if p_opt.dest is None:
good_name = p_opt.name.replace('-', '_')
config['auth'][good_name] = winning_value
else:
config['auth'][p_opt.dest] = winning_value
# See if this needs a prompting
config = self.option_prompt(config, p_opt)
return config
def _validate_auth_correctly(self, config, loader):
# May throw a keystoneauth1.exceptions.NoMatchingPlugin
plugin_options = loader.get_options()
for p_opt in plugin_options:
# if it's in config, win, move it and kill it from config dict
# if it's in config.auth but not in config it's good
# deprecated loses to current
# provided beats default, deprecated or not
winning_value = self._find_winning_auth_value(
p_opt,
config,
)
if not winning_value:
winning_value = self._find_winning_auth_value(
p_opt,
config['auth'],
)
config = self._clean_up_after_ourselves(
config,
p_opt,
winning_value,
)
# See if this needs a prompting
config = self.option_prompt(config, p_opt)
return config
def option_prompt(self, config, p_opt):
"""Prompt user for option that requires a value"""
if (
getattr(p_opt, 'prompt', None) is not None
and p_opt.dest not in config['auth']
and self._pw_callback is not None
):
config['auth'][p_opt.dest] = self._pw_callback(p_opt.prompt)
return config
def _clean_up_after_ourselves(self, config, p_opt, winning_value):
# Clean up after ourselves
for opt in [p_opt.name] + [o.name for o in p_opt.deprecated]:
opt = opt.replace('-', '_')
config.pop(opt, None)
config['auth'].pop(opt, None)
if winning_value:
# Prefer the plugin configuration dest value if the value's key
            # is marked as deprecated.
if p_opt.dest is None:
config['auth'][p_opt.name.replace('-', '_')] = (
winning_value)
else:
config['auth'][p_opt.dest] = winning_value
return config
def magic_fixes(self, config):
"""Perform the set of magic argument fixups"""
# Infer token plugin if a token was given
if (('auth' in config and 'token' in config['auth'])
or ('auth_token' in config and config['auth_token'])
or ('token' in config and config['token'])):
config.setdefault('token', config.pop('auth_token', None))
# These backwards compat values are only set via argparse. If it's
# there, it's because it was passed in explicitly, and should win
config = self._fix_backwards_api_timeout(config)
if 'endpoint_type' in config:
config['interface'] = config.pop('endpoint_type')
config = self._fix_backwards_auth_plugin(config)
config = self._fix_backwards_project(config)
config = self._fix_backwards_interface(config)
config = self._fix_backwards_networks(config)
config = self._handle_domain_id(config)
for key in BOOL_KEYS:
if key in config:
if type(config[key]) is not bool:
config[key] = get_boolean(config[key])
        # TODO(mordred): Special casing auth_url here. We should come
        # back to this later so that it's more generalized
if 'auth' in config and 'auth_url' in config['auth']:
config['auth']['auth_url'] = config['auth']['auth_url'].format(
**config)
return config
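    # Editor's note (illustrative): the auth_url expansion above lets a
    # clouds.yaml entry such as
    #   auth_url: 'https://{region_name}.identity.example.test/v3'
    # resolve per region once the rest of the config is merged; the URL
    # shape here is a hypothetical placeholder.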
def get_one(
self, cloud=None, validate=True, argparse=None, **kwargs):
"""Retrieve a single CloudRegion and merge additional options
:param string cloud:
The name of the configuration to load from clouds.yaml
:param boolean validate:
Validate the config. Setting this to False causes no auth plugin
to be created. It's really only useful for testing.
:param Namespace argparse:
An argparse Namespace object; allows direct passing in of
argparse options to be added to the cloud config. Values
of None and '' will be removed.
:param region_name: Name of the region of the cloud.
:param kwargs: Additional configuration options
:returns: openstack.config.cloud_region.CloudRegion
:raises: keystoneauth1.exceptions.MissingRequiredOptions
on missing required auth parameters
"""
profile = kwargs.pop('profile', None)
args = self._fix_args(kwargs, argparse=argparse)
if cloud is None:
if 'cloud' in args:
cloud = args['cloud']
else:
cloud = self.default_cloud
config = self._get_base_cloud_config(cloud, profile)
# Get region specific settings
if 'region_name' not in args:
args['region_name'] = ''
region = self._get_region(cloud=cloud, region_name=args['region_name'])
args['region_name'] = region['name']
region_args = copy.deepcopy(region['values'])
# Regions is a list that we can use to create a list of cloud/region
# objects. It does not belong in the single-cloud dict
config.pop('regions', None)
# Can't just do update, because None values take over
for arg_list in region_args, args:
for (key, val) in iter(arg_list.items()):
if val is not None:
if key == 'auth' and config[key] is not None:
config[key] = _auth_update(config[key], val)
else:
config[key] = val
config = self.magic_fixes(config)
config = _util.normalize_keys(config)
# NOTE(dtroyer): OSC needs a hook into the auth args before the
# plugin is loaded in order to maintain backward-
# compatible behaviour
config = self.auth_config_hook(config)
if validate:
loader = self._get_auth_loader(config)
config = self._validate_auth(config, loader)
auth_plugin = loader.load_from_options(**config['auth'])
else:
auth_plugin = None
# If any of the defaults reference other values, we need to expand
for (key, value) in config.items():
if hasattr(value, 'format') and key not in FORMAT_EXCLUSIONS:
config[key] = value.format(**config)
force_ipv4 = config.pop('force_ipv4', self.force_ipv4)
prefer_ipv6 = config.pop('prefer_ipv6', True)
if not prefer_ipv6:
force_ipv4 = True
if cloud is None:
cloud_name = ''
else:
cloud_name = str(cloud)
return self._cloud_region_class(
name=cloud_name,
region_name=config['region_name'],
config=config,
extra_config=self.extra_config,
force_ipv4=force_ipv4,
auth_plugin=auth_plugin,
openstack_config=self,
session_constructor=self._session_constructor,
app_name=self._app_name,
app_version=self._app_version,
cache_expiration_time=self._cache_expiration_time,
cache_expirations=self._cache_expirations,
cache_path=self._cache_path,
cache_class=self._cache_class,
cache_arguments=self._cache_arguments,
password_callback=self._pw_callback,
)
# TODO(mordred) Backwards compat for OSC transition
get_one_cloud = get_one
def get_one_cloud_osc(
self,
cloud=None,
validate=True,
argparse=None,
**kwargs
):
"""Retrieve a single CloudRegion and merge additional options
:param string cloud:
The name of the configuration to load from clouds.yaml
:param boolean validate:
Validate the config. Setting this to False causes no auth plugin
to be created. It's really only useful for testing.
:param Namespace argparse:
An argparse Namespace object; allows direct passing in of
argparse options to be added to the cloud config. Values
of None and '' will be removed.
:param region_name: Name of the region of the cloud.
:param kwargs: Additional configuration options
:raises: keystoneauth1.exceptions.MissingRequiredOptions
on missing required auth parameters
"""
args = self._fix_args(kwargs, argparse=argparse)
if cloud is None:
if 'cloud' in args:
cloud = args['cloud']
else:
cloud = self.default_cloud
config = self._get_base_cloud_config(cloud)
# Get region specific settings
if 'region_name' not in args:
args['region_name'] = ''
region = self._get_region(cloud=cloud, region_name=args['region_name'])
args['region_name'] = region['name']
region_args = copy.deepcopy(region['values'])
# Regions is a list that we can use to create a list of cloud/region
# objects. It does not belong in the single-cloud dict
config.pop('regions', None)
# Can't just do update, because None values take over
for arg_list in region_args, args:
for (key, val) in iter(arg_list.items()):
if val is not None:
if key == 'auth' and config[key] is not None:
config[key] = _auth_update(config[key], val)
else:
config[key] = val
config = self.magic_fixes(config)
# NOTE(dtroyer): OSC needs a hook into the auth args before the
# plugin is loaded in order to maintain backward-
# compatible behaviour
config = self.auth_config_hook(config)
if validate:
loader = self._get_auth_loader(config)
config = self._validate_auth_correctly(config, loader)
auth_plugin = loader.load_from_options(**config['auth'])
else:
auth_plugin = None
# If any of the defaults reference other values, we need to expand
for (key, value) in config.items():
if hasattr(value, 'format') and key not in FORMAT_EXCLUSIONS:
config[key] = value.format(**config)
force_ipv4 = config.pop('force_ipv4', self.force_ipv4)
prefer_ipv6 = config.pop('prefer_ipv6', True)
if not prefer_ipv6:
force_ipv4 = True
if cloud is None:
cloud_name = ''
else:
cloud_name = str(cloud)
return self._cloud_region_class(
name=cloud_name,
region_name=config['region_name'],
config=config,
extra_config=self.extra_config,
force_ipv4=force_ipv4,
auth_plugin=auth_plugin,
openstack_config=self,
cache_expiration_time=self._cache_expiration_time,
cache_expirations=self._cache_expirations,
cache_path=self._cache_path,
cache_class=self._cache_class,
cache_arguments=self._cache_arguments,
password_callback=self._pw_callback,
)
@staticmethod
def set_one_cloud(config_file, cloud, set_config=None):
"""Set a single cloud configuration.
:param string config_file:
The path to the config file to edit. If this file does not exist
it will be created.
:param string cloud:
The name of the configuration to save to clouds.yaml
:param dict set_config: Configuration options to be set
"""
set_config = set_config or {}
cur_config = {}
try:
with open(config_file) as fh:
cur_config = yaml.safe_load(fh)
except IOError as e:
            # Re-raise anything other than "No such file or directory"
            if e.errno != 2:
                raise
clouds_config = cur_config.get('clouds', {})
cloud_config = _auth_update(clouds_config.get(cloud, {}), set_config)
clouds_config[cloud] = cloud_config
cur_config['clouds'] = clouds_config
with open(config_file, 'w') as fh:
yaml.safe_dump(cur_config, fh, default_flow_style=False)
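# --- Editor's illustrative sketch (not part of the original module) ---
# A minimal demonstration of the key normalization performed by
# OpenStackConfig._fix_args: '-' becomes '_' and the 'os_' prefix is
# stripped. The input keys and values below are hypothetical.
def _example_fix_args():
    occ = OpenStackConfig()
    args = occ._fix_args({'os-auth-url': 'http://example.test', 'region-name': 'r1'})
    # expected: {'auth_url': 'http://example.test', 'region_name': 'r1'}
    return args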
if __name__ == '__main__':
config = OpenStackConfig().get_all_clouds()
for cloud in config:
print_cloud = False
if len(sys.argv) == 1:
print_cloud = True
elif len(sys.argv) == 3 and (
sys.argv[1] == cloud.name and sys.argv[2] == cloud.region):
print_cloud = True
elif len(sys.argv) == 2 and (
sys.argv[1] == cloud.name):
print_cloud = True
if print_cloud:
print(cloud.name, cloud.region, cloud.config)
|
#!/usr/bin/env python
# coding=utf-8
"""Follow Reference Field tests."""
|
"""
Factual API driver
"""
import json
from functools import partial
from urllib import urlencode
import requests
from requests_oauthlib import OAuth1
from query import Resolve, Table, Submit, Insert, Facets, Flag, Geopulse, Geocode, Diffs, Match, Multi, Clear
API_V3_HOST = "http://api.v3.factual.com"
DRIVER_VERSION_TAG = "factual-python-driver-1.4.2"
class Factual(object):
def __init__(self, key, secret, timeout=None):
self.key = key
self.secret = secret
self.timeout = timeout
self.api = API(self._generate_token(key, secret),timeout)
def table(self, table):
return Table(self.api, 't/' + table)
def crosswalk(self):
return Table(self.api, 't/crosswalk')
def resolve(self, table, values):
return Resolve(self.api, table, {'values': values})
def match(self, table, values):
return Match(self.api, table, {'values': values})
def raw_read(self, path, raw_params):
return self.api.raw_read(path, raw_params)
def raw_write(self, path, raw_params):
return self.api.raw_write(path, raw_params)
def facets(self, table):
return Facets(self.api, 't/' + table + '/facets')
def submit(self, table, factual_id=None, values={}):
return Submit(self.api, table, factual_id, {'values': values})
def insert(self, table, factual_id=None, values={}):
return Insert(self.api, table, factual_id, {'values': values})
def clear(self, table, factual_id, fields):
return Clear(self.api, table, factual_id, {'fields': fields})
def flag(self, table, factual_id):
return Flag(self.api, table, factual_id)
def geopulse(self, point={}):
return Geopulse(self.api, 'geopulse/context', {'geo': point})
def geocode(self, point):
return Geocode(self.api, 'places/geocode', {'geo': point})
def monetize(self):
return Table(self.api, 'places/monetize')
def diffs(self, table, start, end):
return Diffs(self.api, 't/' + table + '/diffs', start, end)
def multi(self, queries):
return Multi(self.api, queries).make_request()
def get_row(self, table, factual_id):
data = self.table(table).factual_id(factual_id).data()
return data[0]
def _generate_token(self, key, secret):
access_token = OAuth1(key, secret)
return access_token
class API(object):
def __init__(self, access_token, timeout):
self.client = requests.Session()
self.client.auth = access_token
self.client.timeout = timeout
def get(self, query):
response = self._handle_request(query.path, query.params, self._make_get_request)
return response
def post(self, query):
response = self._handle_request(query.path, query.params, self._make_post_request)
return response
def schema(self, query):
response = self._handle_request(query.path + '/schema', query.params, self._make_get_request)
return response['view']
def raw_read(self, path, raw_params):
url = self._build_base_url(path)
return self._make_request(url, raw_params, self._make_get_request).text
def raw_stream_read(self, path, raw_params):
url = self._build_base_url(path)
for line in self._make_request(url, raw_params, partial(self._make_get_request, stream=True)).iter_lines():
if line:
yield line
def raw_write(self, path, raw_params):
url = self._build_base_url(path)
return self._make_request(url, raw_params, self._make_post_request).text
def build_url(self, path, params):
url = self._build_base_url(path)
url += '?' + self._make_query_string(params)
return url
def build_multi_url(self, query):
return '/' + query.path + '?' + self._make_query_string(query.params)
def _build_base_url(self, path):
return API_V3_HOST + '/' + path
def _handle_request(self, path, params, request_method):
url = self._build_base_url(path)
response = self._make_request(url, params, request_method)
payload = json.loads(response.text)
if payload['status'] == 'error':
raise APIException(response.status_code, payload, response.url)
return payload['response'] if 'response' in payload else payload
def _make_request(self, url, params, request_method):
request_params = self._transform_params(params)
response = request_method(url, request_params)
if not 200 <= response.status_code < 300:
raise APIException(response.status_code, response.text, response.url)
return response
def _make_get_request(self, url, params, stream=False):
headers = {'X-Factual-Lib': DRIVER_VERSION_TAG}
return self.client.get(url, headers=headers, params=params, timeout=self.client.timeout, stream=stream)
def _make_post_request(self, url, params):
headers = {'X-Factual-Lib': DRIVER_VERSION_TAG, 'content-type': 'application/x-www-form-urlencoded'}
return self.client.post(url, headers=headers, data=params)
def _make_query_string(self, params):
return urlencode([(k,v) for k,v in self._transform_params(params).items()])
def _transform_params(self, params):
if isinstance(params, str):
return params
string_params = []
for key, val in params.items():
transformed = json.dumps(val) if not isinstance(val, str) else val
string_params.append((key, transformed))
return dict(string_params)
class APIException(Exception):
def __init__(self, status_code, response, url):
self.status_code = status_code
self.response = response
self.url = url
        exception = {'http_status_code': status_code,
                     'response': response,
                     'url': url,
                     'driver_version': DRIVER_VERSION_TAG}
Exception.__init__(self, exception)
def get_status_code(self):
return self.status_code
def get_response(self):
return self.response
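# --- Editor's illustrative sketch (not part of the original driver) ---
# A typical read flow, assuming valid Factual OAuth credentials; the
# key/secret strings and table name below are placeholders.
if __name__ == '__main__':
    factual = Factual('YOUR_KEY', 'YOUR_SECRET')
    # build_url only formats a request URL, so it is safe to call offline
    print(factual.api.build_url('t/places', {'limit': 3}))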
|
#!/usr/bin/env python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp.dsl as dsl
class RandomFailure1Op(dsl.ContainerOp):
"""A component that fails randomly."""
def __init__(self, exit_codes):
super(RandomFailure1Op, self).__init__(
name='random_failure',
image='python:alpine3.6',
command=['python', '-c'],
arguments=["import random; import sys; exit_code = random.choice([%s]); print(exit_code); sys.exit(exit_code)" % exit_codes])
@dsl.pipeline(
name='pipeline includes two steps which fail randomly.',
description='shows how to use ContainerOp set_retry().'
)
def retry_sample_pipeline():
op1 = RandomFailure1Op('0,1,2,3').set_retry(100)
op2 = RandomFailure1Op('0,1').set_retry(50)
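# Editor's note (illustrative): set_retry(n) lets the backend retry the step
# up to n times, so op1 above only fails the run if every one of its 100
# attempts draws a non-zero exit code; a third op with its own retry budget
# would look like RandomFailure1Op('0,1,2').set_retry(10).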
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(retry_sample_pipeline, __file__ + '.tar.gz')
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSubnetResult',
'AwaitableGetSubnetResult',
'get_subnet',
]
@pulumi.output_type
class GetSubnetResult:
"""
Subnet in a virtual network resource.
"""
def __init__(__self__, address_prefix=None, etag=None, id=None, ip_configurations=None, name=None, network_security_group=None, provisioning_state=None, resource_navigation_links=None, route_table=None, service_endpoint_policies=None, service_endpoints=None):
if address_prefix and not isinstance(address_prefix, str):
raise TypeError("Expected argument 'address_prefix' to be a str")
pulumi.set(__self__, "address_prefix", address_prefix)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_configurations and not isinstance(ip_configurations, list):
raise TypeError("Expected argument 'ip_configurations' to be a list")
pulumi.set(__self__, "ip_configurations", ip_configurations)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_security_group and not isinstance(network_security_group, dict):
raise TypeError("Expected argument 'network_security_group' to be a dict")
pulumi.set(__self__, "network_security_group", network_security_group)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_navigation_links and not isinstance(resource_navigation_links, list):
raise TypeError("Expected argument 'resource_navigation_links' to be a list")
pulumi.set(__self__, "resource_navigation_links", resource_navigation_links)
if route_table and not isinstance(route_table, dict):
raise TypeError("Expected argument 'route_table' to be a dict")
pulumi.set(__self__, "route_table", route_table)
if service_endpoint_policies and not isinstance(service_endpoint_policies, list):
raise TypeError("Expected argument 'service_endpoint_policies' to be a list")
pulumi.set(__self__, "service_endpoint_policies", service_endpoint_policies)
if service_endpoints and not isinstance(service_endpoints, list):
raise TypeError("Expected argument 'service_endpoints' to be a list")
pulumi.set(__self__, "service_endpoints", service_endpoints)
@property
@pulumi.getter(name="addressPrefix")
def address_prefix(self) -> Optional[str]:
"""
The address prefix for the subnet.
"""
return pulumi.get(self, "address_prefix")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Sequence['outputs.IPConfigurationResponse']:
"""
Gets an array of references to the network interface IP configurations using subnet.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
"""
The reference of the NetworkSecurityGroup resource.
"""
return pulumi.get(self, "network_security_group")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceNavigationLinks")
def resource_navigation_links(self) -> Optional[Sequence['outputs.ResourceNavigationLinkResponse']]:
"""
Gets an array of references to the external resources using subnet.
"""
return pulumi.get(self, "resource_navigation_links")
@property
@pulumi.getter(name="routeTable")
def route_table(self) -> Optional['outputs.RouteTableResponse']:
"""
The reference of the RouteTable resource.
"""
return pulumi.get(self, "route_table")
@property
@pulumi.getter(name="serviceEndpointPolicies")
def service_endpoint_policies(self) -> Optional[Sequence['outputs.ServiceEndpointPolicyResponse']]:
"""
An array of service endpoint policies.
"""
return pulumi.get(self, "service_endpoint_policies")
@property
@pulumi.getter(name="serviceEndpoints")
def service_endpoints(self) -> Optional[Sequence['outputs.ServiceEndpointPropertiesFormatResponse']]:
"""
An array of service endpoints.
"""
return pulumi.get(self, "service_endpoints")
class AwaitableGetSubnetResult(GetSubnetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubnetResult(
address_prefix=self.address_prefix,
etag=self.etag,
id=self.id,
ip_configurations=self.ip_configurations,
name=self.name,
network_security_group=self.network_security_group,
provisioning_state=self.provisioning_state,
resource_navigation_links=self.resource_navigation_links,
route_table=self.route_table,
service_endpoint_policies=self.service_endpoint_policies,
service_endpoints=self.service_endpoints)
def get_subnet(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
subnet_name: Optional[str] = None,
virtual_network_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubnetResult:
"""
Subnet in a virtual network resource.
:param str expand: Expands referenced resources.
:param str resource_group_name: The name of the resource group.
:param str subnet_name: The name of the subnet.
:param str virtual_network_name: The name of the virtual network.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['subnetName'] = subnet_name
__args__['virtualNetworkName'] = virtual_network_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180701:getSubnet', __args__, opts=opts, typ=GetSubnetResult).value
return AwaitableGetSubnetResult(
address_prefix=__ret__.address_prefix,
etag=__ret__.etag,
id=__ret__.id,
ip_configurations=__ret__.ip_configurations,
name=__ret__.name,
network_security_group=__ret__.network_security_group,
provisioning_state=__ret__.provisioning_state,
resource_navigation_links=__ret__.resource_navigation_links,
route_table=__ret__.route_table,
service_endpoint_policies=__ret__.service_endpoint_policies,
service_endpoints=__ret__.service_endpoints)
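# --- Editor's illustrative sketch (not generated code) ---
# Inside a Pulumi program the lookup above would be used roughly as follows;
# all resource names are hypothetical placeholders.
#
#   subnet = get_subnet(resource_group_name='example-rg',
#                       virtual_network_name='example-vnet',
#                       subnet_name='default')
#   pulumi.export('subnetPrefix', subnet.address_prefix)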
|
import math
class TestModel:
def __init__(self, testFile):
self.testFile = testFile
def testModel(self, model):
getProbability, classProbabilities = model
finalResults = []
with open(self.testFile, 'r') as f:
for line in f:
maxP = ['', '', -math.inf]
for docClass in classProbabilities:
calculatedProbability = 0
pivot = line.find('@')
realDocClass = line[0:pivot]
words = line[pivot + 10:].split()
                    # start at 1 so the first bigram does not wrap around
                    # to words[-1] (the original range began at 0)
                    for i in range(1, len(words)):
                        key = words[i - 1] + ' ' + words[i]
calculatedProbability += math.log2(getProbability(key, docClass))
calculatedProbability += math.log2(classProbabilities[docClass])
if calculatedProbability > maxP[2]:
maxP = [docClass, realDocClass, calculatedProbability]
finalResults.append(maxP)
for r in finalResults:
print(r)
return finalResults
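if __name__ == '__main__':
    # Editor's smoke test (hypothetical file name, uniform dummy model): the
    # model argument is the (getProbability, classProbabilities) pair that
    # testModel unpacks above.
    def uniform_probability(key, doc_class):
        return 0.5
    dummy_model = (uniform_probability, {'pos': 0.5, 'neg': 0.5})
    # TestModel('test.txt').testModel(dummy_model)  # needs a real test file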
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# Copyright (C) 2009, 2011, 2013 David Aguilar (davvid -at- gmail.com)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import sys
import jsonpickle.util as util
import jsonpickle.tags as tags
import jsonpickle.handlers as handlers
from jsonpickle.compat import set
from jsonpickle.backend import JSONBackend
def decode(string, backend=None, context=None, keys=False, reset=True,
safe=False):
backend = _make_backend(backend)
if context is None:
context = Unpickler(keys=keys, backend=backend, safe=safe)
return context.restore(backend.decode(string), reset=reset)
def _make_backend(backend):
if backend is None:
return JSONBackend()
else:
return backend
class _Proxy(object):
"""Proxies are dummy objects that are later replaced by real instances
The `restore()` function has to solve a tricky problem when pickling
objects with cyclical references -- the parent instance does not yet
exist.
The problem is that `__getnewargs__()`, `__getstate__()`, custom handlers,
and cyclical objects graphs are allowed to reference the yet-to-be-created
object via the referencing machinery.
In other words, objects are allowed to depend on themselves for
construction!
We solve this problem by placing dummy Proxy objects into the referencing
machinery so that we can construct the child objects before constructing
the parent. Objects are initially created with Proxy attribute values
instead of real references.
We collect all objects that contain references to proxies and run
a final sweep over them to swap in the real instance. This is done
at the very end of the top-level `restore()`.
The `instance` attribute below is replaced with the real instance
after `__new__()` has been used to construct the object and is used
when swapping proxies with real instances.
"""
def __init__(self):
self.instance = None
def _obj_setattr(obj, attr, proxy):
setattr(obj, attr, proxy.instance)
def _obj_setvalue(obj, idx, proxy):
obj[idx] = proxy.instance
class Unpickler(object):
def __init__(self, backend=None, keys=False, safe=False):
## The current recursion depth
## Maps reference names to object instances
self.backend = _make_backend(backend)
self.keys = keys
self.safe = safe
self._namedict = {}
## The namestack grows whenever we recurse into a child object
self._namestack = []
## Maps objects to their index in the _objs list
self._obj_to_idx = {}
self._objs = []
self._proxies = []
def reset(self):
"""Resets the object's internal state.
"""
self._namedict = {}
self._namestack = []
self._obj_to_idx = {}
self._objs = []
self._proxies = []
def restore(self, obj, reset=True):
"""Restores a flattened object to its original python state.
Simply returns any of the basic builtin types
>>> u = Unpickler()
>>> u.restore('hello world')
'hello world'
>>> u.restore({'key': 'value'})
{'key': 'value'}
"""
if reset:
self.reset()
value = self._restore(obj)
if reset:
self._finalize()
return value
def _finalize(self):
"""Replace proxies with their corresponding instances"""
for (obj, attr, proxy, method) in self._proxies:
method(obj, attr, proxy)
def _restore(self, obj):
if has_tag(obj, tags.ID):
restore = self._restore_id
elif has_tag(obj, tags.REF): # Backwards compatibility
restore = self._restore_ref
elif has_tag(obj, tags.ITERATOR):
restore = self._restore_iterator
elif has_tag(obj, tags.TYPE):
restore = self._restore_type
elif has_tag(obj, tags.REPR): # Backwards compatibility
restore = self._restore_repr
elif has_tag(obj, tags.REDUCE):
restore = self._restore_reduce
elif has_tag(obj, tags.OBJECT):
restore = self._restore_object
elif has_tag(obj, tags.FUNCTION):
restore = self._restore_function
elif util.is_list(obj):
restore = self._restore_list
elif has_tag(obj, tags.TUPLE):
restore = self._restore_tuple
elif has_tag(obj, tags.SET):
restore = self._restore_set
elif util.is_dictionary(obj):
restore = self._restore_dict
else:
restore = lambda x: x
return restore(obj)
def _restore_iterator(self, obj):
return iter(self._restore_list(obj[tags.ITERATOR]))
def _restore_reduce(self, obj):
"""
        Supports restoring with all elements of __reduce__, as per PEP 307.
Assumes that iterator items (the last two) are represented as lists
as per pickler implementation.
"""
reduce_val = obj[tags.REDUCE]
f, args, state, listitems, dictitems = map(self._restore, reduce_val)
if f == tags.NEWOBJ or f.__name__ == '__newobj__':
# mandated special case
cls = args[0]
stage1 = cls.__new__(cls, *args[1:])
else:
stage1 = f(*args)
if state:
try:
stage1.__setstate__(state)
except AttributeError as err:
# it's fine - we'll try the prescribed default methods
try:
stage1.__dict__.update(state)
except AttributeError as err:
# next prescribed default
for k, v in state.items():
setattr(stage1, k, v)
if listitems:
# should be lists if not None
try:
stage1.extend(listitems)
except AttributeError:
for x in listitems:
stage1.append(x)
if dictitems:
for k, v in dictitems:
stage1.__setitem__(k, v)
return stage1
def _restore_id(self, obj):
return self._objs[obj[tags.ID]]
def _restore_ref(self, obj):
return self._namedict.get(obj[tags.REF])
def _restore_type(self, obj):
typeref = loadclass(obj[tags.TYPE])
if typeref is None:
return obj
return typeref
def _restore_repr(self, obj):
if self.safe:
# eval() is not allowed in safe mode
return None
obj = loadrepr(obj[tags.REPR])
return self._mkref(obj)
def _restore_object(self, obj):
class_name = obj[tags.OBJECT]
handler = handlers.get(class_name)
if handler is not None: # custom handler
instance = handler(self).restore(obj)
return self._mkref(instance)
cls = loadclass(class_name)
if cls is None:
return self._mkref(obj)
return self._restore_object_instance(obj, cls)
def _restore_function(self, obj):
return loadclass(obj[tags.FUNCTION])
def _loadfactory(self, obj):
try:
default_factory = obj['default_factory']
except KeyError:
return None
del obj['default_factory']
return self._restore(default_factory)
def _restore_object_instance(self, obj, cls):
factory = self._loadfactory(obj)
args = getargs(obj)
is_oldstyle = not (isinstance(cls, type) or getattr(cls, '__meta__', None))
# This is a placeholder proxy object which allows child objects to
# reference the parent object before it has been instantiated.
proxy = _Proxy()
self._mkref(proxy)
if args:
args = self._restore(args)
try:
if (not is_oldstyle) and hasattr(cls, '__new__'): # new style classes
if factory:
instance = cls.__new__(cls, factory, *args)
instance.default_factory = factory
else:
instance = cls.__new__(cls, *args)
else:
instance = object.__new__(cls)
except TypeError: # old-style classes
is_oldstyle = True
if is_oldstyle:
try:
instance = cls(*args)
except TypeError: # fail gracefully
try:
instance = make_blank_classic(cls)
except: # fail gracefully
return self._mkref(obj)
proxy.instance = instance
self._swapref(proxy, instance)
if isinstance(instance, tuple):
return instance
return self._restore_object_instance_variables(obj, instance)
def _restore_from_dict(self, obj, instance, ignorereserved=True):
restore_key = self._restore_key_fn()
method = _obj_setattr
for k, v in sorted(obj.items(), key=util.itemgetter):
# ignore the reserved attribute
if ignorereserved and k in tags.RESERVED:
continue
self._namestack.append(k)
k = restore_key(k)
# step into the namespace
value = self._restore(v)
if (util.is_noncomplex(instance) or
util.is_dictionary_subclass(instance)):
instance[k] = value
else:
setattr(instance, k, value)
# This instance has an instance variable named `k` that is
# currently a proxy and must be replaced
if type(value) is _Proxy:
self._proxies.append((instance, k, value, method))
# step out
self._namestack.pop()
def _restore_object_instance_variables(self, obj, instance):
self._restore_from_dict(obj, instance)
# Handle list and set subclasses
if has_tag(obj, tags.SEQ):
if hasattr(instance, 'append'):
for v in obj[tags.SEQ]:
instance.append(self._restore(v))
if hasattr(instance, 'add'):
for v in obj[tags.SEQ]:
instance.add(self._restore(v))
if has_tag(obj, tags.STATE):
instance = self._restore_state(obj, instance)
return instance
def _restore_state(self, obj, instance):
state = self._restore(obj[tags.STATE])
has_slots = (isinstance(state, tuple) and len(state) == 2
and isinstance(state[1], dict))
has_slots_and_dict = has_slots and isinstance(state[0], dict)
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
elif isinstance(state, dict):
# implements described default handling
# of state for object with instance dict
# and no slots
self._restore_from_dict(state, instance, ignorereserved=False)
elif has_slots:
self._restore_from_dict(state[1], instance, ignorereserved=False)
if has_slots_and_dict:
self._restore_from_dict(state[0],
instance, ignorereserved=False)
elif not hasattr(instance, '__getnewargs__'):
# __setstate__ is not implemented so that means that the best
# we can do is return the result of __getstate__() rather than
# return an empty shell of an object.
# However, if there were newargs, it's not an empty shell
instance = state
return instance
def _restore_list(self, obj):
parent = []
self._mkref(parent)
children = [self._restore(v) for v in obj]
parent.extend(children)
method = _obj_setvalue
proxies = [(parent, idx, value, method)
for idx, value in enumerate(parent)
if type(value) is _Proxy]
self._proxies.extend(proxies)
return parent
def _restore_tuple(self, obj):
return tuple([self._restore(v) for v in obj[tags.TUPLE]])
def _restore_set(self, obj):
return set([self._restore(v) for v in obj[tags.SET]])
def _restore_dict(self, obj):
data = {}
restore_key = self._restore_key_fn()
for k, v in sorted(obj.items(), key=util.itemgetter):
self._namestack.append(k)
k = restore_key(k)
data[k] = self._restore(v)
self._namestack.pop()
return data
def _restore_key_fn(self):
"""Return a callable that restores keys
This function is responsible for restoring non-string keys
when we are decoding with `keys=True`.
"""
# This function is called before entering a tight loop
# where the returned function will be called.
# We return a specific function after checking self.keys
# instead of doing so in the body of the function to
# avoid conditional branching inside a tight loop.
if self.keys:
def restore_key(key):
if key.startswith(tags.JSON_KEY):
key = decode(key[len(tags.JSON_KEY):],
backend=self.backend, context=self,
keys=True, reset=False)
return key
else:
restore_key = lambda key: key
return restore_key
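    # Editor's note (illustrative): with keys=True a non-string key such as
    # the integer 1 is stored under a tags.JSON_KEY-prefixed string, and the
    # restore_key() returned above decodes it back to 1.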
def _refname(self):
"""Calculates the name of the current location in the JSON stack.
This is called as jsonpickle traverses the object structure to
create references to previously-traversed objects. This allows
cyclical data structures such as doubly-linked lists.
jsonpickle ensures that duplicate python references to the same
object results in only a single JSON object definition and
special reference tags to represent each reference.
>>> u = Unpickler()
>>> u._namestack = []
>>> u._refname()
'/'
>>> u._namestack = ['a']
>>> u._refname()
'/a'
>>> u._namestack = ['a', 'b']
>>> u._refname()
'/a/b'
"""
return '/' + '/'.join(self._namestack)
def _mkref(self, obj):
obj_id = id(obj)
try:
self._obj_to_idx[obj_id]
except KeyError:
self._obj_to_idx[obj_id] = len(self._objs)
self._objs.append(obj)
# Backwards compatibility: old versions of jsonpickle
# produced "py/ref" references.
self._namedict[self._refname()] = obj
return obj
def _swapref(self, proxy, instance):
proxy_id = id(proxy)
instance_id = id(instance)
self._obj_to_idx[instance_id] = self._obj_to_idx[proxy_id]
del self._obj_to_idx[proxy_id]
self._objs[-1] = instance
self._namedict[self._refname()] = instance
def loadclass(module_and_name):
"""Loads the module and returns the class.
>>> cls = loadclass('datetime.datetime')
>>> cls.__name__
'datetime'
>>> loadclass('does.not.exist')
>>> loadclass('__builtin__.int')()
0
"""
try:
module, name = module_and_name.rsplit('.', 1)
module = util.untranslate_module_name(module)
__import__(module)
return getattr(sys.modules[module], name)
except:
return None
def getargs(obj):
"""Return arguments suitable for __new__()"""
# Let saved newargs take precedence over everything
if has_tag(obj, tags.NEWARGS):
return obj[tags.NEWARGS]
if has_tag(obj, tags.INITARGS):
return obj[tags.INITARGS]
try:
seq_list = obj[tags.SEQ]
obj_dict = obj[tags.OBJECT]
except KeyError:
return []
typeref = loadclass(obj_dict)
if not typeref:
return []
if hasattr(typeref, '_fields'):
if len(typeref._fields) == len(seq_list):
return seq_list
return []
class _trivialclassic:
"""
A trivial class that can be instantiated with no args
"""
def make_blank_classic(cls):
"""
Implement the mandated strategy for dealing with classic classes
which cannot be instantiated without __getinitargs__ because they
take parameters
"""
instance = _trivialclassic()
instance.__class__ = cls
return instance
def loadrepr(reprstr):
"""Returns an instance of the object from the object's repr() string.
It involves the dynamic specification of code.
>>> obj = loadrepr('datetime/datetime.datetime.now()')
>>> obj.__class__.__name__
'datetime'
"""
module, evalstr = reprstr.split('/')
mylocals = locals()
localname = module
if '.' in localname:
localname = module.split('.', 1)[0]
mylocals[localname] = __import__(module)
return eval(evalstr)
def has_tag(obj, tag):
"""Helper class that tests to see if the obj is a dictionary
and contains a particular key/tag.
>>> obj = {'test': 1}
>>> has_tag(obj, 'test')
True
>>> has_tag(obj, 'fail')
False
>>> has_tag(42, 'fail')
False
"""
return type(obj) is dict and tag in obj
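if __name__ == '__main__':
    # Editor's sketch: decode() falls through to plain JSON semantics for
    # untagged builtin types, so this document restores to a plain dict.
    print(decode('{"key": [1, 2, 3]}'))  # -> {'key': [1, 2, 3]}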
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2021, 6, 28)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
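# Editor's note (illustrative): because month < 32 and day < 32, the packing
# above is equivalent to year*512 + month*32 + day, which is strictly
# monotonic in (year, month, day).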
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
if date < _FORWARD_COMPATIBILITY_HORIZON:
logging.warning("Trying to set the forward compatibility date to the past"
" date %s. This will be ignored by TensorFlow." % (date))
return
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, depending on the month). Must be an
      `int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
  if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
    day: A day (1 <= day <= 31, depending on the month). Must be an
      `int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
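if __name__ == "__main__":
  # Editor's sanity check: a date far in the past is behind any horizon,
  # while a far-future date is not yet forward compatible.
  print(forward_compatible(2017, 1, 1))   # expected: True
  print(forward_compatible(2100, 1, 1))   # expected: False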
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import command
from command import CheckType
import config
import mle
import node
LEADER = 1
ROUTER1 = 2
class Cert_5_1_06_RemoveRouterId(unittest.TestCase):
def setUp(self):
self.simulator = config.create_default_simulator()
self.nodes = {}
for i in range(1, 3):
self.nodes[i] = node.Node(i, simulator=self.simulator)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER1].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER1].set_panid(0xface)
self.nodes[ROUTER1].set_mode('rsdn')
self.nodes[ROUTER1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER1].enable_whitelist()
self.nodes[ROUTER1].set_router_selection_jitter(1)
def tearDown(self):
for n in list(self.nodes.values()):
n.stop()
n.destroy()
self.simulator.stop()
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
rloc16 = self.nodes[ROUTER1].get_addr16()
for addr in self.nodes[ROUTER1].get_addrs():
self.assertTrue(self.nodes[LEADER].ping(addr))
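# The Thread router ID occupies the upper 6 bits of the RLOC16, hence the
# >> 10 shift below.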
self.nodes[LEADER].release_router_id(rloc16 >> 10)
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
leader_messages = self.simulator.get_messages_sent_by(LEADER)
router1_messages = self.simulator.get_messages_sent_by(ROUTER1)
# 1 - All
leader_messages.next_mle_message(mle.CommandType.ADVERTISEMENT)
router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
leader_messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
router1_messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
leader_messages.next_mle_message(mle.CommandType.CHILD_ID_RESPONSE)
msg = router1_messages.next_coap_message("0.02")
msg.assertCoapMessageRequestUriPath("/a/as")
leader_messages.next_coap_message("2.04")
# 2 - N/A
# 3 - Router1
msg = router1_messages.next_mle_message(mle.CommandType.PARENT_REQUEST)
command.check_parent_request(msg, is_first_request=True)
msg = router1_messages.next_mle_message(
mle.CommandType.CHILD_ID_REQUEST, sent_to_node=self.nodes[LEADER]
)
command.check_child_id_request(
msg,
tlv_request=CheckType.CONTAIN,
mle_frame_counter=CheckType.OPTIONAL,
address_registration=CheckType.NOT_CONTAIN,
active_timestamp=CheckType.OPTIONAL,
pending_timestamp=CheckType.OPTIONAL,
)
msg = router1_messages.next_coap_message(code="0.02")
command.check_address_solicit(msg, was_router=True)
# 4 - Router1
for addr in self.nodes[ROUTER1].get_addrs():
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
|
import platform
import time
import cv2
import socket
import struct
import pickle
import imutils
host = '192.168.0.4'
port = 1515
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.connect((host, port))
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
conn = server.makefile('wb')
sysinfo = {"HOSTNAME:":f"{host_name}","IP:":f"{host_ip}"}
while True:
mess = server.recv(1024).decode('utf-8')
find = 1
if mess == "-hn":
find = 0
try:
server.send(sysinfo["HOSTNAME:"].encode('utf-8'))
except KeyError:
server.send("UNABLE TO FIND HOSTNAME".encode('utf-8'))
if mess == "-ip":
find = 0
try:
server.send(sysinfo["IP:"].encode('utf-8'))
except KeyError:
server.send("UNABLE TO FIND IP".encode('utf-8'))
if mess == 'sysinfo':
check = 0
while check == 0:
my_system = platform.uname()
server.send(f"System: {my_system.system}".encode('utf-8'))
server.send(f"Node Name: {my_system.node}".encode('utf-8'))
server.send(f"Release: {my_system.release}".encode('utf-8'))
server.send(f"Version: {my_system.version}".encode('utf-8'))
server.send(f"Machine: {my_system.machine}".encode('utf-8'))
server.send(f"Processor: {my_system.processor}".encode('utf-8'))
check = 1
if mess == "webcam":
cam = cv2.VideoCapture(0)
img_counter = 0
# encode to jpeg format
# encode param image quality 0 to 100. default:95
# if you want to shrink data size, choose low image quality.
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
while True:
ret, frame = cam.read()
# Resize the frame
frame = imutils.resize(frame, width=600, height=400)
# Mirror the frame horizontally
frame = cv2.flip(frame, 1)
result, image = cv2.imencode('.jpg', frame, encode_param)
data = pickle.dumps(image, 0)
size = len(data)
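# Every 10th frame is sent with length-prefix framing: a 4-byte
# big-endian size precedes the pickled JPEG so the receiver knows
# how many bytes belong to this frame.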
if img_counter % 10 == 0:
server.sendall(struct.pack(">L", size) + data)
cv2.imshow('client', frame)
img_counter += 1
# Exit the loop when the q key is pressed
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cam.release()
cv2.destroyAllWindows()
elif find == 1:
server.send(f"'{mess}'is not recognized as an internal or external command, operable program or batch file."
f"".encode('utf-8'))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'ListWebAppAuthSettingsSlotResult',
'AwaitableListWebAppAuthSettingsSlotResult',
'list_web_app_auth_settings_slot',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:web:listWebAppAuthSettingsSlot'.""", DeprecationWarning)
@pulumi.output_type
class ListWebAppAuthSettingsSlotResult:
"""
Configuration settings for the Azure App Service Authentication / Authorization feature.
"""
def __init__(__self__, aad_claims_authorization=None, additional_login_params=None, allowed_audiences=None, allowed_external_redirect_urls=None, auth_file_path=None, client_id=None, client_secret=None, client_secret_certificate_thumbprint=None, client_secret_setting_name=None, default_provider=None, enabled=None, facebook_app_id=None, facebook_app_secret=None, facebook_app_secret_setting_name=None, facebook_o_auth_scopes=None, git_hub_client_id=None, git_hub_client_secret=None, git_hub_client_secret_setting_name=None, git_hub_o_auth_scopes=None, google_client_id=None, google_client_secret=None, google_client_secret_setting_name=None, google_o_auth_scopes=None, id=None, is_auth_from_file=None, issuer=None, kind=None, microsoft_account_client_id=None, microsoft_account_client_secret=None, microsoft_account_client_secret_setting_name=None, microsoft_account_o_auth_scopes=None, name=None, runtime_version=None, system_data=None, token_refresh_extension_hours=None, token_store_enabled=None, twitter_consumer_key=None, twitter_consumer_secret=None, twitter_consumer_secret_setting_name=None, type=None, unauthenticated_client_action=None, validate_issuer=None):
if aad_claims_authorization and not isinstance(aad_claims_authorization, str):
raise TypeError("Expected argument 'aad_claims_authorization' to be a str")
pulumi.set(__self__, "aad_claims_authorization", aad_claims_authorization)
if additional_login_params and not isinstance(additional_login_params, list):
raise TypeError("Expected argument 'additional_login_params' to be a list")
pulumi.set(__self__, "additional_login_params", additional_login_params)
if allowed_audiences and not isinstance(allowed_audiences, list):
raise TypeError("Expected argument 'allowed_audiences' to be a list")
pulumi.set(__self__, "allowed_audiences", allowed_audiences)
if allowed_external_redirect_urls and not isinstance(allowed_external_redirect_urls, list):
raise TypeError("Expected argument 'allowed_external_redirect_urls' to be a list")
pulumi.set(__self__, "allowed_external_redirect_urls", allowed_external_redirect_urls)
if auth_file_path and not isinstance(auth_file_path, str):
raise TypeError("Expected argument 'auth_file_path' to be a str")
pulumi.set(__self__, "auth_file_path", auth_file_path)
if client_id and not isinstance(client_id, str):
raise TypeError("Expected argument 'client_id' to be a str")
pulumi.set(__self__, "client_id", client_id)
if client_secret and not isinstance(client_secret, str):
raise TypeError("Expected argument 'client_secret' to be a str")
pulumi.set(__self__, "client_secret", client_secret)
if client_secret_certificate_thumbprint and not isinstance(client_secret_certificate_thumbprint, str):
raise TypeError("Expected argument 'client_secret_certificate_thumbprint' to be a str")
pulumi.set(__self__, "client_secret_certificate_thumbprint", client_secret_certificate_thumbprint)
if client_secret_setting_name and not isinstance(client_secret_setting_name, str):
raise TypeError("Expected argument 'client_secret_setting_name' to be a str")
pulumi.set(__self__, "client_secret_setting_name", client_secret_setting_name)
if default_provider and not isinstance(default_provider, str):
raise TypeError("Expected argument 'default_provider' to be a str")
pulumi.set(__self__, "default_provider", default_provider)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if facebook_app_id and not isinstance(facebook_app_id, str):
raise TypeError("Expected argument 'facebook_app_id' to be a str")
pulumi.set(__self__, "facebook_app_id", facebook_app_id)
if facebook_app_secret and not isinstance(facebook_app_secret, str):
raise TypeError("Expected argument 'facebook_app_secret' to be a str")
pulumi.set(__self__, "facebook_app_secret", facebook_app_secret)
if facebook_app_secret_setting_name and not isinstance(facebook_app_secret_setting_name, str):
raise TypeError("Expected argument 'facebook_app_secret_setting_name' to be a str")
pulumi.set(__self__, "facebook_app_secret_setting_name", facebook_app_secret_setting_name)
if facebook_o_auth_scopes and not isinstance(facebook_o_auth_scopes, list):
raise TypeError("Expected argument 'facebook_o_auth_scopes' to be a list")
pulumi.set(__self__, "facebook_o_auth_scopes", facebook_o_auth_scopes)
if git_hub_client_id and not isinstance(git_hub_client_id, str):
raise TypeError("Expected argument 'git_hub_client_id' to be a str")
pulumi.set(__self__, "git_hub_client_id", git_hub_client_id)
if git_hub_client_secret and not isinstance(git_hub_client_secret, str):
raise TypeError("Expected argument 'git_hub_client_secret' to be a str")
pulumi.set(__self__, "git_hub_client_secret", git_hub_client_secret)
if git_hub_client_secret_setting_name and not isinstance(git_hub_client_secret_setting_name, str):
raise TypeError("Expected argument 'git_hub_client_secret_setting_name' to be a str")
pulumi.set(__self__, "git_hub_client_secret_setting_name", git_hub_client_secret_setting_name)
if git_hub_o_auth_scopes and not isinstance(git_hub_o_auth_scopes, list):
raise TypeError("Expected argument 'git_hub_o_auth_scopes' to be a list")
pulumi.set(__self__, "git_hub_o_auth_scopes", git_hub_o_auth_scopes)
if google_client_id and not isinstance(google_client_id, str):
raise TypeError("Expected argument 'google_client_id' to be a str")
pulumi.set(__self__, "google_client_id", google_client_id)
if google_client_secret and not isinstance(google_client_secret, str):
raise TypeError("Expected argument 'google_client_secret' to be a str")
pulumi.set(__self__, "google_client_secret", google_client_secret)
if google_client_secret_setting_name and not isinstance(google_client_secret_setting_name, str):
raise TypeError("Expected argument 'google_client_secret_setting_name' to be a str")
pulumi.set(__self__, "google_client_secret_setting_name", google_client_secret_setting_name)
if google_o_auth_scopes and not isinstance(google_o_auth_scopes, list):
raise TypeError("Expected argument 'google_o_auth_scopes' to be a list")
pulumi.set(__self__, "google_o_auth_scopes", google_o_auth_scopes)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_auth_from_file and not isinstance(is_auth_from_file, str):
raise TypeError("Expected argument 'is_auth_from_file' to be a str")
pulumi.set(__self__, "is_auth_from_file", is_auth_from_file)
if issuer and not isinstance(issuer, str):
raise TypeError("Expected argument 'issuer' to be a str")
pulumi.set(__self__, "issuer", issuer)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if microsoft_account_client_id and not isinstance(microsoft_account_client_id, str):
raise TypeError("Expected argument 'microsoft_account_client_id' to be a str")
pulumi.set(__self__, "microsoft_account_client_id", microsoft_account_client_id)
if microsoft_account_client_secret and not isinstance(microsoft_account_client_secret, str):
raise TypeError("Expected argument 'microsoft_account_client_secret' to be a str")
pulumi.set(__self__, "microsoft_account_client_secret", microsoft_account_client_secret)
if microsoft_account_client_secret_setting_name and not isinstance(microsoft_account_client_secret_setting_name, str):
raise TypeError("Expected argument 'microsoft_account_client_secret_setting_name' to be a str")
pulumi.set(__self__, "microsoft_account_client_secret_setting_name", microsoft_account_client_secret_setting_name)
if microsoft_account_o_auth_scopes and not isinstance(microsoft_account_o_auth_scopes, list):
raise TypeError("Expected argument 'microsoft_account_o_auth_scopes' to be a list")
pulumi.set(__self__, "microsoft_account_o_auth_scopes", microsoft_account_o_auth_scopes)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if runtime_version and not isinstance(runtime_version, str):
raise TypeError("Expected argument 'runtime_version' to be a str")
pulumi.set(__self__, "runtime_version", runtime_version)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if token_refresh_extension_hours and not isinstance(token_refresh_extension_hours, float):
raise TypeError("Expected argument 'token_refresh_extension_hours' to be a float")
pulumi.set(__self__, "token_refresh_extension_hours", token_refresh_extension_hours)
if token_store_enabled and not isinstance(token_store_enabled, bool):
raise TypeError("Expected argument 'token_store_enabled' to be a bool")
pulumi.set(__self__, "token_store_enabled", token_store_enabled)
if twitter_consumer_key and not isinstance(twitter_consumer_key, str):
raise TypeError("Expected argument 'twitter_consumer_key' to be a str")
pulumi.set(__self__, "twitter_consumer_key", twitter_consumer_key)
if twitter_consumer_secret and not isinstance(twitter_consumer_secret, str):
raise TypeError("Expected argument 'twitter_consumer_secret' to be a str")
pulumi.set(__self__, "twitter_consumer_secret", twitter_consumer_secret)
if twitter_consumer_secret_setting_name and not isinstance(twitter_consumer_secret_setting_name, str):
raise TypeError("Expected argument 'twitter_consumer_secret_setting_name' to be a str")
pulumi.set(__self__, "twitter_consumer_secret_setting_name", twitter_consumer_secret_setting_name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unauthenticated_client_action and not isinstance(unauthenticated_client_action, str):
raise TypeError("Expected argument 'unauthenticated_client_action' to be a str")
pulumi.set(__self__, "unauthenticated_client_action", unauthenticated_client_action)
if validate_issuer and not isinstance(validate_issuer, bool):
raise TypeError("Expected argument 'validate_issuer' to be a bool")
pulumi.set(__self__, "validate_issuer", validate_issuer)
@property
@pulumi.getter(name="aadClaimsAuthorization")
def aad_claims_authorization(self) -> Optional[str]:
"""
Gets a JSON string containing the Azure AD Acl settings.
"""
return pulumi.get(self, "aad_claims_authorization")
@property
@pulumi.getter(name="additionalLoginParams")
def additional_login_params(self) -> Optional[Sequence[str]]:
"""
Login parameters to send to the OpenID Connect authorization endpoint when
a user logs in. Each parameter must be in the form "key=value".
"""
return pulumi.get(self, "additional_login_params")
@property
@pulumi.getter(name="allowedAudiences")
def allowed_audiences(self) -> Optional[Sequence[str]]:
"""
Allowed audience values to consider when validating JWTs issued by
Azure Active Directory. Note that the <code>ClientID</code> value is always considered an
allowed audience, regardless of this setting.
"""
return pulumi.get(self, "allowed_audiences")
@property
@pulumi.getter(name="allowedExternalRedirectUrls")
def allowed_external_redirect_urls(self) -> Optional[Sequence[str]]:
"""
External URLs that can be redirected to as part of logging in or logging out of the app. Note that the query string part of the URL is ignored.
This is an advanced setting typically only needed by Windows Store application backends.
Note that URLs within the current domain are always implicitly allowed.
"""
return pulumi.get(self, "allowed_external_redirect_urls")
@property
@pulumi.getter(name="authFilePath")
def auth_file_path(self) -> Optional[str]:
"""
The path of the config file containing auth settings.
If the path is relative, the base will be the site's root directory.
"""
return pulumi.get(self, "auth_file_path")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[str]:
"""
The Client ID of this relying party application, known as the client_id.
This setting is required for enabling OpenID Connect authentication with Azure Active Directory or
other 3rd party OpenID Connect providers.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
"""
The Client Secret of this relying party application (in Azure Active Directory, this is also referred to as the Key).
This setting is optional. If no client secret is configured, the OpenID Connect implicit auth flow is used to authenticate end users.
Otherwise, the OpenID Connect Authorization Code Flow is used to authenticate end users.
More information on OpenID Connect: http://openid.net/specs/openid-connect-core-1_0.html
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="clientSecretCertificateThumbprint")
def client_secret_certificate_thumbprint(self) -> Optional[str]:
"""
An alternative to the client secret, that is the thumbprint of a certificate used for signing purposes. This property acts as
a replacement for the Client Secret. It is also optional.
"""
return pulumi.get(self, "client_secret_certificate_thumbprint")
@property
@pulumi.getter(name="clientSecretSettingName")
def client_secret_setting_name(self) -> Optional[str]:
"""
The app setting name that contains the client secret of the relying party application.
"""
return pulumi.get(self, "client_secret_setting_name")
@property
@pulumi.getter(name="defaultProvider")
def default_provider(self) -> Optional[str]:
"""
The default authentication provider to use when multiple providers are configured.
This setting is only needed if multiple providers are configured and the unauthenticated client
action is set to "RedirectToLoginPage".
"""
return pulumi.get(self, "default_provider")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
<code>true</code> if the Authentication / Authorization feature is enabled for the current app; otherwise, <code>false</code>.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="facebookAppId")
def facebook_app_id(self) -> Optional[str]:
"""
The App ID of the Facebook app used for login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_app_id")
@property
@pulumi.getter(name="facebookAppSecret")
def facebook_app_secret(self) -> Optional[str]:
"""
The App Secret of the Facebook app used for Facebook Login.
This setting is required for enabling Facebook Login.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_app_secret")
@property
@pulumi.getter(name="facebookAppSecretSettingName")
def facebook_app_secret_setting_name(self) -> Optional[str]:
"""
The app setting name that contains the app secret used for Facebook Login.
"""
return pulumi.get(self, "facebook_app_secret_setting_name")
@property
@pulumi.getter(name="facebookOAuthScopes")
def facebook_o_auth_scopes(self) -> Optional[Sequence[str]]:
"""
The OAuth 2.0 scopes that will be requested as part of Facebook Login authentication.
This setting is optional.
Facebook Login documentation: https://developers.facebook.com/docs/facebook-login
"""
return pulumi.get(self, "facebook_o_auth_scopes")
@property
@pulumi.getter(name="gitHubClientId")
def git_hub_client_id(self) -> Optional[str]:
"""
The Client ID of the GitHub app used for login.
This setting is required for enabling GitHub Login.
"""
return pulumi.get(self, "git_hub_client_id")
@property
@pulumi.getter(name="gitHubClientSecret")
def git_hub_client_secret(self) -> Optional[str]:
"""
The Client Secret of the GitHub app used for GitHub Login.
This setting is required for enabling GitHub Login.
"""
return pulumi.get(self, "git_hub_client_secret")
@property
@pulumi.getter(name="gitHubClientSecretSettingName")
def git_hub_client_secret_setting_name(self) -> Optional[str]:
"""
The app setting name that contains the client secret of the GitHub
app used for GitHub Login.
"""
return pulumi.get(self, "git_hub_client_secret_setting_name")
@property
@pulumi.getter(name="gitHubOAuthScopes")
def git_hub_o_auth_scopes(self) -> Optional[Sequence[str]]:
"""
The OAuth 2.0 scopes that will be requested as part of GitHub Login authentication.
This setting is optional.
"""
return pulumi.get(self, "git_hub_o_auth_scopes")
@property
@pulumi.getter(name="googleClientId")
def google_client_id(self) -> Optional[str]:
"""
The OpenID Connect Client ID for the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
"""
return pulumi.get(self, "google_client_id")
@property
@pulumi.getter(name="googleClientSecret")
def google_client_secret(self) -> Optional[str]:
"""
The client secret associated with the Google web application.
This setting is required for enabling Google Sign-In.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
"""
return pulumi.get(self, "google_client_secret")
@property
@pulumi.getter(name="googleClientSecretSettingName")
def google_client_secret_setting_name(self) -> Optional[str]:
"""
The app setting name that contains the client secret associated with
the Google web application.
"""
return pulumi.get(self, "google_client_secret_setting_name")
@property
@pulumi.getter(name="googleOAuthScopes")
def google_o_auth_scopes(self) -> Optional[Sequence[str]]:
"""
The OAuth 2.0 scopes that will be requested as part of Google Sign-In authentication.
This setting is optional. If not specified, "openid", "profile", and "email" are used as default scopes.
Google Sign-In documentation: https://developers.google.com/identity/sign-in/web/
"""
return pulumi.get(self, "google_o_auth_scopes")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isAuthFromFile")
def is_auth_from_file(self) -> Optional[str]:
"""
"true" if the auth config settings should be read from a file,
"false" otherwise
"""
return pulumi.get(self, "is_auth_from_file")
@property
@pulumi.getter
def issuer(self) -> Optional[str]:
"""
The OpenID Connect Issuer URI that represents the entity which issues access tokens for this application.
When using Azure Active Directory, this value is the URI of the directory tenant, e.g. https://sts.windows.net/{tenant-guid}/.
This URI is a case-sensitive identifier for the token issuer.
More information on OpenID Connect Discovery: http://openid.net/specs/openid-connect-discovery-1_0.html
"""
return pulumi.get(self, "issuer")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="microsoftAccountClientId")
def microsoft_account_client_id(self) -> Optional[str]:
"""
The OAuth 2.0 client ID that was created for the app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
"""
return pulumi.get(self, "microsoft_account_client_id")
@property
@pulumi.getter(name="microsoftAccountClientSecret")
def microsoft_account_client_secret(self) -> Optional[str]:
"""
The OAuth 2.0 client secret that was created for the app used for authentication.
This setting is required for enabling Microsoft Account authentication.
Microsoft Account OAuth documentation: https://dev.onedrive.com/auth/msa_oauth.htm
"""
return pulumi.get(self, "microsoft_account_client_secret")
@property
@pulumi.getter(name="microsoftAccountClientSecretSettingName")
def microsoft_account_client_secret_setting_name(self) -> Optional[str]:
"""
The app setting name containing the OAuth 2.0 client secret that was created for the
app used for authentication.
"""
return pulumi.get(self, "microsoft_account_client_secret_setting_name")
@property
@pulumi.getter(name="microsoftAccountOAuthScopes")
def microsoft_account_o_auth_scopes(self) -> Optional[Sequence[str]]:
"""
The OAuth 2.0 scopes that will be requested as part of Microsoft Account authentication.
This setting is optional. If not specified, "wl.basic" is used as the default scope.
Microsoft Account Scopes and permissions documentation: https://msdn.microsoft.com/en-us/library/dn631845.aspx
"""
return pulumi.get(self, "microsoft_account_o_auth_scopes")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="runtimeVersion")
def runtime_version(self) -> Optional[str]:
"""
The RuntimeVersion of the Authentication / Authorization feature in use for the current app.
The setting in this value can control the behavior of certain features in the Authentication / Authorization module.
"""
return pulumi.get(self, "runtime_version")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tokenRefreshExtensionHours")
def token_refresh_extension_hours(self) -> Optional[float]:
"""
The number of hours after session token expiration that a session token can be used to
call the token refresh API. The default is 72 hours.
"""
return pulumi.get(self, "token_refresh_extension_hours")
@property
@pulumi.getter(name="tokenStoreEnabled")
def token_store_enabled(self) -> Optional[bool]:
"""
<code>true</code> to durably store platform-specific security tokens that are obtained during login flows; otherwise, <code>false</code>.
The default is <code>false</code>.
"""
return pulumi.get(self, "token_store_enabled")
@property
@pulumi.getter(name="twitterConsumerKey")
def twitter_consumer_key(self) -> Optional[str]:
"""
The OAuth 1.0a consumer key of the Twitter application used for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
"""
return pulumi.get(self, "twitter_consumer_key")
@property
@pulumi.getter(name="twitterConsumerSecret")
def twitter_consumer_secret(self) -> Optional[str]:
"""
The OAuth 1.0a consumer secret of the Twitter application used for sign-in.
This setting is required for enabling Twitter Sign-In.
Twitter Sign-In documentation: https://dev.twitter.com/web/sign-in
"""
return pulumi.get(self, "twitter_consumer_secret")
@property
@pulumi.getter(name="twitterConsumerSecretSettingName")
def twitter_consumer_secret_setting_name(self) -> Optional[str]:
"""
The app setting name that contains the OAuth 1.0a consumer secret of the Twitter
application used for sign-in.
"""
return pulumi.get(self, "twitter_consumer_secret_setting_name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="unauthenticatedClientAction")
def unauthenticated_client_action(self) -> Optional[str]:
"""
The action to take when an unauthenticated client attempts to access the app.
"""
return pulumi.get(self, "unauthenticated_client_action")
@property
@pulumi.getter(name="validateIssuer")
def validate_issuer(self) -> Optional[bool]:
"""
Gets a value indicating whether the issuer should be a valid HTTPS url and be validated as such.
"""
return pulumi.get(self, "validate_issuer")
class AwaitableListWebAppAuthSettingsSlotResult(ListWebAppAuthSettingsSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppAuthSettingsSlotResult(
aad_claims_authorization=self.aad_claims_authorization,
additional_login_params=self.additional_login_params,
allowed_audiences=self.allowed_audiences,
allowed_external_redirect_urls=self.allowed_external_redirect_urls,
auth_file_path=self.auth_file_path,
client_id=self.client_id,
client_secret=self.client_secret,
client_secret_certificate_thumbprint=self.client_secret_certificate_thumbprint,
client_secret_setting_name=self.client_secret_setting_name,
default_provider=self.default_provider,
enabled=self.enabled,
facebook_app_id=self.facebook_app_id,
facebook_app_secret=self.facebook_app_secret,
facebook_app_secret_setting_name=self.facebook_app_secret_setting_name,
facebook_o_auth_scopes=self.facebook_o_auth_scopes,
git_hub_client_id=self.git_hub_client_id,
git_hub_client_secret=self.git_hub_client_secret,
git_hub_client_secret_setting_name=self.git_hub_client_secret_setting_name,
git_hub_o_auth_scopes=self.git_hub_o_auth_scopes,
google_client_id=self.google_client_id,
google_client_secret=self.google_client_secret,
google_client_secret_setting_name=self.google_client_secret_setting_name,
google_o_auth_scopes=self.google_o_auth_scopes,
id=self.id,
is_auth_from_file=self.is_auth_from_file,
issuer=self.issuer,
kind=self.kind,
microsoft_account_client_id=self.microsoft_account_client_id,
microsoft_account_client_secret=self.microsoft_account_client_secret,
microsoft_account_client_secret_setting_name=self.microsoft_account_client_secret_setting_name,
microsoft_account_o_auth_scopes=self.microsoft_account_o_auth_scopes,
name=self.name,
runtime_version=self.runtime_version,
system_data=self.system_data,
token_refresh_extension_hours=self.token_refresh_extension_hours,
token_store_enabled=self.token_store_enabled,
twitter_consumer_key=self.twitter_consumer_key,
twitter_consumer_secret=self.twitter_consumer_secret,
twitter_consumer_secret_setting_name=self.twitter_consumer_secret_setting_name,
type=self.type,
unauthenticated_client_action=self.unauthenticated_client_action,
validate_issuer=self.validate_issuer)
def list_web_app_auth_settings_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppAuthSettingsSlotResult:
"""
Configuration settings for the Azure App Service Authentication / Authorization feature.
Latest API Version: 2020-10-01.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get the settings for the production slot.
"""
pulumi.log.warn("list_web_app_auth_settings_slot is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:web:listWebAppAuthSettingsSlot'.")
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/latest:listWebAppAuthSettingsSlot', __args__, opts=opts, typ=ListWebAppAuthSettingsSlotResult).value
return AwaitableListWebAppAuthSettingsSlotResult(
aad_claims_authorization=__ret__.aad_claims_authorization,
additional_login_params=__ret__.additional_login_params,
allowed_audiences=__ret__.allowed_audiences,
allowed_external_redirect_urls=__ret__.allowed_external_redirect_urls,
auth_file_path=__ret__.auth_file_path,
client_id=__ret__.client_id,
client_secret=__ret__.client_secret,
client_secret_certificate_thumbprint=__ret__.client_secret_certificate_thumbprint,
client_secret_setting_name=__ret__.client_secret_setting_name,
default_provider=__ret__.default_provider,
enabled=__ret__.enabled,
facebook_app_id=__ret__.facebook_app_id,
facebook_app_secret=__ret__.facebook_app_secret,
facebook_app_secret_setting_name=__ret__.facebook_app_secret_setting_name,
facebook_o_auth_scopes=__ret__.facebook_o_auth_scopes,
git_hub_client_id=__ret__.git_hub_client_id,
git_hub_client_secret=__ret__.git_hub_client_secret,
git_hub_client_secret_setting_name=__ret__.git_hub_client_secret_setting_name,
git_hub_o_auth_scopes=__ret__.git_hub_o_auth_scopes,
google_client_id=__ret__.google_client_id,
google_client_secret=__ret__.google_client_secret,
google_client_secret_setting_name=__ret__.google_client_secret_setting_name,
google_o_auth_scopes=__ret__.google_o_auth_scopes,
id=__ret__.id,
is_auth_from_file=__ret__.is_auth_from_file,
issuer=__ret__.issuer,
kind=__ret__.kind,
microsoft_account_client_id=__ret__.microsoft_account_client_id,
microsoft_account_client_secret=__ret__.microsoft_account_client_secret,
microsoft_account_client_secret_setting_name=__ret__.microsoft_account_client_secret_setting_name,
microsoft_account_o_auth_scopes=__ret__.microsoft_account_o_auth_scopes,
name=__ret__.name,
runtime_version=__ret__.runtime_version,
system_data=__ret__.system_data,
token_refresh_extension_hours=__ret__.token_refresh_extension_hours,
token_store_enabled=__ret__.token_store_enabled,
twitter_consumer_key=__ret__.twitter_consumer_key,
twitter_consumer_secret=__ret__.twitter_consumer_secret,
twitter_consumer_secret_setting_name=__ret__.twitter_consumer_secret_setting_name,
type=__ret__.type,
unauthenticated_client_action=__ret__.unauthenticated_client_action,
validate_issuer=__ret__.validate_issuer)
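# Usage sketch (resource names are hypothetical):
#   result = list_web_app_auth_settings_slot(
#       name="my-app", resource_group_name="my-rg", slot="staging")
#   print(result.enabled, result.unauthenticated_client_action)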
|
import sys
import json
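# Convert a QGroundControl `.mission` (JSON) file into the legacy
# tab-separated "QGC WPL 110" waypoint format and print it to stdout.
# Row 0 is the planned home position; the hard-coded 0 in the second
# column is the WPL "current waypoint" flag.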
def item_to_string(obj):
return '\t'.join(str(v) for v in [
obj["id"],
0,
obj["frame"],
obj["command"],
obj["param1"],
obj["param2"],
obj["param3"],
obj["param4"],
obj["coordinate"][0],
obj["coordinate"][1],
obj["coordinate"][2],
1 if obj["autoContinue"] else 0
]) + "\r\n"
args = sys.argv[1:]
if not args:
print("Please provide the path to a `.mission` (json) file from QGroundControl")
sys.exit(1)
mission_path = args[0]
json_data = {}
with open(mission_path) as data_file:
json_data = json.load(data_file)
if not json_data["plannedHomePosition"]:
print("No planned home position set!")
sys.exit(2)
home = json_data["plannedHomePosition"]
output_str = "QGC WPL 110\r\n" + item_to_string(home)
for item in json_data["items"]:
output_str += item_to_string(item)
output_str = output_str.strip() + "\r\n" # add back the final newline
print(output_str)
|
import os
import re
import argparse
# ###############################################################
# Class for generating task directories, each containing
# a config.txt file
# ###############################################################
class GenTaskDirs():
#--------------------------
#constructor
#--------------------------
def __init__(self):
#members
self.dirs = ''
self.template = ''
#method calls in order
self.parse_args()
self.generate_dirs()
#--------------------------
#parse command line args
#--------------------------
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument("-d",
"--dirs",
action='store',
default="list_of_experiments.mar_2021",
help="File containing list of directories")
parser.add_argument("-t",
"--template",
action='store',
default="template_config.mar_2021",
help="File containing the template config file")
parser.add_argument("-o",
"--only_print",
action='store_true',
help="Only print. Do not execute commands")
parser.add_argument("-s",
"--arch_suffix",
default=None,
action='store',
help="Add this suffix to arch file name")
parser.add_argument("-v",
"--vtr_flow_dir",
default="..",
action='store',
help="Path of vtr_flow directory")
args = parser.parse_args()
print("Parsed arguments:", vars(args))
self.dirs = args.dirs
self.template = args.template
self.only_print = args.only_print
self.arch_suffix = args.arch_suffix
self.vtr_flow_dir = args.vtr_flow_dir
#--------------------------
#generate task directories
#--------------------------
def generate_dirs(self):
dirs = open(self.dirs, 'r')
#the dirs file contains dir names. each dir_name contains
#information about the experiment
for line in dirs:
expname = line.rstrip()
#if the line is commented out, ignore it
check_for_comment = re.search(r'^#', expname)
if check_for_comment is not None:
continue
print("Processing: " + expname)
#evaluate arch info from expname
ag = re.search(r'agilex\.', expname)
st = re.search(r'stratix\.', expname)
if ag is not None:
if self.arch_suffix is not None:
arch_file = "agilex_like_arch.auto_layout." + self.arch_suffix + ".xml"
else:
arch_file = "agilex_like_arch.auto_layout.xml"
arch_dir = "arch/COFFE_22nm"
elif st is not None:
arch_file = "k6_frac_N10_frac_chain_depop50_mem32K_40nm.xml"
arch_dir = "arch/timing"
else:
print("Unable to extract arch info from " + expname)
raise SystemExit(0)
#check it exists
arch_file_path = self.vtr_flow_dir+"/"+arch_dir+"/"+ arch_file
if not os.path.exists(arch_file_path):
print("Arch file {} doesn't exist".format(arch_file_path))
raise SystemExit(0)
#evaluate design dir based on expname
ml = re.search(r'\.ml\.', expname)
non_ml = re.search(r'\.non_ml\.', expname)
if ml is not None:
design_dir="benchmarks/verilog/ml_benchmarks"
elif non_ml is not None:
design_dir="benchmarks/verilog/"
else:
print("Unable to extract design dir from " + expname)
raise SystemExit(0)
#extract benchmark info from expname
info = re.search(r'(agilex|stratix)\.(ml|non_ml)\.(.*)', expname)
if info is not None:
design_file = info.group(3)+".v"
#sdc_file = os.path.abspath(info.group(3)+".sdc")
sdc_file = "../../../../../../sdc/"+info.group(3)+".sdc"
else:
print("Unable to extract benchmark info from " + expname)
raise SystemExit(0)
#check it exists
design_file_path = self.vtr_flow_dir + "/" + design_dir + "/" + design_file
if not os.path.exists(design_file_path):
print("Design file {} doesn't exist".format(design_file_path))
raise SystemExit(0)
#extract task dir info from expname
info = re.search(r'(agilex|stratix)\.(ml|non_ml)\.(.*)', expname)
if info is not None:
dirname = info.group(1) + "." + info.group(3)
else:
print("Unable to extract benchmark info from " + expname)
raise SystemExit(0)
#create the config file by replacing tags in the template
config_filename = dirname + "/config/config.txt"
config_dirname = dirname + "/config"
print("config_filename: ", config_filename)
print("config_dirname: ", config_dirname)
print("design_dir: ", design_dir)
print("arch_dir: ", arch_dir)
print("design_file: ", design_file)
print("arch_file: ", arch_file)
print("")
if not self.only_print:
ret = os.system("mkdir -p " + config_dirname)
if ret!=0:
print("Directory " + config_dirname + " couldn't be created")
try:
config = open(config_filename, 'w')
except OSError:
print("File " + config_filename + " couldn't be created")
raise SystemExit(0)
#add to git
os.system("git add " + config_filename)
template = open(self.template, 'r')
for line in template:
line = line.strip()
line = re.sub(r'<design_dir>', design_dir, line)
line = re.sub(r'<arch_dir>', arch_dir, line)
line = re.sub(r'<design_file>', design_file, line)
line = re.sub(r'<arch_file>', arch_file, line)
line = re.sub(r'<sdc_full_path>', sdc_file, line)
config.write(line)
config.write("\n")
config.close()
print("Done")
dirs.close()
# ###############################################################
# main()
# ###############################################################
if __name__ == "__main__":
GenTaskDirs()
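# Example invocation (the argument values are the argparse defaults above;
# the script file name is hypothetical):
#   python gen_task_dirs.py -d list_of_experiments.mar_2021 \
#       -t template_config.mar_2021 -v .. --only_print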
|
# Generated by Django 2.1.2 on 2018-10-04 16:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bulletin_board', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Title')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.AddField(
model_name='bulletin',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='bulletin_board.Category', verbose_name='Category'),
),
]
|
from ..models import Animal
from django import template
register = template.Library()
@register.simple_tag
def list_animal_types():
pet_type_list = []
for type_code, pet_type in Animal.ANIMAL_TYPE_CHOICES:
URL = f"feeding_entry_{type_code}"
text = f"{pet_type} Feeding"
pet_type_list.append((text, URL))
return pet_type_list
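# Usage sketch (template side; the {% load %} name is this module's file
# name, which is not shown here and therefore hypothetical):
#   {% load animal_tags %}
#   {% list_animal_types as feeding_links %}
#   {% for text, url_name in feeding_links %}
#     <a href="{% url url_name %}">{{ text }}</a>
#   {% endfor %}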
|
# Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# ReCode by @mrismanaziz
# FROM Man-Userbot <https://github.com/mrismanaziz/Man-Userbot>
# t.me/SharingUserbot & t.me/Lunatic0de
import random
import time
from datetime import datetime
from speedtest import Speedtest
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime, bot
from userbot.events import register
from userbot.utils import edit_or_reply, humanbytes, ayiin_cmd
from time import sleep
absen = [
"𝙃𝙖𝙙𝙞𝙧 𝙙𝙤𝙣𝙜 𝙏𝙤𝙙 😁",
"𝙃𝙖𝙙𝙞𝙧 𝙆𝙖𝙠𝙖 𝙂𝙖𝙣𝙩𝙚𝙣𝙜😉",
"𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝘾𝙤𝙣𝙩𝙤𝙡 😁",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝙂𝙖𝙣𝙩𝙚𝙣𝙜** 🥵",
"𝙃𝙖𝙙𝙞𝙧 𝙉𝙜𝙖𝙗 😎",
"**𝙂𝙪𝙖 𝙃𝙖𝙙𝙞𝙧 𝘼𝙗𝙖𝙣𝙜** 🥺",
]
ayiincakep = [
"𝙄𝙮𝙖 𝘼𝙮𝙞𝙞𝙣 𝙂𝙖𝙣𝙩𝙚𝙣𝙜 𝘽𝙖𝙣𝙜𝙚𝙩 ",
"𝙂𝙖𝙣𝙩𝙚𝙣𝙜𝙣𝙮𝙖 𝙂𝙖𝙠 𝘼𝙙𝙖 𝙇𝙖𝙬𝙖𝙣 😚",
"𝘼𝙮𝙞𝙞𝙣 𝙂𝙖𝙣𝙩𝙚𝙣𝙜𝙣𝙮𝙖 𝘼𝙠𝙪 𝙆𝙖𝙣 😍",
"𝙂𝙖𝙠 𝘼𝙙𝙖 𝙎𝙖𝙞𝙣𝙜 𝙔𝙞𝙣𝙨 😈",
]
async def get_readable_time(seconds: int) -> str:
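"""Render an uptime in seconds as a compact string, e.g.
``2Hari, 3Jam:4m:5s`` (suffixes: s, m, Jam = hours, Hari = days)."""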
count = 0
up_time = ""
time_list = []
time_suffix_list = ["s", "m", "Jam", "Hari"]
while count < 4:
count += 1
remainder, result = divmod(seconds, 60) if count < 3 else divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
up_time += time_list.pop() + ", "
time_list.reverse()
up_time += ":".join(time_list)
return up_time
@ayiin_cmd(pattern="ping$")
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
xx = await edit_or_reply(ping, "**𓆉︎**")
await xx.edit("**𓆉︎𓆉︎**")
await xx.edit("**𓆉︎𓆉︎𓆉︎**")
await xx.edit("**𓆉︎𓆉︎𓆉︎𓆉︎**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await xx.edit("⚡")
sleep(3)
await xx.edit(
f"**𝙿𝙾𝙽𝙶!!🏓**\n"
f"⚡ **𝙿𝙸𝙽𝙶𝙴𝚁** - `%sms`\n"
f"🔥 **𝚄𝙿𝚃𝙸𝙼𝙴 -** `{uptime}` \n"
f"👑**𝙾𝚆𝙽𝙴𝚁 :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@ayiin_cmd(pattern=r"xping$")
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
xping = await edit_or_reply(ping, "`Pinging....`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await xping.edit(
f"**PONG!! 🍭**\n**Pinger** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@ayiin_cmd(pattern=r"lping$")
async def _(ping):
"""For .ping command, ping the userbot from any chat."""
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
lping = await edit_or_reply(ping, "**★ PING ★**")
await lping.edit("**★★ PING ★★**")
await lping.edit("**★★★ PING ★★★**")
await lping.edit("**★★★★ PING ★★★★**")
await lping.edit("**✦҈͜͡➳ PONG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await lping.edit(
f"❃ **Ping !!** "
f"`%sms` \n"
f"❃ **Uptime -** "
f"`{uptime}` \n"
f"**✦҈͜͡➳ Master :** [{user.first_name}](tg://user?id={user.id})" % (duration)
)
@ayiin_cmd(pattern=r"keping$")
async def _(pong):
await get_readable_time((time.time() - StartTime))
start = datetime.now()
kopong = await edit_or_reply(pong, "**『⍟𝐊𝐎𝐍𝐓𝐎𝐋』**")
await kopong.edit("**◆◈𝐊𝐀𝐌𝐏𝐀𝐍𝐆◈◆**")
await kopong.edit("**𝐏𝐄𝐂𝐀𝐇𝐊𝐀𝐍 𝐁𝐈𝐉𝐈 𝐊𝐀𝐔 𝐀𝐒𝐔**")
await kopong.edit("**☬𝐒𝐈𝐀𝐏 𝐊𝐀𝐌𝐏𝐀𝐍𝐆 𝐌𝐄𝐍𝐔𝐌𝐁𝐔𝐊 𝐀𝐒𝐔☬**")
end = datetime.now()
duration = (end - start).microseconds / 1000
user = await bot.get_me()
await kopong.edit(
f"**✲ 𝙺𝙾𝙽𝚃𝙾𝙻 𝙼𝙴𝙻𝙴𝙳𝚄𝙶** "
f"\n ⫸ ᴷᵒⁿᵗᵒˡ `%sms` \n"
f"**✲ 𝙱𝙸𝙹𝙸 𝙿𝙴𝙻𝙴𝚁** "
f"\n ⫸ ᴷᵃᵐᵖᵃⁿᵍ『[{user.first_name}](tg://user?id={user.id})』 \n" % (duration)
)
# .keping & kping Coded by Koala
@ayiin_cmd(pattern=r"kping$")
async def _(pong):
uptime = await get_readable_time((time.time() - StartTime))
start = datetime.now()
kping = await edit_or_reply(pong, "8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8==✊=D")
await kping.edit("8=✊==D")
await kping.edit("8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8==✊=D")
await kping.edit("8=✊==D")
await kping.edit("8✊===D")
await kping.edit("8=✊==D")
await kping.edit("8==✊=D")
await kping.edit("8===✊D")
await kping.edit("8===✊D💦")
await kping.edit("8====D💦💦")
await kping.edit("**CROOTTTT PINGGGG!**")
end = datetime.now()
duration = (end - start).microseconds / 1000
await kping.edit(
f"**NGENTOT!! 🐨**\n**KAMPANG** : %sms\n**Bot Uptime** : {uptime}🕛" % (duration)
)
@ayiin_cmd(pattern="speedtest$")
async def _(speed):
"""For .speedtest command, use SpeedTest to check server speeds."""
xxnx = await edit_or_reply(speed, "`Running speed test...`")
test = Speedtest()
test.get_best_server()
test.download()
test.upload()
test.results.share()
result = test.results.dict()
msg = (
f"**Started at {result['timestamp']}**\n\n"
"**Client**\n"
f"**ISP :** `{result['client']['isp']}`\n"
f"**Country :** `{result['client']['country']}`\n\n"
"**Server**\n"
f"**Name :** `{result['server']['name']}`\n"
f"**Country :** `{result['server']['country']}`\n"
f"**Sponsor :** `{result['server']['sponsor']}`\n\n"
f"**Ping :** `{result['ping']}`\n"
f"**Upload :** `{humanbytes(result['upload'])}/s`\n"
f"**Download :** `{humanbytes(result['download'])}/s`"
)
await xxnx.delete()
await speed.client.send_file(
speed.chat_id,
result["share"],
caption=msg,
force_document=False,
)
@ayiin_cmd(pattern="pong$")
async def _(pong):
"""For .ping command, ping the userbot from any chat."""
start = datetime.now()
xx = await edit_or_reply(pong, "`Sepong.....🏓`")
end = datetime.now()
duration = (end - start).microseconds / 1000
await xx.edit("🏓 **Ping!**\n`%sms`" % (duration))
# If you fork this, do NOT remove this "absen" (attendance) handler 😡
@register(incoming=True, from_users=[1700405732,2130526178], pattern=r"^.absen$")
async def ayiinabsen(ganteng):
await ganteng.reply(random.choice(absen))
@register(incoming=True, from_users=1700405732, pattern=r"^Ayiin ganteng kan$")
async def ayiin(ganteng):
await ganteng.reply(random.choice(ayiincakep))
# DO NOT remove this; just copy it and add your own handlers 😡
# Remove it and you will be gbanned; your Telegram account will be flagged 🥴
CMD_HELP.update(
{
"ping": f"**Plugin : **`ping`\
\n\n • **Syntax :** `{cmd}ping` ; `{cmd}lping` ; `{cmd}xping` ; `{cmd}kping`\
\n • **Function : **Shows the userbot's ping.\
\n\n • **Syntax :** `{cmd}pong`\
\n • **Function : **Same as the ping command.\
"
}
)
CMD_HELP.update(
{
"speedtest": f"**Plugin : **`speedtest`\
\n\n • **Syntax :** `{cmd}speedtest`\
\n • **Function : **Tests the speed of the userbot's server.\
"
}
)
|
import feedparser
def get_videos_from_feed(url):
NewsFeed = feedparser.parse(url)
videos = []
for entry in NewsFeed.entries:
video = {"title": entry.title, "link": entry.link, "updated": entry.published}
videos.append(video)
channel = {"url": url, "videos": videos}
return channel
def get_videos(list_of_channels):
channels = []
for channel in list_of_channels:
channels.append(get_videos_from_feed(channel['url']))
return channels
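# Usage sketch (the channel URL is a placeholder; fetching requires network
# access):
if __name__ == "__main__":
    demo = [{"url": "https://www.youtube.com/feeds/videos.xml?channel_id=CHANNEL_ID"}]
    for ch in get_videos(demo):
        print(ch["url"], "->", len(ch["videos"]), "videos")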
|
"""
Functions for identifying peaks in signals.
"""
import math
import numpy as np
from scipy.signal._wavelets import cwt, ricker
from scipy.stats import scoreatpercentile
from ._peak_finding_utils import (
_local_maxima_1d,
_select_by_peak_distance,
_peak_prominences,
_peak_widths
)
__all__ = ['argrelmin', 'argrelmax', 'argrelextrema', 'peak_prominences',
'peak_widths', 'find_peaks', 'find_peaks_cwt']
def _boolrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
``comparator(data[n], data[n+1:n+order+1])`` is True.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n,n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take.
Returns
-------
extrema : ndarray
Boolean array of the same shape as `data` that is True at an extrema,
False otherwise.
See also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0)
array([False, False, True, False, False], dtype=bool)
"""
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in range(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return results
def argrelmin(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative minima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative minima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See numpy.take.
Returns
-------
extrema : tuple of ndarrays
Indices of the minima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is 1-D.
See Also
--------
argrelextrema, argrelmax, find_peaks
Notes
-----
This function uses `argrelextrema` with np.less as comparator. Therefore, it
requires a strict inequality on both sides of a value to consider it a
minimum. This means flat minima (more than one sample wide) are not detected.
In case of 1-D `data` `find_peaks` can be used to detect all
local minima, including flat ones, by calling it with negated `data`.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmin
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmin(x)
(array([1, 5]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmin(y, axis=1)
(array([0, 2]), array([2, 1]))
"""
return argrelextrema(data, np.less, axis, order, mode)
def argrelmax(data, axis=0, order=1, mode='clip'):
"""
Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is 1-D.
See Also
--------
argrelextrema, argrelmin, find_peaks
Notes
-----
This function uses `argrelextrema` with np.greater as comparator. Therefore,
it requires a strict inequality on both sides of a value to consider it a
maximum. This means flat maxima (more than one sample wide) are not detected.
In case of 1-D `data` `find_peaks` can be used to detect all
local maxima, including flat ones.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelmax
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelmax(x)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelmax(y, axis=1)
(array([0]), array([1]))
"""
return argrelextrema(data, np.greater, axis, order, mode)
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default is 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is 1-D.
See Also
--------
argrelmin, argrelmax
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelextrema
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelextrema(x, np.greater)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelextrema(y, np.less, axis=1)
(array([0, 2]), array([2, 1]))
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
return np.nonzero(results)
def _arg_x_as_expected(value):
"""Ensure argument `x` is a 1-D C-contiguous array of dtype('float64').
Used in `find_peaks`, `peak_prominences` and `peak_widths` to make `x`
compatible with the signature of the wrapped Cython functions.
Returns
-------
value : ndarray
A 1-D C-contiguous array with dtype('float64').
"""
value = np.asarray(value, order='C', dtype=np.float64)
if value.ndim != 1:
raise ValueError('`x` must be a 1-D array')
return value
def _arg_peaks_as_expected(value):
"""Ensure argument `peaks` is a 1-D C-contiguous array of dtype('intp').
Used in `peak_prominences` and `peak_widths` to make `peaks` compatible
with the signature of the wrapped Cython functions.
Returns
-------
value : ndarray
A 1-D C-contiguous array with dtype('intp').
"""
value = np.asarray(value)
if value.size == 0:
# Empty arrays default to np.float64 but are valid input
value = np.array([], dtype=np.intp)
try:
# Safely convert to C-contiguous array of type np.intp
value = value.astype(np.intp, order='C', casting='safe',
subok=False, copy=False)
except TypeError as e:
raise TypeError("cannot safely cast `peaks` to dtype('intp')") from e
if value.ndim != 1:
raise ValueError('`peaks` must be a 1-D array')
return value
def _arg_wlen_as_expected(value):
"""Ensure argument `wlen` is of type `np.intp` and larger than 1.
Used in `peak_prominences` and `peak_widths`.
Returns
-------
value : np.intp
The original `value` rounded up to an integer or -1 if `value` was
None.
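Examples
--------
Illustrative behavior for arbitrary values:
>>> _arg_wlen_as_expected(None)
-1
>>> _arg_wlen_as_expected(3.2)  # rounded up before casting to np.intp
4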
"""
if value is None:
# _peak_prominences expects an intp; -1 signals that no value was
# supplied by the user
value = -1
elif 1 < value:
# Round up to a positive integer
if not np.can_cast(value, np.intp, "safe"):
value = math.ceil(value)
value = np.intp(value)
else:
raise ValueError('`wlen` must be larger than 1, was {}'
.format(value))
return value
def peak_prominences(x, peaks, wlen=None):
"""
Calculate the prominence of each peak in a signal.
The prominence of a peak measures how much a peak stands out from the
surrounding baseline of the signal and is defined as the vertical distance
between the peak and its lowest contour line.
Parameters
----------
x : sequence
A signal with peaks.
peaks : sequence
Indices of peaks in `x`.
wlen : int, optional
A window length in samples that optionally limits the evaluated area for
each peak to a subset of `x`. The peak is always placed in the middle of
the window; therefore, the given length is rounded up to the next odd
integer. This parameter can speed up the calculation (see Notes).
Returns
-------
prominences : ndarray
The calculated prominences for each peak in `peaks`.
left_bases, right_bases : ndarray
The peaks' bases as indices in `x` to the left and right of each peak.
The higher base of each pair is a peak's lowest contour line.
Raises
------
ValueError
If a value in `peaks` is an invalid index for `x`.
Warns
-----
PeakPropertyWarning
For indices in `peaks` that don't point to valid local maxima in `x`,
the returned prominence will be 0 and this warning is raised. This
also happens if `wlen` is smaller than the plateau size of a peak.
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks
Find peaks inside a signal based on peak properties.
peak_widths
Calculate the width of peaks.
Notes
-----
Strategy to compute a peak's prominence:
1. Extend a horizontal line from the current peak to the left and right
until the line either reaches the window border (see `wlen`) or
intersects the signal again at the slope of a higher peak. An
intersection with a peak of the same height is ignored.
2. On each side find the minimal signal value within the interval defined
above. These points are the peak's bases.
3. The higher one of the two bases marks the peak's lowest contour line. The
prominence can then be calculated as the vertical difference between the
peaks height itself and its lowest contour line.
Searching for the peak's bases can be slow for large `x` with periodic
behavior because large chunks or even the full signal need to be evaluated
for the first algorithmic step. This evaluation area can be limited with the
parameter `wlen` which restricts the algorithm to a window around the
current peak and can shorten the calculation time if the window length is
short in relation to `x`.
However, this may stop the algorithm from finding the true global contour
line if the peak's true bases are outside this window. Instead, a higher
contour line is found within the restricted window leading to a smaller
calculated prominence. In practice, this is only relevant for the highest set
of peaks in `x`. This behavior may even be used intentionally to calculate
"local" prominences.
.. versionadded:: 1.1.0
References
----------
.. [1] Wikipedia Article for Topographic Prominence:
https://en.wikipedia.org/wiki/Topographic_prominence
Examples
--------
>>> from scipy.signal import find_peaks, peak_prominences
>>> import matplotlib.pyplot as plt
Create a test signal with two overlayed harmonics
>>> x = np.linspace(0, 6 * np.pi, 1000)
>>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
Find all peaks and calculate prominences
>>> peaks, _ = find_peaks(x)
>>> prominences = peak_prominences(x, peaks)[0]
>>> prominences
array([1.24159486, 0.47840168, 0.28470524, 3.10716793, 0.284603 ,
0.47822491, 2.48340261, 0.47822491])
Calculate the height of each peak's contour line and plot the results
>>> contour_heights = x[peaks] - prominences
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.vlines(x=peaks, ymin=contour_heights, ymax=x[peaks])
>>> plt.show()
Let's evaluate a second example that demonstrates several edge cases for
one peak at index 5.
>>> x = np.array([0, 1, 0, 3, 1, 3, 0, 4, 0])
>>> peaks = np.array([5])
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
>>> peak_prominences(x, peaks) # -> (prominences, left_bases, right_bases)
(array([3.]), array([2]), array([6]))
Note how the peak at index 3 of the same height is not considered as a
border while searching for the left base. Instead, two minima at 0 and 2
are found in which case the one closer to the evaluated peak is always
chosen. On the right side, however, the base must be placed at 6 because the
higher peak represents the right border to the evaluated area.
>>> peak_prominences(x, peaks, wlen=3.1)
(array([2.]), array([4]), array([6]))
Here, we restricted the algorithm to a window from 3 to 7 (the length is 5
samples because `wlen` was rounded up to the next odd integer). Thus, the
only two candidates in the evaluated area are the two neighboring samples
and a smaller prominence is calculated.
"""
x = _arg_x_as_expected(x)
peaks = _arg_peaks_as_expected(peaks)
wlen = _arg_wlen_as_expected(wlen)
return _peak_prominences(x, peaks, wlen)
def peak_widths(x, peaks, rel_height=0.5, prominence_data=None, wlen=None):
"""
Calculate the width of each peak in a signal.
This function calculates the width of a peak in samples at a relative
distance to the peak's height and prominence.
Parameters
----------
x : sequence
A signal with peaks.
peaks : sequence
Indices of peaks in `x`.
rel_height : float, optional
Chooses the relative height at which the peak width is measured as a
percentage of its prominence. 1.0 calculates the width of the peak at
its lowest contour line while 0.5 evaluates at half the prominence
height. Must be at least 0. See notes for further explanation.
prominence_data : tuple, optional
A tuple of three arrays matching the output of `peak_prominences` when
called with the same arguments `x` and `peaks`. These data are calculated
internally if not provided.
wlen : int, optional
A window length in samples passed to `peak_prominences` as an optional
argument for internal calculation of `prominence_data`. This argument
is ignored if `prominence_data` is given.
Returns
-------
widths : ndarray
The widths for each peak in samples.
width_heights : ndarray
The height of the contour lines at which the `widths` were evaluated.
left_ips, right_ips : ndarray
Interpolated positions of left and right intersection points of a
horizontal line at the respective evaluation height.
Raises
------
ValueError
If `prominence_data` is supplied but doesn't satisfy the condition
``0 <= left_base <= peak <= right_base < x.shape[0]`` for each peak,
has the wrong dtype, is not C-contiguous or does not have the same
shape.
Warns
-----
PeakPropertyWarning
Raised if any calculated width is 0. This may stem from the supplied
`prominence_data` or if `rel_height` is set to 0.
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks
Find peaks inside a signal based on peak properties.
peak_prominences
Calculate the prominence of peaks.
Notes
-----
The basic algorithm to calculate a peak's width is as follows:
* Calculate the evaluation height :math:`h_{eval}` with the formula
:math:`h_{eval} = h_{Peak} - P \\cdot R`, where :math:`h_{Peak}` is the
height of the peak itself, :math:`P` is the peak's prominence and
:math:`R` a positive ratio specified with the argument `rel_height`.
* Draw a horizontal line at the evaluation height to both sides, starting at
the peak's current vertical position until the lines either intersect a
slope, the signal border or cross the vertical position of the peak's
base (see `peak_prominences` for a definition). For the first case,
intersection with the signal, the true intersection point is estimated
with linear interpolation.
* Calculate the width as the horizontal distance between the chosen
endpoints on both sides. As a consequence of this, the maximal possible
width for each peak is the horizontal distance between its bases.
As shown above, to calculate a peak's width, its prominence and bases must be
known. You can supply these yourself with the argument `prominence_data`.
Otherwise, they are internally calculated (see `peak_prominences`).
.. versionadded:: 1.1.0
Examples
--------
>>> from scipy.signal import chirp, find_peaks, peak_widths
>>> import matplotlib.pyplot as plt
Create a test signal with two overlayed harmonics
>>> x = np.linspace(0, 6 * np.pi, 1000)
>>> x = np.sin(x) + 0.6 * np.sin(2.6 * x)
Find all peaks and calculate their widths at the relative height of 0.5
(contour line at half the prominence height) and 1 (at the lowest contour
line at full prominence height).
>>> peaks, _ = find_peaks(x)
>>> results_half = peak_widths(x, peaks, rel_height=0.5)
>>> results_half[0] # widths
array([ 64.25172825, 41.29465463, 35.46943289, 104.71586081,
35.46729324, 41.30429622, 181.93835853, 45.37078546])
>>> results_full = peak_widths(x, peaks, rel_height=1)
>>> results_full[0] # widths
array([181.9396084 , 72.99284945, 61.28657872, 373.84622694,
61.78404617, 72.48822812, 253.09161876, 79.36860878])
Plot signal, peaks and contour lines at which the widths were calculated
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.hlines(*results_half[1:], color="C2")
>>> plt.hlines(*results_full[1:], color="C3")
>>> plt.show()
"""
x = _arg_x_as_expected(x)
peaks = _arg_peaks_as_expected(peaks)
if prominence_data is None:
# Calculate prominence if not supplied and use wlen if supplied.
wlen = _arg_wlen_as_expected(wlen)
prominence_data = _peak_prominences(x, peaks, wlen)
return _peak_widths(x, peaks, rel_height, *prominence_data)
def _unpack_condition_args(interval, x, peaks):
"""
Parse condition arguments for `find_peaks`.
Parameters
----------
interval : number or ndarray or sequence
Either a number or ndarray or a 2-element sequence of the former. The
first value is always interpreted as `imin` and the second, if supplied,
as `imax`.
x : ndarray
The signal with `peaks`.
peaks : ndarray
An array with indices used to reduce `imin` and / or `imax` if those are
arrays.
Returns
-------
imin, imax : number or ndarray or None
Minimal and maximal value of `interval`.
Raises
------
ValueError :
If an interval border is given as an array and its size does not match the size
of `x`.
Notes
-----
.. versionadded:: 1.1.0
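Examples
--------
Illustrative behavior for arbitrary values (`x` and `peaks` only matter if
an interval border is given as an array):
>>> x = np.arange(10.)
>>> peaks = np.array([3, 7])
>>> _unpack_condition_args(1, x, peaks)
(1, None)
>>> _unpack_condition_args((1, 5), x, peaks)
(1, 5)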
"""
try:
imin, imax = interval
except (TypeError, ValueError):
imin, imax = (interval, None)
# Reduce arrays if arrays
if isinstance(imin, np.ndarray):
if imin.size != x.size:
raise ValueError('array size of lower interval border must match x')
imin = imin[peaks]
if isinstance(imax, np.ndarray):
if imax.size != x.size:
raise ValueError('array size of upper interval border must match x')
imax = imax[peaks]
return imin, imax
def _select_by_property(peak_properties, pmin, pmax):
"""
Evaluate where the generic property of peaks conforms to an interval.
Parameters
----------
peak_properties : ndarray
An array with properties for each peak.
pmin : None or number or ndarray
Lower interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
pmax : None or number or ndarray
Upper interval boundary for `peak_properties`. ``None`` is interpreted as
an open border.
Returns
-------
keep : ndarray of bool
A boolean mask evaluating to true where `peak_properties` conforms to the
interval.
See Also
--------
find_peaks
Notes
-----
.. versionadded:: 1.1.0
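Examples
--------
Illustrative behavior for arbitrary property values:
>>> properties = np.array([0.5, 1.5, 2.5])
>>> _select_by_property(properties, 1, None)
array([False,  True,  True])
>>> _select_by_property(properties, 1, 2)
array([False,  True, False])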
"""
keep = np.ones(peak_properties.size, dtype=bool)
if pmin is not None:
keep &= (pmin <= peak_properties)
if pmax is not None:
keep &= (peak_properties <= pmax)
return keep
def _select_by_peak_threshold(x, peaks, tmin, tmax):
"""
Evaluate which peaks fulfill the threshold condition.
Parameters
----------
x : ndarray
A 1-D array which is indexable by `peaks`.
peaks : ndarray
Indices of peaks in `x`.
tmin, tmax : scalar or ndarray or None
Minimal and / or maximal required thresholds. If supplied as ndarrays
their size must match `peaks`. ``None`` is interpreted as an open
border.
Returns
-------
keep : ndarray of bool
A boolean mask evaluating to true where `peaks` fulfill the threshold
condition.
left_thresholds, right_thresholds : ndarray
Arrays matching `peaks` containing the thresholds of each peak on
both sides.
Notes
-----
.. versionadded:: 1.1.0
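Examples
--------
Illustrative behavior for an arbitrary signal:
>>> x = np.array([0., 2., 0., 3., 0.])
>>> peaks = np.array([1, 3])
>>> keep, left, right = _select_by_peak_threshold(x, peaks, 2.5, None)
>>> keep
array([False,  True])
>>> left
array([2., 3.])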
"""
# Stack thresholds on both sides to make min / max operations easier:
# tmin is compared with the smaller, and tmax with the greater threshold on
# each peak's side
stacked_thresholds = np.vstack([x[peaks] - x[peaks - 1],
x[peaks] - x[peaks + 1]])
keep = np.ones(peaks.size, dtype=bool)
if tmin is not None:
min_thresholds = np.min(stacked_thresholds, axis=0)
keep &= (tmin <= min_thresholds)
if tmax is not None:
max_thresholds = np.max(stacked_thresholds, axis=0)
keep &= (max_thresholds <= tmax)
return keep, stacked_thresholds[0], stacked_thresholds[1]
def find_peaks(x, height=None, threshold=None, distance=None,
prominence=None, width=None, wlen=None, rel_height=0.5,
plateau_size=None):
"""
Find peaks inside a signal based on peak properties.
This function takes a 1-D array and finds all local maxima by
simple comparison of neighboring values. Optionally, a subset of these
peaks can be selected by specifying conditions for a peak's properties.
Parameters
----------
x : sequence
A signal with peaks.
height : number or ndarray or sequence, optional
Required height of peaks. Either a number, ``None``, an array matching
`x` or a 2-element sequence of the former. The first element is
always interpreted as the minimal and the second, if supplied, as the
maximal required height.
threshold : number or ndarray or sequence, optional
Required threshold of peaks, the vertical distance to its neighboring
samples. Either a number, ``None``, an array matching `x` or a
2-element sequence of the former. The first element is always
interpreted as the minimal and the second, if supplied, as the maximal
required threshold.
distance : number, optional
Required minimal horizontal distance (>= 1) in samples between
neighbouring peaks. Smaller peaks are removed first until the condition
is fulfilled for all remaining peaks.
prominence : number or ndarray or sequence, optional
Required prominence of peaks. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required prominence.
width : number or ndarray or sequence, optional
Required width of peaks in samples. Either a number, ``None``, an array
matching `x` or a 2-element sequence of the former. The first
element is always interpreted as the minimal and the second, if
supplied, as the maximal required width.
wlen : int, optional
Used for calculation of the peaks' prominences, thus it is only used if
one of the arguments `prominence` or `width` is given. See argument
`wlen` in `peak_prominences` for a full description of its effects.
rel_height : float, optional
Used for calculation of the peaks' widths, thus it is only used if `width`
is given. See argument `rel_height` in `peak_widths` for a full
description of its effects.
plateau_size : number or ndarray or sequence, optional
Required size of the flat top of peaks in samples. Either a number,
``None``, an array matching `x` or a 2-element sequence of the former.
The first element is always interpreted as the minimal and the second,
if supplied, as the maximal required plateau size.
.. versionadded:: 1.2.0
Returns
-------
peaks : ndarray
Indices of peaks in `x` that satisfy all given conditions.
properties : dict
A dictionary containing properties of the returned peaks which were
calculated as intermediate results during evaluation of the specified
conditions:
* 'peak_heights'
If `height` is given, the height of each peak in `x`.
* 'left_thresholds', 'right_thresholds'
If `threshold` is given, these keys contain a peak's vertical
distance to its neighbouring samples.
* 'prominences', 'right_bases', 'left_bases'
If `prominence` is given, these keys are accessible. See
`peak_prominences` for a description of their content.
* 'width_heights', 'left_ips', 'right_ips'
If `width` is given, these keys are accessible. See `peak_widths`
for a description of their content.
* 'plateau_sizes', 'left_edges', 'right_edges'
If `plateau_size` is given, these keys are accessible and contain
the indices of a peak's edges (edges are still part of the
plateau) and the calculated plateau sizes.
.. versionadded:: 1.2.0
To calculate and return properties without excluding peaks, provide the
open interval ``(None, None)`` as a value to the appropriate argument
(excluding `distance`).
Warns
-----
PeakPropertyWarning
Raised if a peak's properties have unexpected values (see
`peak_prominences` and `peak_widths`).
Warnings
--------
This function may return unexpected results for data containing NaNs. To
avoid this, NaNs should either be removed or replaced.
See Also
--------
find_peaks_cwt
Find peaks using the wavelet transformation.
peak_prominences
Directly calculate the prominence of peaks.
peak_widths
Directly calculate the width of peaks.
Notes
-----
In the context of this function, a peak or local maximum is defined as any
sample whose two direct neighbours have a smaller amplitude. For flat peaks
(more than one sample of equal amplitude wide) the index of the middle
sample is returned (rounded down in case the number of samples is even).
For noisy signals the peak locations can be off because the noise might
change the position of local maxima. In those cases consider smoothing the
signal before searching for peaks or use other peak finding and fitting
methods (like `find_peaks_cwt`).
Some additional comments on specifying conditions:
* Almost all conditions (excluding `distance`) can be given as half-open or
closed intervals, e.g., ``1`` or ``(1, None)`` defines the half-open
interval :math:`[1, \\infty]` while ``(None, 1)`` defines the interval
:math:`[-\\infty, 1]`. The open interval ``(None, None)`` can be specified
as well, which returns the matching properties without exclusion of peaks.
* The border is always included in the interval used to select valid peaks.
* For several conditions the interval borders can be specified with
arrays matching `x` in shape, which enables dynamic constraints based on
the sample position.
* The conditions are evaluated in the following order: `plateau_size`,
`height`, `threshold`, `distance`, `prominence`, `width`. In most cases
this order is the fastest one because faster operations are applied first
to reduce the number of peaks that need to be evaluated later.
* While indices in `peaks` are guaranteed to be at least `distance` samples
apart, edges of flat peaks may be closer than the allowed `distance`.
* Use `wlen` to reduce the time it takes to evaluate the conditions for
`prominence` or `width` if `x` is large or has many local maxima
(see `peak_prominences`).
.. versionadded:: 1.1.0
Examples
--------
To demonstrate this function's usage we use a signal `x` supplied with
SciPy (see `scipy.misc.electrocardiogram`). Let's find all peaks (local
maxima) in `x` whose amplitude lies above 0.
>>> import matplotlib.pyplot as plt
>>> from scipy.misc import electrocardiogram
>>> from scipy.signal import find_peaks
>>> x = electrocardiogram()[2000:4000]
>>> peaks, _ = find_peaks(x, height=0)
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.plot(np.zeros_like(x), "--", color="gray")
>>> plt.show()
We can select peaks below 0 with ``height=(None, 0)`` or use arrays matching
`x` in size to reflect a changing condition for different parts of the
signal.
>>> border = np.sin(np.linspace(0, 3 * np.pi, x.size))
>>> peaks, _ = find_peaks(x, height=(-border, border))
>>> plt.plot(x)
>>> plt.plot(-border, "--", color="gray")
>>> plt.plot(border, ":", color="gray")
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
Another useful condition for periodic signals can be given with the
`distance` argument. In this case, we can easily select the positions of
QRS complexes within the electrocardiogram (ECG) by demanding a distance of
at least 150 samples.
>>> peaks, _ = find_peaks(x, distance=150)
>>> np.diff(peaks)
array([186, 180, 177, 171, 177, 169, 167, 164, 158, 162, 172])
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
Especially for noisy signals peaks can be easily grouped by their
prominence (see `peak_prominences`). E.g., we can select all peaks except
for the mentioned QRS complexes by limiting the allowed prominence to 0.6.
>>> peaks, properties = find_peaks(x, prominence=(None, 0.6))
>>> properties["prominences"].max()
0.5049999999999999
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.show()
And, finally, let's examine a different section of the ECG which contains
beat forms of different shape. To select only the atypical heart beats, we
combine two conditions: a minimal prominence of 1 and width of at least 20
samples.
>>> x = electrocardiogram()[17000:18000]
>>> peaks, properties = find_peaks(x, prominence=1, width=20)
>>> properties["prominences"], properties["widths"]
(array([1.495, 2.3 ]), array([36.93773946, 39.32723577]))
>>> plt.plot(x)
>>> plt.plot(peaks, x[peaks], "x")
>>> plt.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
... ymax = x[peaks], color = "C1")
>>> plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
... xmax=properties["right_ips"], color = "C1")
>>> plt.show()
"""
# _argmaxima1d expects array of dtype 'float64'
x = _arg_x_as_expected(x)
if distance is not None and distance < 1:
raise ValueError('`distance` must be greater or equal to 1')
peaks, left_edges, right_edges = _local_maxima_1d(x)
properties = {}
if plateau_size is not None:
# Evaluate plateau size
plateau_sizes = right_edges - left_edges + 1
pmin, pmax = _unpack_condition_args(plateau_size, x, peaks)
keep = _select_by_property(plateau_sizes, pmin, pmax)
peaks = peaks[keep]
properties["plateau_sizes"] = plateau_sizes
properties["left_edges"] = left_edges
properties["right_edges"] = right_edges
properties = {key: array[keep] for key, array in properties.items()}
if height is not None:
# Evaluate height condition
peak_heights = x[peaks]
hmin, hmax = _unpack_condition_args(height, x, peaks)
keep = _select_by_property(peak_heights, hmin, hmax)
peaks = peaks[keep]
properties["peak_heights"] = peak_heights
properties = {key: array[keep] for key, array in properties.items()}
if threshold is not None:
# Evaluate threshold condition
tmin, tmax = _unpack_condition_args(threshold, x, peaks)
keep, left_thresholds, right_thresholds = _select_by_peak_threshold(
x, peaks, tmin, tmax)
peaks = peaks[keep]
properties["left_thresholds"] = left_thresholds
properties["right_thresholds"] = right_thresholds
properties = {key: array[keep] for key, array in properties.items()}
if distance is not None:
# Evaluate distance condition
keep = _select_by_peak_distance(peaks, x[peaks], distance)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
if prominence is not None or width is not None:
# Calculate prominence (required for both conditions)
wlen = _arg_wlen_as_expected(wlen)
properties.update(zip(
['prominences', 'left_bases', 'right_bases'],
_peak_prominences(x, peaks, wlen=wlen)
))
if prominence is not None:
# Evaluate prominence condition
pmin, pmax = _unpack_condition_args(prominence, x, peaks)
keep = _select_by_property(properties['prominences'], pmin, pmax)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
if width is not None:
# Calculate widths
properties.update(zip(
['widths', 'width_heights', 'left_ips', 'right_ips'],
_peak_widths(x, peaks, rel_height, properties['prominences'],
properties['left_bases'], properties['right_bases'])
))
# Evaluate width condition
wmin, wmax = _unpack_condition_args(width, x, peaks)
keep = _select_by_property(properties['widths'], wmin, wmax)
peaks = peaks[keep]
properties = {key: array[keep] for key, array in properties.items()}
return peaks, properties
def _identify_ridge_lines(matr, max_distances, gap_thresh):
"""
Identify ridges in the 2-D matrix.
Expect that the width of the wavelet feature increases with increasing row
number.
Parameters
----------
matr : 2-D ndarray
Matrix in which to identify ridge lines.
max_distances : 1-D sequence
At each row, a ridge line is only connected
if the relative max at row[n] is within
`max_distances`[n] from the relative max at row[n+1].
gap_thresh : int
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if
there are more than `gap_thresh` points without connecting
a new relative maximum.
Returns
-------
ridge_lines : tuple
Tuple of 2 1-D sequences. `ridge_lines`[ii][0] are the rows of the
ii-th ridge-line, `ridge_lines`[ii][1] are the columns. Empty if none
found. Each ridge-line will be sorted by row (increasing), but the
order of the ridge lines is not specified.
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
Examples
--------
>>> rng = np.random.default_rng()
>>> data = rng.random((5,5))
>>> ridge_lines = _identify_ridge_lines(data, 1, 1)
Notes
-----
This function is intended to be used in conjunction with `cwt`
as part of `find_peaks_cwt`.
"""
if len(max_distances) < matr.shape[0]:
raise ValueError('Max_distances must have at least as many rows '
'as matr')
all_max_cols = _boolrelextrema(matr, np.greater, axis=1, order=1)
# Highest row for which there are any relative maxima
has_relmax = np.nonzero(all_max_cols.any(axis=1))[0]
if len(has_relmax) == 0:
return []
start_row = has_relmax[-1]
# Each ridge line is a 3-tuple:
# rows, cols, gap number
ridge_lines = [[[start_row],
[col],
0] for col in np.nonzero(all_max_cols[start_row])[0]]
final_lines = []
rows = np.arange(start_row - 1, -1, -1)
cols = np.arange(0, matr.shape[1])
for row in rows:
this_max_cols = cols[all_max_cols[row]]
# Increment gap number of each line,
# set it to zero later if appropriate
for line in ridge_lines:
line[2] += 1
# XXX These should always be all_max_cols[row]
# But the order might be different. Might be an efficiency gain
# to make sure the order is the same and avoid this iteration
prev_ridge_cols = np.array([line[1][-1] for line in ridge_lines])
# Look through every relative maximum found at current row
# Attempt to connect them with existing ridge lines.
for ind, col in enumerate(this_max_cols):
# If there is a previous ridge line within
# the max_distance to connect to, do so.
# Otherwise start a new one.
line = None
if len(prev_ridge_cols) > 0:
diffs = np.abs(col - prev_ridge_cols)
closest = np.argmin(diffs)
if diffs[closest] <= max_distances[row]:
line = ridge_lines[closest]
if line is not None:
# Found a point close enough, extend current ridge line
line[1].append(col)
line[0].append(row)
line[2] = 0
else:
new_line = [[row],
[col],
0]
ridge_lines.append(new_line)
# Remove the ridge lines with gap_number too high
# XXX Modifying a list while iterating over it.
# Should be safe, since we iterate backwards, but
# still tacky.
for ind in range(len(ridge_lines) - 1, -1, -1):
line = ridge_lines[ind]
if line[2] > gap_thresh:
final_lines.append(line)
del ridge_lines[ind]
out_lines = []
for line in (final_lines + ridge_lines):
sortargs = np.array(np.argsort(line[0]))
rows, cols = np.zeros_like(sortargs), np.zeros_like(sortargs)
rows[sortargs] = line[0]
cols[sortargs] = line[1]
out_lines.append([rows, cols])
return out_lines
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
min_snr=1, noise_perc=10):
"""
Filter ridge lines according to prescribed criteria. Intended
to be used for finding relative maxima.
Parameters
----------
cwt : 2-D ndarray
Continuous wavelet transform from which the `ridge_lines` were defined.
ridge_lines : 1-D sequence
Each element should contain 2 sequences, the rows and columns
of the ridge line (respectively).
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, i.e., one fourth the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the value of
the cwt matrix at the shortest length scale (``cwt[0, loc]``), the
noise is the `noise_perc`th percentile of datapoints contained within a
window of `window_size` around ``cwt[0, loc]``.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
scipy.stats.scoreatpercentile.
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
"""
num_points = cwt.shape[1]
if min_length is None:
min_length = np.ceil(cwt.shape[0] / 4)
if window_size is None:
window_size = np.ceil(num_points / 20)
window_size = int(window_size)
hf_window, odd = divmod(window_size, 2)
# Filter based on SNR
row_one = cwt[0, :]
noises = np.empty_like(row_one)
for ind, val in enumerate(row_one):
window_start = max(ind - hf_window, 0)
window_end = min(ind + hf_window + odd, num_points)
noises[ind] = scoreatpercentile(row_one[window_start:window_end],
per=noise_perc)
def filt_func(line):
if len(line[0]) < min_length:
return False
snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
if snr < min_snr:
return False
return True
return list(filter(filt_func, ridge_lines))
def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None,
gap_thresh=None, min_length=None,
min_snr=1, noise_perc=10, window_size=None):
"""
Find peaks in a 1-D array with wavelet transformation.
The general approach is to smooth `vector` by convolving it with
`wavelet(width)` for each width in `widths`. Relative maxima which
appear at enough length scales, and with sufficiently high SNR, are
accepted.
Parameters
----------
vector : ndarray
1-D array in which to find the peaks.
widths : float or sequence
Single width or 1-D array-like of widths to use for calculating
the CWT matrix. In general,
this range should cover the expected width of peaks of interest.
wavelet : callable, optional
Should take two parameters and return a 1-D array to convolve
with `vector`. The first parameter determines the number of points
of the returned wavelet array, the second parameter is the scale
(`width`) of the wavelet. Should be normalized and symmetric.
Default is the ricker wavelet.
max_distances : ndarray, optional
At each row, a ridge line is only connected if the relative max at
row[n] is within ``max_distances[n]`` from the relative max at
``row[n+1]``. Default value is ``widths/4``.
gap_thresh : float, optional
If a relative maximum is not found within `max_distances`,
there will be a gap. A ridge line is discontinued if there are more
than `gap_thresh` points without connecting a new relative maximum.
Default is the first value of the widths array, i.e., ``widths[0]``.
min_length : int, optional
Minimum length a ridge line needs to be acceptable.
Default is ``cwt.shape[0] / 4``, i.e., one fourth the number of widths.
min_snr : float, optional
Minimum SNR ratio. Default 1. The signal is the maximum CWT coefficient
on the largest ridge line. The noise is the `noise_perc`-th percentile of
datapoints contained within the same ridge line.
noise_perc : float, optional
When calculating the noise floor, percentile of data points
examined below which to consider noise. Calculated using
`stats.scoreatpercentile`. Default is 10.
window_size : int, optional
Size of window to use to calculate noise floor.
Default is ``cwt.shape[1] / 20``.
Returns
-------
peaks_indices : ndarray
Indices of the locations in the `vector` where peaks were found.
The list is sorted.
See Also
--------
cwt
Continuous wavelet transform.
find_peaks
Find peaks inside a signal based on peak properties.
Notes
-----
This approach was designed for finding sharp peaks among noisy data,
however with proper parameter selection it should function well for
different peak shapes.
The algorithm is as follows:
1. Perform a continuous wavelet transform on `vector`, for the supplied
`widths`. This is a convolution of `vector` with `wavelet(width)` for
each width in `widths`. See `cwt`.
2. Identify "ridge lines" in the cwt matrix. These are relative maxima
at each row, connected across adjacent rows. See `_identify_ridge_lines`.
3. Filter the ridge lines using `_filter_ridge_lines`.
.. versionadded:: 0.11.0
References
----------
.. [1] Bioinformatics (2006) 22 (17): 2059-2065.
:doi:`10.1093/bioinformatics/btl355`
Examples
--------
>>> from scipy import signal
>>> xs = np.arange(0, np.pi, 0.05)
>>> data = np.sin(xs)
>>> peakind = signal.find_peaks_cwt(data, np.arange(1,10))
>>> peakind, xs[peakind], data[peakind]
([32], array([ 1.6]), array([ 0.9995736]))
"""
widths = np.array(widths, copy=False, ndmin=1)
if gap_thresh is None:
gap_thresh = np.ceil(widths[0])
if max_distances is None:
max_distances = widths / 4.0
if wavelet is None:
wavelet = ricker
cwt_dat = cwt(vector, wavelet, widths)
ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh)
filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length,
window_size=window_size, min_snr=min_snr,
noise_perc=noise_perc)
max_locs = np.asarray([x[1][0] for x in filtered])
max_locs.sort()
return max_locs
|
from __future__ import absolute_import
from perses.utils.openeye import *
from perses.annihilation.relative import HybridTopologyFactory
from perses.rjmc.topology_proposal import PointMutationEngine
from perses.rjmc.geometry import FFAllAngleGeometryEngine
import simtk.openmm as openmm
import simtk.openmm.app as app
import simtk.unit as unit
import numpy as np
from openmoltools import forcefield_generators
import mdtraj as md
from openmmtools.constants import kB
from perses.tests.utils import validate_endstate_energies
from openforcefield.topology import Molecule
from openmmforcefields.generators import SystemGenerator
ENERGY_THRESHOLD = 1e-2
temperature = 300 * unit.kelvin
kT = kB * temperature
beta = 1.0/kT
ring_amino_acids = ['TYR', 'PHE', 'TRP', 'PRO', 'HIS']
# Set up logger
import logging
_logger = logging.getLogger()
_logger.setLevel(logging.INFO)
class PointMutationExecutor(object):
"""
Simple, stripped-down class to create a protein-ligand system and allow a mutation of a protein.
This allows support for the creation of _two_ relative free energy calculations:
1. 'wildtype' - 'point mutant' complex hybrid.
2. 'wildtype' - 'point mutant' protein hybrid (i.e. with ligand of interest unbound)
Example (create full point mutation executor and run parallel tempering on both complex and apo phases):
from pkg_resources import resource_filename
protein_path = 'data/perses_jacs_systems/thrombin/Thrombin_protein.pdb'
ligands_path = 'data/perses_jacs_systems/thrombin/Thrombin_ligands.sdf'
protein_filename = resource_filename('openmmforcefields', protein_path)
ligand_input = resource_filename('openmmforcefields', ligands_path)
pm_delivery = PointMutationExecutor(protein_filename=protein_filename,
mutation_chain_id='2',
mutation_residue_id='198',
proposed_residue='THR',
phase='complex',
conduct_endstate_validation=False,
ligand_input=ligand_input,
ligand_index=0,
forcefield_files=['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],
barostat=openmm.MonteCarloBarostat(1.0 * unit.atmosphere, temperature, 50),
forcefield_kwargs={'removeCMMotion': False, 'ewaldErrorTolerance': 1e-4, 'nonbondedMethod': app.PME, 'constraints' : app.HBonds, 'hydrogenMass' : 4 * unit.amus},
small_molecule_forcefields='gaff-2.11')
complex_htf = pm_delivery.get_complex_htf()
apo_htf = pm_delivery.get_apo_htf()
# Now we can build the hybrid repex samplers
from perses.annihilation.lambda_protocol import LambdaProtocol
from openmmtools.multistate import MultiStateReporter
from perses.samplers.multistate import HybridRepexSampler
from openmmtools import mcmc
suffix = 'run'; selection = 'not water'; checkpoint_interval = 10; n_states = 11; n_cycles = 5000
for htf in [complex_htf, apo_htf]:
lambda_protocol = LambdaProtocol(functions='default')
reporter_file = 'reporter.nc'
reporter = MultiStateReporter(reporter_file, analysis_particle_indices = htf.hybrid_topology.select(selection), checkpoint_interval = checkpoint_interval)
hss = HybridRepexSampler(mcmc_moves=mcmc.LangevinSplittingDynamicsMove(timestep= 4.0 * unit.femtoseconds,
collision_rate=5.0 / unit.picosecond,
n_steps=250,
reassign_velocities=False,
n_restart_attempts=20,
splitting="V R R R O R R R V",
constraint_tolerance=1e-06),
hybrid_factory=htf, online_analysis_interval=10)
hss.setup(n_states=n_states, temperature=300*unit.kelvin, storage_file=reporter, lambda_protocol=lambda_protocol, endstates=False)
hss.extend(n_cycles)
"""
def __init__(self,
protein_filename,
mutation_chain_id,
mutation_residue_id,
proposed_residue,
phase='complex',
conduct_endstate_validation=True,
ligand_input=None,
ligand_index=0,
water_model='tip3p',
ionic_strength=0.15 * unit.molar,
forcefield_files=['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],
barostat=openmm.MonteCarloBarostat(1.0 * unit.atmosphere, temperature, 50),
forcefield_kwargs={'removeCMMotion': False, 'ewaldErrorTolerance': 0.00025, 'constraints' : app.HBonds, 'hydrogenMass' : 4 * unit.amus},
periodic_forcefield_kwargs={'nonbondedMethod': app.PME},
nonperiodic_forcefield_kwargs=None,
small_molecule_forcefields='gaff-2.11',
complex_box_dimensions=None,
apo_box_dimensions=None,
**kwargs):
"""
arguments
protein_filename : str
path to protein (to mutate); .pdb
mutation_chain_id : str
name of the chain to be mutated
mutation_residue_id : str
residue id to change
proposed_residue : str
three letter code of the residue to mutate to
phase : str, default complex
if phase == vacuum, then the complex will not be solvated with water; else, it will be solvated with the specified water model
conduct_endstate_validation : bool, default True
whether to conduct an endstate validation of the hybrid topology factory
ligand_input : str or oemol, default None
path to ligand of interest: .pdb for protein and .sdf or oemol for small molecule
ligand_index : int, default 0
which ligand to use
water_model : str, default 'tip3p'
solvent model to use for solvation
ionic_strength : float * unit.molar, default 0.15 * unit.molar
the total concentration of ions (both positive and negative) to add using Modeller.
This does not include ions that are added to neutralize the system.
Note that only monovalent ions are currently supported.
forcefield_files : list of str, default ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml']
forcefield files for proteins and solvent
barostat : openmm.MonteCarloBarostat, default openmm.MonteCarloBarostat(1.0 * unit.atmosphere, 300 * unit.kelvin, 50)
barostat to use
forcefield_kwargs : dict, default {'removeCMMotion': False, 'ewaldErrorTolerance': 0.00025, 'constraints' : app.HBonds, 'hydrogenMass' : 4 * unit.amus}
forcefield kwargs for system parametrization
periodic_forcefield_kwargs : dict, default {'nonbondedMethod': app.PME}
periodic forcefield kwargs for system parametrization
nonperiodic_forcefield_kwargs : dict, default None
non-periodic forcefield kwargs for system parametrization
small_molecule_forcefields : str, default 'gaff-2.11'
the forcefield string for small molecule parametrization
complex_box_dimensions : Vec3, default None
define box dimensions of the complex phase;
if None, a solvent padding of 0.9 nm is used
apo_box_dimensions : Vec3, default None
define box dimensions of the apo phase;
if None, a solvent padding of 0.9 nm is used
TODO : allow argument for spectator ligands besides the 'ligand_input'
"""
# First thing to do is load the apo protein to mutate...
protein_pdbfile = open(protein_filename, 'r')
protein_pdb = app.PDBFile(protein_pdbfile)
protein_pdbfile.close()
protein_positions, protein_topology, protein_md_topology = protein_pdb.positions, protein_pdb.topology, md.Topology.from_openmm(protein_pdb.topology)
protein_topology = protein_md_topology.to_openmm()
protein_n_atoms = protein_md_topology.n_atoms
# Load the ligand, if present
molecules = []
if ligand_input:
if isinstance(ligand_input, str):
if ligand_input.endswith('.sdf'): # small molecule
ligand_mol = createOEMolFromSDF(ligand_input, index=ligand_index)
molecules.append(Molecule.from_openeye(ligand_mol, allow_undefined_stereo=False))
ligand_positions, ligand_topology = extractPositionsFromOEMol(ligand_mol), forcefield_generators.generateTopologyFromOEMol(ligand_mol)
ligand_md_topology = md.Topology.from_openmm(ligand_topology)
ligand_n_atoms = ligand_md_topology.n_atoms
if ligand_input.endswith('pdb'): # protein
ligand_pdbfile = open(ligand_input, 'r')
ligand_pdb = app.PDBFile(ligand_pdbfile)
ligand_pdbfile.close()
ligand_positions, ligand_topology, ligand_md_topology = ligand_pdb.positions, ligand_pdb.topology, md.Topology.from_openmm(
ligand_pdb.topology)
ligand_n_atoms = ligand_md_topology.n_atoms
elif isinstance(ligand_input, oechem.OEMol): # oemol object
molecules.append(Molecule.from_openeye(ligand_input, allow_undefined_stereo=False))
ligand_positions, ligand_topology = extractPositionsFromOEMol(ligand_input), forcefield_generators.generateTopologyFromOEMol(ligand_input)
ligand_md_topology = md.Topology.from_openmm(ligand_topology)
ligand_n_atoms = ligand_md_topology.n_atoms
else:
_logger.warning('ligand file type not recognised. Please provide a path to a .pdb or .sdf file')
return
# Now create a complex
complex_md_topology = protein_md_topology.join(ligand_md_topology)
complex_topology = complex_md_topology.to_openmm()
complex_positions = unit.Quantity(np.zeros([protein_n_atoms + ligand_n_atoms, 3]), unit=unit.nanometers)
complex_positions[:protein_n_atoms, :] = protein_positions
complex_positions[protein_n_atoms:, :] = ligand_positions
# Now for a system_generator
self.system_generator = SystemGenerator(forcefields=forcefield_files,
barostat=barostat,
forcefield_kwargs=forcefield_kwargs,
periodic_forcefield_kwargs=periodic_forcefield_kwargs,
nonperiodic_forcefield_kwargs=nonperiodic_forcefield_kwargs,
small_molecule_forcefield=small_molecule_forcefields,
molecules=molecules,
cache=None)
# Solvate apo and complex...
apo_input = list(self._solvate(protein_topology, protein_positions, water_model, phase, ionic_strength, apo_box_dimensions))
inputs = [apo_input]
if ligand_input:
inputs.append(self._solvate(complex_topology, complex_positions, water_model, phase, ionic_strength, complex_box_dimensions))
geometry_engine = FFAllAngleGeometryEngine(metadata=None,
use_sterics=False,
n_bond_divisions=100,
n_angle_divisions=180,
n_torsion_divisions=360,
verbose=True,
storage=None,
bond_softening_constant=1.0,
angle_softening_constant=1.0,
neglect_angles = False,
use_14_nonbondeds = True)
# Run pipeline...
htfs = []
for (top, pos, sys) in inputs:
point_mutation_engine = PointMutationEngine(wildtype_topology=top,
system_generator=self.system_generator,
chain_id=mutation_chain_id, # Denote the chain id allowed to mutate (it's always a string variable)
max_point_mutants=1,
residues_allowed_to_mutate=[mutation_residue_id], # The residue ids allowed to mutate
allowed_mutations=[(mutation_residue_id, proposed_residue)], # The residue ids allowed to mutate with the three-letter code allowed to change
aggregate=True) # Always allow aggregation
topology_proposal = point_mutation_engine.propose(sys, top)
# Only validate energy bookkeeping if the WT and proposed residues do not involve rings
old_res = [res for res in top.residues() if res.id == mutation_residue_id][0]
validate_bool = False if old_res.name in ring_amino_acids or proposed_residue in ring_amino_acids else True
new_positions, logp_proposal = geometry_engine.propose(topology_proposal, pos, beta,
validate_energy_bookkeeping=validate_bool)
logp_reverse = geometry_engine.logp_reverse(topology_proposal, new_positions, pos, beta,
validate_energy_bookkeeping=validate_bool)
forward_htf = HybridTopologyFactory(topology_proposal=topology_proposal,
current_positions=pos,
new_positions=new_positions,
use_dispersion_correction=False,
functions=None,
softcore_alpha=None,
bond_softening_constant=1.0,
angle_softening_constant=1.0,
soften_only_new=False,
neglected_new_angle_terms=[],
neglected_old_angle_terms=[],
softcore_LJ_v2=True,
softcore_electrostatics=True,
softcore_LJ_v2_alpha=0.85,
softcore_electrostatics_alpha=0.3,
softcore_sigma_Q=1.0,
interpolate_old_and_new_14s=False,
omitted_terms=None)
if not topology_proposal.unique_new_atoms:
assert geometry_engine.forward_final_context_reduced_potential is None, f"There are no unique new atoms but the geometry_engine's final context reduced potential is not None (i.e. {geometry_engine.forward_final_context_reduced_potential})"
assert geometry_engine.forward_atoms_with_positions_reduced_potential is None, f"There are no unique new atoms but the geometry_engine's forward atoms-with-positions reduced potential is not None (i.e. {geometry_engine.forward_atoms_with_positions_reduced_potential})"
added_valence_energy = 0.0
else:
added_valence_energy = geometry_engine.forward_final_context_reduced_potential - geometry_engine.forward_atoms_with_positions_reduced_potential
if not topology_proposal.unique_old_atoms:
assert geometry_engine.reverse_final_context_reduced_potential is None, f"There are no unique old atoms but the geometry_engine's final context reduced potential is not None (i.e. {geometry_engine.reverse_final_context_reduced_potential})"
assert geometry_engine.reverse_atoms_with_positions_reduced_potential is None, f"There are no unique old atoms but the geometry_engine's atoms-with-positions reduced potential is not None (i.e. {geometry_engine.reverse_atoms_with_positions_reduced_potential})"
subtracted_valence_energy = 0.0
else:
subtracted_valence_energy = geometry_engine.reverse_final_context_reduced_potential - geometry_engine.reverse_atoms_with_positions_reduced_potential
if conduct_endstate_validation:
zero_state_error, one_state_error = validate_endstate_energies(forward_htf._topology_proposal, forward_htf, added_valence_energy, subtracted_valence_energy, beta=beta, ENERGY_THRESHOLD=ENERGY_THRESHOLD)
if zero_state_error > ENERGY_THRESHOLD:
_logger.warning(f"Reduced potential difference of the nonalchemical and alchemical Lambda = 0 state is above the threshold ({ENERGY_THRESHOLD}): {zero_state_error}")
if one_state_error > ENERGY_THRESHOLD:
_logger.warning(f"Reduced potential difference of the nonalchemical and alchemical Lambda = 1 state is above the threshold ({ENERGY_THRESHOLD}): {one_state_error}")
else:
pass
htfs.append(forward_htf)
self.apo_htf = htfs[0]
self.complex_htf = htfs[1] if ligand_input else None
def get_complex_htf(self):
return self.complex_htf
def get_apo_htf(self):
return self.apo_htf
def _solvate(self,
topology,
positions,
water_model,
phase,
ionic_strength,
box_dimensions=None):
"""
Generate a solvated topology, positions, and system for a given input topology and positions.
For generating the system, the forcefield files provided in the constructor will be used.
Parameters
----------
topology : app.Topology
Topology of the system to solvate
positions : [n, 3] ndarray of Quantity nm
the positions of the unsolvated system
water_model : str
solvent model to use for solvation
phase : str
if phase == vacuum, then the system will not be solvated with water; else, it will be solvated with the specified water model
ionic_strength : float * unit.molar
the total concentration of ions (both positive and negative) to add using Modeller.
This does not include ions that are added to neutralize the system.
Note that only monovalent ions are currently supported.
box_dimensions : Vec3, default None
if specified, the box dimensions of the solvated system; otherwise a solvent padding of 0.9 nm is used
Returns
-------
solvated_topology : app.Topology
Topology of the system with added waters
solvated_positions : [n + 3(n_waters), 3] ndarray of Quantity nm
Solvated positions
solvated_system : openmm.System
The parameterized system, containing a barostat if one was specified.
"""
modeller = app.Modeller(topology, positions)
# Now solvate the system (skipped for the vacuum phase)
if phase != 'vacuum':
_logger.info(f"solvating at {ionic_strength} using {water_model}")
if not box_dimensions:
modeller.addSolvent(self.system_generator.forcefield, model=water_model, padding=0.9 * unit.nanometers, ionicStrength=ionic_strength)
else:
modeller.addSolvent(self.system_generator.forcefield, model=water_model, boxSize=box_dimensions, ionicStrength=ionic_strength)
else:
pass
solvated_topology = modeller.getTopology()
if box_dimensions:
solvated_topology.setUnitCellDimensions(box_dimensions)
solvated_positions = modeller.getPositions()
# Canonicalize the solvated positions: turn tuples into np.array
solvated_positions = unit.quantity.Quantity(value=np.array([list(atom_pos) for atom_pos in solvated_positions.value_in_unit_system(unit.md_unit_system)]), unit=unit.nanometers)
solvated_system = self.system_generator.create_system(solvated_topology)
return solvated_topology, solvated_positions, solvated_system
|
import argparse
import subprocess as sp
import tensorflow as tf
cflags = " ".join(tf.sysconfig.get_compile_flags())
lflags = " ".join(tf.sysconfig.get_link_flags())
tf_inc = tf.sysconfig.get_include()
tf_lib = tf.sysconfig.get_lib()
parser = argparse.ArgumentParser()
parser.add_argument('ale_path', type=str, nargs='?', default='')
args = parser.parse_args()
ale_path = args.ale_path
if ale_path == '':
print('[ ! must set ale_path ]')
raise SystemExit(1)
cmd = f'g++ -std=c++11 -shared ale.cc -o tfaleop.so -fPIC -I {tf_inc} -O2 -D_GLIBCXX_USE_CXX11_ABI=1 -L{tf_lib} {cflags} {lflags} -I{ale_path}/include -L{ale_path}/lib -lale'
print(f'- compiling using command: {cmd}')
res = sp.check_call(cmd, shell=True)
if res == 0:
print('[ successfully compiled ]')
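# If the build succeeds, the shared library can be loaded back into
# TensorFlow. Minimal sketch (the op name exposed by the module depends on
# the REGISTER_OP call in ale.cc, which is assumed here):
#
#   ale_module = tf.load_op_library('./tfaleop.so')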
|
"""Script showing several ways to launch things.
Note: never call set_launcher(launching, launched) if <launched> can be launched
by another element and <launching> is NOT in the same menu."""
import thorpy, pygame
application = thorpy.Application((500,500), "Launching alerts")
# ****************** First launcher : button 1 ******************
#This launcher launches a simple button
my_element = thorpy.make_button("I am a useless button\nClick outside to quit.")
button1 = thorpy.make_button("Launcher 1")
#we set click_quit=True below, because we did not provide an "ok" and/or "cancel"
#button to the user. The element disappears when the user clicks outside it.
thorpy.set_launcher(button1, my_element, click_quit=True)
# ****************** Second launcher : button 2 ******************
#Here the element to be launched is a box with ok and cancel buttons + custom
#elements. We could also use make_ok_box, which provides only an "ok" button.
#Note that DONE_EVENT and CANCEL_EVENT are posted accordingly at unlaunch.
box = thorpy.make_ok_cancel_box([thorpy.make_button(str(i)) for i in range(8)],
ok_text="Ok", cancel_text="Cancel")
button2 = thorpy.make_button("Launcher 2")
thorpy.set_launcher(button2, box)
# ****************** Third launcher : button 3 ******************
#This launcher launches a box, sets it green, and changes the screen color
#when unlaunched.
button3 = thorpy.make_button("Launcher 3")
other_box = thorpy.make_ok_box([thorpy.make_text("Color is gonna change...")])
my_launcher = thorpy.set_launcher(button3, other_box)#this time get the launcher
#we specify some custom operations that have to be done before/after launching:
def my_func_before():
my_launcher.launched.set_main_color((0,255,0)) #change launched box color
my_launcher.default_func_before() #default stuff
def my_func_after():
background.set_main_color((0,100,100)) #change background color
my_launcher.default_func_after() #default stuff
my_launcher.func_before = my_func_before
my_launcher.func_after = my_func_after
# ****************** Fourth launcher : event ******************
#This launcher is not linked to a ThorPy element; instead, the user can
#activate it by pressing SPACE.
unlaunch_button = thorpy.make_ok_box([thorpy.make_text("Ready to unlaunch?")])
unlaunch_button.stick_to("screen", "top", "top")
invisible_launcher = thorpy.get_launcher(unlaunch_button, autocenter=False)
# set focus to False for non-blocking behaviour:
##invisible_launcher.focus = False
#this reaction will be added to the background:
reac = thorpy.ConstantReaction(pygame.KEYDOWN, invisible_launcher.launch,
{"key":pygame.K_SPACE})
#add a text so user knows what to do
text4 = thorpy.make_text("Press space to launch invisible_launcher", 15, (0,0,255))
background = thorpy.Background(elements=[text4, button1, button2, button3])
background.add_reaction(reac)
thorpy.store(background)
menu = thorpy.Menu(background)
menu.play()
application.quit()
|
from big_ol_pile_of_manim_imports import *
import mpmath
mpmath.mp.dps = 7
def zeta(z):
max_norm = FRAME_X_RADIUS
try:
return complex(mpmath.zeta(z))
except Exception:
# mpmath can raise, e.g. at the pole z = 1; clamp to a large real value
return complex(max_norm, 0)
def d_zeta(z):
epsilon = 0.01
return (zeta(z + epsilon) - zeta(z))/epsilon
class ZetaTransformationScene(ComplexTransformationScene):
CONFIG = {
"anchor_density" : 35,
"min_added_anchors" : 10,
"max_added_anchors" : 300,
"num_anchors_to_add_per_line" : 75,
"post_transformation_stroke_width" : 2,
"default_apply_complex_function_kwargs" : {
"run_time" : 5,
},
"x_min" : 1,
"x_max" : int(FRAME_X_RADIUS+2),
"extra_lines_x_min" : -2,
"extra_lines_x_max" : 4,
"extra_lines_y_min" : -2,
"extra_lines_y_max" : 2,
}
def prepare_for_transformation(self, mob):
for line in mob.family_members_with_points():
#Find the point of the line closest to 1 in C
if not isinstance(line, Line):
line.insert_n_anchor_points(self.min_added_anchors)
continue
p1 = line.get_start()+LEFT
p2 = line.get_end()+LEFT
t = (-np.dot(p1, p2-p1))/(get_norm(p2-p1)**2)
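#(p1 and p2 are the endpoints shifted by -1, so t parametrizes the
#orthogonal projection of the origin onto that shifted line, i.e. the
#point of the original line closest to 1)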
closest_to_one = interpolate(
line.get_start(), line.get_end(), t
)
#See how big this line will become
diameter = abs(zeta(complex(*closest_to_one[:2])))
target_num_anchors = np.clip(
int(self.anchor_density*np.pi*diameter),
self.min_added_anchors,
self.max_added_anchors,
)
num_anchors = line.get_num_anchor_points()
if num_anchors < target_num_anchors:
line.insert_n_anchor_points(target_num_anchors-num_anchors)
line.make_smooth()
def add_extra_plane_lines_for_zeta(self, animate = False, **kwargs):
dense_grid = self.get_dense_grid(**kwargs)
if animate:
self.play(ShowCreation(dense_grid))
self.plane.add(dense_grid)
self.add(self.plane)
def get_dense_grid(self, step_size = 1./16):
epsilon = 0.1
x_range = np.arange(
max(self.x_min, self.extra_lines_x_min),
min(self.x_max, self.extra_lines_x_max),
step_size
)
y_range = np.arange(
max(self.y_min, self.extra_lines_y_min),
min(self.y_max, self.extra_lines_y_max),
step_size
)
vert_lines = VGroup(*[
Line(
self.y_min*UP,
self.y_max*UP,
).shift(x*RIGHT)
for x in x_range
if abs(x-1) > epsilon
])
vert_lines.set_color_by_gradient(
self.vert_start_color, self.vert_end_color
)
horiz_lines = VGroup(*[
Line(
self.x_min*RIGHT,
self.x_max*RIGHT,
).shift(y*UP)
for y in y_range
if abs(y) > epsilon
])
horiz_lines.set_color_by_gradient(
self.horiz_start_color, self.horiz_end_color
)
dense_grid = VGroup(horiz_lines, vert_lines)
dense_grid.set_stroke(width = 1)
return dense_grid
def add_reflected_plane(self, animate = False):
reflected_plane = self.get_reflected_plane()
if animate:
self.play(ShowCreation(reflected_plane, run_time = 5))
self.plane.add(reflected_plane)
self.add(self.plane)
def get_reflected_plane(self):
reflected_plane = self.plane.copy()
reflected_plane.rotate(np.pi, UP, about_point = RIGHT)
for mob in reflected_plane.family_members_with_points():
mob.set_color(
Color(rgb = 1-0.5*color_to_rgb(mob.get_color()))
)
self.prepare_for_transformation(reflected_plane)
reflected_plane.submobjects = list(reversed(
reflected_plane.family_members_with_points()
))
return reflected_plane
def apply_zeta_function(self, **kwargs):
transform_kwargs = dict(self.default_apply_complex_function_kwargs)
transform_kwargs.update(kwargs)
self.apply_complex_function(zeta, **transform_kwargs)
class TestZetaOnHalfPlane(ZetaTransformationScene):
CONFIG = {
"anchor_density" : 15,
}
def construct(self):
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.prepare_for_transformation(self.plane)
print(sum([
mob.get_num_points()
for mob in self.plane.family_members_with_points()
]))
print(len(self.plane.family_members_with_points()))
self.apply_zeta_function()
self.wait()
class TestZetaOnFullPlane(ZetaTransformationScene):
def construct(self):
self.add_transformable_plane(animate = True)
self.add_extra_plane_lines_for_zeta(animate = True)
self.add_reflected_plane(animate = True)
self.apply_zeta_function()
class TestZetaOnLine(ZetaTransformationScene):
def construct(self):
line = Line(UP+20*LEFT, UP+20*RIGHT)
self.add_transformable_plane()
self.plane.submobjects = [line]
self.apply_zeta_function()
self.wait(2)
self.play(ShowCreation(line, run_time = 10))
self.wait(3)
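#These Test* scenes are quick sanity checks. With manim of this vintage
#they would be rendered from the command line, something like
#   python extract_scene.py zeta.py TestZetaOnFullPlane
#(the exact entry-point script is an assumption; it varied by version).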
######################
class IntroduceZeta(ZetaTransformationScene):
CONFIG = {
"default_apply_complex_function_kwargs" : {
"run_time" : 8,
}
}
def construct(self):
title = TextMobject("Riemann zeta function")
title.add_background_rectangle()
title.to_corner(UP+LEFT)
func_mob = VGroup(
TexMobject("\\zeta(s) = "),
TexMobject("\\sum_{n=1}^\\infty \\frac{1}{n^s}")
)
func_mob.arrange_submobjects(RIGHT, buff = 0)
for submob in func_mob:
submob.add_background_rectangle()
func_mob.next_to(title, DOWN)
randy = Randolph().flip()
randy.to_corner(DOWN+RIGHT)
self.add_foreground_mobjects(title, func_mob)
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.play(ShowCreation(self.plane, run_time = 2))
reflected_plane = self.get_reflected_plane()
self.play(ShowCreation(reflected_plane, run_time = 2))
self.plane.add(reflected_plane)
self.wait()
self.apply_zeta_function()
self.wait(2)
self.play(FadeIn(randy))
self.play(
randy.change_mode, "confused",
randy.look_at, func_mob,
)
self.play(Blink(randy))
self.wait()
class WhyPeopleMayKnowIt(TeacherStudentsScene):
def construct(self):
title = TextMobject("Riemann zeta function")
title.to_corner(UP+LEFT)
func_mob = TexMobject(
"\\zeta(s) = \\sum_{n=1}^\\infty \\frac{1}{n^s}"
)
func_mob.next_to(title, DOWN, aligned_edge = LEFT)
self.add(title, func_mob)
mercenary_thought = VGroup(
TexMobject("\\$1{,}000{,}000").set_color_by_gradient(GREEN_B, GREEN_D),
TexMobject("\\zeta(s) = 0")
)
mercenary_thought.arrange_submobjects(DOWN)
divergent_sum = VGroup(
TexMobject("1+2+3+4+\\cdots = -\\frac{1}{12}"),
TexMobject("\\zeta(-1) = -\\frac{1}{12}")
)
divergent_sum.arrange_submobjects(DOWN)
divergent_sum[0].set_color_by_gradient(YELLOW, MAROON_B)
divergent_sum[1].set_color(BLACK)
#Thoughts
self.play(*it.chain(*[
[pi.change_mode, "pondering", pi.look_at, func_mob]
for pi in self.get_pi_creatures()
]))
self.random_blink()
self.student_thinks(
mercenary_thought, student_index = 2,
target_mode = "surprised",
)
student = self.get_students()[2]
self.random_blink()
self.wait(2)
self.student_thinks(
divergent_sum, student_index = 1,
added_anims = [student.change_mode, "plain"]
)
student = self.get_students()[1]
self.play(
student.change_mode, "confused",
student.look_at, divergent_sum,
)
self.random_blink()
self.play(*it.chain(*[
[pi.change_mode, "confused", pi.look_at, divergent_sum]
for pi in self.get_pi_creatures()
]))
self.wait()
self.random_blink()
divergent_sum[1].set_color(WHITE)
self.play(Write(divergent_sum[1]))
self.random_blink()
self.wait()
#Ask about continuation
self.student_says(
TextMobject("Can you explain \\\\" , "``analytic continuation''?"),
student_index = 1,
target_mode = "raise_right_hand"
)
self.change_student_modes(
"raise_left_hand",
"raise_right_hand",
"raise_left_hand",
)
self.play(
self.get_teacher().change_mode, "happy",
self.get_teacher().look_at, student.eyes,
)
self.random_blink()
self.wait(2)
self.random_blink()
self.wait()
class ComplexValuedFunctions(ComplexTransformationScene):
def construct(self):
title = TextMobject("Complex-valued function")
title.scale(1.5)
title.add_background_rectangle()
title.to_edge(UP)
self.add(title)
z_in = Dot(UP+RIGHT, color = YELLOW)
z_out = Dot(4*RIGHT + 2*UP, color = MAROON_B)
arrow = Arrow(z_in, z_out, buff = 0.1)
arrow.set_color(WHITE)
z = TexMobject("z").next_to(z_in, DOWN+LEFT, buff = SMALL_BUFF)
z.set_color(z_in.get_color())
f_z = TexMobject("f(z)").next_to(z_out, UP+RIGHT, buff = SMALL_BUFF)
f_z.set_color(z_out.get_color())
self.add(z_in, z)
self.wait()
self.play(ShowCreation(arrow))
self.play(
ShowCreation(z_out),
Write(f_z)
)
self.wait(2)
class PreviewZetaAndContinuation(ZetaTransformationScene):
CONFIG = {
"default_apply_complex_function_kwargs" : {
"run_time" : 4,
}
}
def construct(self):
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
reflected_plane = self.get_reflected_plane()
titles = [
TextMobject(
"What does", "%s"%s,
"look like?",
alignment = "",
)
for s in [
"$\\displaystyle \\sum_{n=1}^\\infty \\frac{1}{n^s}$",
"analytic continuation"
]
]
for mob in titles:
mob[1].set_color(YELLOW)
mob.to_corner(UP+LEFT, buff = 0.7)
mob.add_background_rectangle()
self.remove(self.plane)
self.play(Write(titles[0], run_time = 2))
self.add_foreground_mobjects(titles[0])
self.play(FadeIn(self.plane))
self.apply_zeta_function()
reflected_plane.apply_complex_function(zeta)
reflected_plane.make_smooth()
reflected_plane.set_stroke(width = 2)
self.wait()
self.play(Transform(*titles))
self.wait()
self.play(ShowCreation(
reflected_plane,
submobject_mode = "all_at_once",
run_time = 2
))
self.wait()
class AssumeKnowledgeOfComplexNumbers(ComplexTransformationScene):
def construct(self):
z = complex(5, 2)
dot = Dot(z.real*RIGHT + z.imag*UP, color = YELLOW)
line = Line(ORIGIN, dot.get_center(), color = dot.get_color())
x_line = Line(ORIGIN, z.real*RIGHT, color = GREEN_B)
y_line = Line(ORIGIN, z.imag*UP, color = RED)
y_line.shift(z.real*RIGHT)
complex_number_label = TexMobject(
"%d+%di"%(int(z.real), int(z.imag))
)
complex_number_label[0].set_color(x_line.get_color())
complex_number_label[2].set_color(y_line.get_color())
complex_number_label.next_to(dot, UP)
text = VGroup(
TextMobject("Assumed knowledge:"),
TextMobject("1) What complex numbers are."),
TextMobject("2) How to work with them."),
TextMobject("3) Maybe derivatives?"),
)
text.arrange_submobjects(DOWN, aligned_edge = LEFT)
for words in text:
words.add_background_rectangle()
text[0].shift(LEFT)
text[-1].set_color(PINK)
text.to_corner(UP+LEFT)
self.play(Write(text[0]))
self.wait()
self.play(FadeIn(text[1]))
self.play(
ShowCreation(x_line),
ShowCreation(y_line),
ShowCreation(VGroup(line, dot)),
Write(complex_number_label),
)
self.play(Write(text[2]))
self.wait(2)
self.play(Write(text[3]))
self.wait()
self.play(text[3].fade)
class DefineForRealS(PiCreatureScene):
def construct(self):
zeta_def, s_group = self.get_definition("s")
self.initial_definition(zeta_def)
self.plug_in_two(zeta_def)
self.plug_in_three_and_four(zeta_def)
self.plug_in_negative_values(zeta_def)
def initial_definition(self, zeta_def):
zeta_s, sum_terms, brace, sigma = zeta_def
self.say("Let's define $\\zeta(s)$")
self.blink()
pre_zeta_s = VGroup(
*self.pi_creature.bubble.content.copy()[-4:]
)
pre_zeta_s.add(VectorizedPoint(pre_zeta_s.get_right()))
self.play(
Transform(pre_zeta_s, zeta_s),
*self.get_bubble_fade_anims()
)
self.remove(pre_zeta_s)
self.add(zeta_s)
self.wait()
for count, term in enumerate(sum_terms):
self.play(FadeIn(term), run_time = 0.5)
if count%2 == 0:
self.wait()
self.play(
GrowFromCenter(brace),
Write(sigma),
self.pi_creature.change_mode, "pondering"
)
self.wait()
def plug_in_two(self, zeta_def):
two_def = self.get_definition("2")[0]
number_line = NumberLine(
x_min = 0,
x_max = 3,
tick_frequency = 0.25,
numbers_with_elongated_ticks = list(range(4)),
unit_size = 3,
)
number_line.add_numbers()
number_line.next_to(self.pi_creature, LEFT)
number_line.to_edge(LEFT)
self.number_line = number_line
lines, braces, dots, pi_dot = self.get_sum_lines(2)
fracs = VGroup(*[
TexMobject("\\frac{1}{%d}"%((d+1)**2)).scale(0.7)
for d, brace in enumerate(braces)
])
for frac, brace, line in zip(fracs, braces, lines):
frac.set_color(line.get_color())
frac.next_to(brace, UP, buff = SMALL_BUFF)
if frac is fracs[-1]:
frac.shift(0.5*RIGHT + 0.2*UP)
arrow = Arrow(
frac.get_bottom(), brace.get_top(),
tip_length = 0.1,
buff = 0.1
)
arrow.set_color(line.get_color())
frac.add(arrow)
pi_term = TexMobject("= \\frac{\\pi^2}{6}")
pi_term.next_to(zeta_def[1], RIGHT)
pi_arrow = Arrow(
pi_term[-1].get_bottom(), pi_dot,
color = pi_dot.get_color()
)
approx = TexMobject("\\approx 1.645")
approx.next_to(pi_term)
self.play(Transform(zeta_def, two_def))
self.wait()
self.play(ShowCreation(number_line))
for frac, brace, line in zip(fracs, braces, lines):
self.play(
Write(frac),
GrowFromCenter(brace),
ShowCreation(line),
run_time = 0.7
)
self.wait(0.7)
self.wait()
self.play(
ShowCreation(VGroup(*lines[4:])),
Write(dots)
)
self.wait()
self.play(
Write(pi_term),
ShowCreation(VGroup(pi_arrow, pi_dot)),
self.pi_creature.change_mode, "hooray"
)
self.wait()
self.play(
Write(approx),
self.pi_creature.change_mode, "happy"
)
self.wait(3)
self.play(*list(map(FadeOut, [
fracs, pi_arrow, pi_dot, approx,
])))
self.lines = lines
self.braces = braces
self.dots = dots
self.final_dot = pi_dot
self.final_sum = pi_term
def plug_in_three_and_four(self, zeta_def):
final_sums = ["1.202\\dots", "\\frac{\\pi^4}{90}"]
sum_terms, brace, sigma = zeta_def[1:]
for exponent, final_sum in zip([3, 4], final_sums):
self.transition_to_new_input(zeta_def, exponent, final_sum)
self.wait()
arrow = Arrow(sum_terms.get_left(), sum_terms.get_right())
arrow.next_to(sum_terms, DOWN)
smaller_words = TextMobject("Getting smaller")
smaller_words.next_to(arrow, DOWN)
self.arrow, self.smaller_words = arrow, smaller_words
self.wait()
self.play(
ShowCreation(arrow),
Write(smaller_words)
)
self.change_mode("happy")
self.wait(2)
def plug_in_negative_values(self, zeta_def):
zeta_s, sum_terms, brace, sigma = zeta_def
arrow = self.arrow
smaller_words = self.smaller_words
bigger_words = TextMobject("Getting \\emph{bigger}?")
bigger_words.move_to(self.smaller_words)
#plug in -1
self.transition_to_new_input(zeta_def, -1, "-\\frac{1}{12}")
self.play(
Transform(self.smaller_words, bigger_words),
self.pi_creature.change_mode, "confused"
)
new_sum_terms = TexMobject(
list("1+2+3+4+") + ["\\cdots"]
)
new_sum_terms.move_to(sum_terms, LEFT)
arrow.target = arrow.copy().next_to(new_sum_terms, DOWN)
arrow.target.stretch_to_fit_width(new_sum_terms.get_width())
bigger_words.next_to(arrow.target, DOWN)
new_brace = Brace(new_sum_terms, UP)
self.play(
Transform(sum_terms, new_sum_terms),
Transform(brace, new_brace),
sigma.next_to, new_brace, UP,
MoveToTarget(arrow),
Transform(smaller_words, bigger_words),
self.final_sum.next_to, new_sum_terms, RIGHT
)
self.wait(3)
#plug in -2
new_sum_terms = TexMobject(
list("1+4+9+16+") + ["\\cdots"]
)
new_sum_terms.move_to(sum_terms, LEFT)
new_zeta_def, ignore = self.get_definition("-2")
zeta_minus_two, ignore, ignore, new_sigma = new_zeta_def
new_sigma.next_to(brace, UP)
new_final_sum = TexMobject("=0")
new_final_sum.next_to(new_sum_terms)
lines, braces, dots, final_dot = self.get_sum_lines(-2)
self.play(
Transform(zeta_s, zeta_minus_two),
Transform(sum_terms, new_sum_terms),
Transform(sigma, new_sigma),
Transform(self.final_sum, new_final_sum),
Transform(self.lines, lines),
Transform(self.braces, braces),
)
self.wait()
self.change_mode("pleading")
self.wait(2)
def get_definition(self, input_string, input_color = YELLOW):
inputs = VGroup()
num_shown_terms = 4
n_input_chars = len(input_string)
zeta_s_eq = TexMobject("\\zeta(%s) = "%input_string)
zeta_s_eq.to_edge(LEFT, buff = LARGE_BUFF)
zeta_s_eq.shift(0.5*UP)
inputs.add(*zeta_s_eq[2:2+n_input_chars])
sum_terms = TexMobject(*it.chain(*list(zip(
[
"\\frac{1}{%d^{%s}}"%(d, input_string)
for d in range(1, 1+num_shown_terms)
],
it.cycle(["+"])
))))
sum_terms.add(TexMobject("\\cdots").next_to(sum_terms))
sum_terms.next_to(zeta_s_eq, RIGHT)
for x in range(num_shown_terms):
inputs.add(*sum_terms[2*x][-n_input_chars:])
brace = Brace(sum_terms, UP)
sigma = TexMobject(
"\\sum_{n=1}^\\infty \\frac{1}{n^{%s}}"%input_string
)
sigma.next_to(brace, UP)
inputs.add(*sigma[-n_input_chars:])
inputs.set_color(input_color)
group = VGroup(zeta_s_eq, sum_terms, brace, sigma)
return group, inputs
def get_sum_lines(self, exponent, line_thickness = 6):
num_lines = 100 if exponent > 0 else 6
powers = [0] + [x**(-exponent) for x in range(1, num_lines)]
power_sums = np.cumsum(powers)
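#Consecutive partial sums of sum(1/n**exponent) become the endpoints of
#the stacked segments, so the picture literally adds the series up along
#the number line; the prepended 0 makes the first segment start at 0.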
lines = VGroup(*[
Line(
self.number_line.number_to_point(s1),
self.number_line.number_to_point(s2),
)
for s1, s2 in zip(power_sums, power_sums[1:])
])
lines.set_stroke(width = line_thickness)
# VGroup(*lines[:4]).set_color_by_gradient(RED, GREEN_B)
# VGroup(*lines[4:]).set_color_by_gradient(GREEN_B, MAROON_B)
VGroup(*lines[::2]).set_color(MAROON_B)
VGroup(*lines[1::2]).set_color(RED)
braces = VGroup(*[
Brace(line, UP)
for line in lines[:4]
])
dots = TexMobject("...")
dots.stretch_to_fit_width(
0.8 * VGroup(*lines[4:]).get_width()
)
dots.next_to(braces, RIGHT, buff = SMALL_BUFF)
final_dot = Dot(
self.number_line.number_to_point(power_sums[-1]),
color = GREEN_B
)
return lines, braces, dots, final_dot
def transition_to_new_input(self, zeta_def, exponent, final_sum):
new_zeta_def = self.get_definition(str(exponent))[0]
lines, braces, dots, final_dot = self.get_sum_lines(exponent)
final_sum = TexMobject("=" + final_sum)
final_sum.next_to(new_zeta_def[1][-1])
final_sum.shift(SMALL_BUFF*UP)
self.play(
Transform(zeta_def, new_zeta_def),
Transform(self.lines, lines),
Transform(self.braces, braces),
Transform(self.dots, dots),
Transform(self.final_dot, final_dot),
Transform(self.final_sum, final_sum),
self.pi_creature.change_mode, "pondering"
)
class ReadIntoZetaFunction(Scene):
CONFIG = {
"statement" : "$\\zeta(-1) = -\\frac{1}{12}$",
"target_mode" : "frustrated",
}
def construct(self):
randy = Randolph(mode = "pondering")
randy.shift(3*LEFT+DOWN)
paper = Rectangle(width = 4, height = 5)
paper.next_to(randy, RIGHT, aligned_edge = DOWN)
paper.set_color(WHITE)
max_width = 0.8*paper.get_width()
title = TextMobject("$\\zeta(s)$ manual")
title.next_to(paper.get_top(), DOWN)
title.set_color(YELLOW)
paper.add(title)
paragraph_lines = VGroup(
Line(LEFT, RIGHT),
Line(LEFT, RIGHT).shift(0.2*DOWN),
Line(LEFT, ORIGIN).shift(0.4*DOWN)
)
paragraph_lines.set_width(max_width)
paragraph_lines.next_to(title, DOWN, MED_LARGE_BUFF)
paper.add(paragraph_lines)
max_height = 1.5*paragraph_lines.get_height()
statement = TextMobject(self.statement)
if statement.get_width() > max_width:
statement.set_width(max_width)
if statement.get_height() > max_height:
statement.set_height(max_height)
statement.next_to(paragraph_lines, DOWN)
statement.set_color(GREEN_B)
paper.add(paragraph_lines.copy().next_to(statement, DOWN, MED_LARGE_BUFF))
randy.look_at(statement)
self.add(randy, paper)
self.play(Write(statement))
self.play(
randy.change_mode, self.target_mode,
randy.look_at, title
)
self.play(Blink(randy))
self.play(randy.look_at, statement)
self.wait()
class ReadIntoZetaFunctionTrivialZero(ReadIntoZetaFunction):
CONFIG = {
"statement" : "$\\zeta(-2n) = 0$"
}
class ReadIntoZetaFunctionAnalyticContinuation(ReadIntoZetaFunction):
CONFIG = {
"statement" : "...analytic \\\\ continuation...",
"target_mode" : "confused",
}
class IgnoreNegatives(TeacherStudentsScene):
def construct(self):
definition = TexMobject("""
\\zeta(s) = \\sum_{n=1}^{\\infty} \\frac{1}{n^s}
""")
VGroup(definition[2], definition[-1]).set_color(YELLOW)
definition.to_corner(UP+LEFT)
self.add(definition)
brace = Brace(definition, DOWN)
only_s_gt_1 = brace.get_text("""
Only defined
for $s > 1$
""")
only_s_gt_1[-3].set_color(YELLOW)
self.change_student_modes(*["confused"]*3)
words = TextMobject(
"Ignore $s \\le 1$ \\dots \\\\",
"For now."
)
words[0][6].set_color(YELLOW)
words[1].set_color(BLACK)
self.teacher_says(words)
self.play(words[1].set_color, WHITE)
self.change_student_modes(*["happy"]*3)
self.play(
GrowFromCenter(brace),
Write(only_s_gt_1),
*it.chain(*[
[pi.look_at, definition]
for pi in self.get_pi_creatures()
])
)
self.random_blink(3)
class RiemannFatherOfComplex(ComplexTransformationScene):
def construct(self):
name = TextMobject(
"Bernhard Riemann $\\rightarrow$ Complex analysis"
)
name.to_corner(UP+LEFT)
name.shift(0.25*DOWN)
name.add_background_rectangle()
# photo = Square()
photo = ImageMobject("Riemann", invert = False)
photo.set_width(5)
photo.next_to(name, DOWN, aligned_edge = LEFT)
self.add(photo)
self.play(Write(name))
self.wait()
input_dot = Dot(2*RIGHT+UP, color = YELLOW)
arc = Arc(-2*np.pi/3)
arc.rotate(-np.pi)
arc.add_tip()
arc.shift(input_dot.get_top()-arc.points[0]+SMALL_BUFF*UP)
output_dot = Dot(
arc.points[-1] + SMALL_BUFF*(2*RIGHT+DOWN),
color = MAROON_B
)
for dot, tex in (input_dot, "z"), (output_dot, "f(z)"):
dot.label = TexMobject(tex)
dot.label.add_background_rectangle()
dot.label.next_to(dot, DOWN+RIGHT, buff = SMALL_BUFF)
dot.label.set_color(dot.get_color())
self.play(
ShowCreation(input_dot),
Write(input_dot.label)
)
self.play(ShowCreation(arc))
self.play(
ShowCreation(output_dot),
Write(output_dot.label)
)
self.wait()
class FromRealToComplex(ComplexTransformationScene):
CONFIG = {
"plane_config" : {
"space_unit_to_x_unit" : 2,
"space_unit_to_y_unit" : 2,
},
"background_label_scale_val" : 0.7,
"output_color" : GREEN_B,
"num_lines_in_spiril_sum" : 1000,
}
def construct(self):
self.handle_background()
self.show_real_to_real()
self.transition_to_complex()
self.single_out_complex_exponent()
##Fade to several scenes defined below
self.show_s_equals_two_lines()
self.transition_to_spiral_sum()
self.vary_complex_input()
self.show_domain_of_convergence()
self.ask_about_visualizing_all()
def handle_background(self):
self.remove(self.background)
#Oh yeah, this is great practice...
self.background[-1].remove(*self.background[-1][-3:])
def show_real_to_real(self):
zeta = self.get_zeta_definition("2", "\\frac{\\pi^2}{6}")
number_line = NumberLine(
unit_size = 2,
tick_frequency = 0.5,
numbers_with_elongated_ticks = list(range(-2, 3))
)
number_line.add_numbers()
input_dot = Dot(number_line.number_to_point(2))
input_dot.set_color(YELLOW)
output_dot = Dot(number_line.number_to_point(np.pi**2/6))
output_dot.set_color(self.output_color)
arc = Arc(
2*np.pi/3, start_angle = np.pi/6,
)
arc.stretch_to_fit_width(
(input_dot.get_center()-output_dot.get_center())[0]
)
arc.stretch_to_fit_height(0.5)
arc.next_to(input_dot.get_center(), UP, aligned_edge = RIGHT)
arc.add_tip()
two = zeta[1][2].copy()
sum_term = zeta[-1]
self.add(number_line, *zeta[:-1])
self.wait()
self.play(Transform(two, input_dot))
self.remove(two)
self.add(input_dot)
self.play(ShowCreation(arc))
self.play(ShowCreation(output_dot))
self.play(Transform(output_dot.copy(), sum_term))
self.remove(*self.get_mobjects_from_last_animation())
self.add(sum_term)
self.wait(2)
self.play(
ShowCreation(
self.background,
run_time = 2
),
FadeOut(VGroup(arc, output_dot, number_line)),
Animation(zeta),
Animation(input_dot)
)
self.wait(2)
self.zeta = zeta
self.input_dot = input_dot
def transition_to_complex(self):
complex_zeta = self.get_zeta_definition("2+i", "???")
input_dot = self.input_dot
input_dot.generate_target()
input_dot.target.move_to(
self.background.num_pair_to_point((2, 1))
)
input_label = TexMobject("2+i")
input_label.set_color(YELLOW)
input_label.next_to(input_dot.target, DOWN+RIGHT, buff = SMALL_BUFF)
input_label.add_background_rectangle()
input_label.save_state()
input_label.replace(VGroup(*complex_zeta[1][2:5]))
input_label.background_rectangle.scale_in_place(0.01)
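#The label was saved in position next to the dot's target, then shrunk
#onto the "2+i" of the formula; playing input_label.restore later
#animates it flying from the formula down to the input dot.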
self.input_label = input_label
self.play(Transform(self.zeta, complex_zeta))
self.wait()
self.play(
input_label.restore,
MoveToTarget(input_dot)
)
self.wait(2)
def single_out_complex_exponent(self):
frac_scale_factor = 1.2
randy = Randolph()
randy.to_corner()
bubble = randy.get_bubble(height = 4)
bubble.set_fill(BLACK, opacity = 1)
frac = VGroup(
VectorizedPoint(self.zeta[2][3].get_left()),
self.zeta[2][3],
VectorizedPoint(self.zeta[2][3].get_right()),
self.zeta[2][4],
).copy()
frac.generate_target()
frac.target.scale(frac_scale_factor)
bubble.add_content(frac.target)
new_frac = TexMobject(
"\\Big(", "\\frac{1}{2}", "\\Big)", "^{2+i}"
)
new_frac[-1].set_color(YELLOW)
new_frac.scale(frac_scale_factor)
new_frac.move_to(frac.target)
new_frac.shift(LEFT+0.2*UP)
words = TextMobject("Not repeated \\\\", " multiplication")
words.scale(0.8)
words.set_color(RED)
words.next_to(new_frac, RIGHT)
new_words = TextMobject("Not \\emph{super} \\\\", "crucial to know...")
new_words.replace(words)
new_words.scale_in_place(1.3)
self.play(FadeIn(randy))
self.play(
randy.change_mode, "confused",
randy.look_at, bubble,
ShowCreation(bubble),
MoveToTarget(frac)
)
self.play(Blink(randy))
self.play(Transform(frac, new_frac))
self.play(Write(words))
for x in range(2):
self.wait(2)
self.play(Blink(randy))
self.play(
Transform(words, new_words),
randy.change_mode, "maybe"
)
self.wait()
self.play(Blink(randy))
self.play(randy.change_mode, "happy")
self.wait()
self.play(*list(map(FadeOut, [randy, bubble, frac, words])))
def show_s_equals_two_lines(self):
self.input_label.save_state()
zeta = self.get_zeta_definition("2", "\\frac{\\pi^2}{6}")
lines, output_dot = self.get_sum_lines(2)
sum_terms = self.zeta[2][:-1:3]
dots_copy = zeta[2][-1].copy()
pi_copy = zeta[3].copy()
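#Helper: play a Transform, then swap the animated stand-in for the real
#target so subsequent animations act on the genuine mobject.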
def transform_and_replace(m1, m2):
self.play(Transform(m1, m2))
self.remove(m1)
self.add(m2)
self.play(
self.input_dot.shift, 2*DOWN,
self.input_label.fade, 0.7,
)
self.play(Transform(self.zeta, zeta))
for term, line in zip(sum_terms, lines):
line.save_state()
line.next_to(term, DOWN)
term_copy = term.copy()
transform_and_replace(term_copy, line)
self.play(line.restore)
later_lines = VGroup(*lines[4:])
transform_and_replace(dots_copy, later_lines)
self.wait()
transform_and_replace(pi_copy, output_dot)
self.wait()
self.lines = lines
self.output_dot = output_dot
def transition_to_spiral_sum(self):
zeta = self.get_zeta_definition("2+i", "1.15 - 0.44i")
zeta.set_width(FRAME_WIDTH-1)
zeta.to_corner(UP+LEFT)
lines, output_dot = self.get_sum_lines(complex(2, 1))
self.play(
self.input_dot.shift, 2*UP,
self.input_label.restore,
)
self.wait()
self.play(Transform(self.zeta, zeta))
self.wait()
self.play(
Transform(self.lines, lines),
Transform(self.output_dot, output_dot),
run_time = 2,
path_arc = -np.pi/6,
)
self.wait()
def vary_complex_input(self):
zeta = self.get_zeta_definition("s", "")
zeta[3].set_color(BLACK)
self.play(Transform(self.zeta, zeta))
self.play(FadeOut(self.input_label))
self.wait(2)
inputs = [
complex(1.5, 1.8),
complex(1.5, -1),
complex(3, -1),
complex(1.5, 1.8),
complex(1.5, -1.8),
complex(1.4, -1.8),
complex(1.5, 0),
complex(2, 1),
]
for s in inputs:
input_point = self.z_to_point(s)
lines, output_dot = self.get_sum_lines(s)
self.play(
self.input_dot.move_to, input_point,
Transform(self.lines, lines),
Transform(self.output_dot, output_dot),
run_time = 2
)
self.wait()
self.wait()
def show_domain_of_convergence(self, opacity = 0.2):
domain = Rectangle(
width = FRAME_X_RADIUS-2,
height = FRAME_HEIGHT,
stroke_width = 0,
fill_color = YELLOW,
fill_opacity = opacity,
)
domain.to_edge(RIGHT, buff = 0)
anti_domain = Rectangle(
width = FRAME_X_RADIUS+2,
height = FRAME_HEIGHT,
stroke_width = 0,
fill_color = RED,
fill_opacity = opacity,
)
anti_domain.to_edge(LEFT, buff = 0)
domain_words = TextMobject("""
$\\zeta(s)$ happily
converges and
makes sense
""")
domain_words.to_corner(UP+RIGHT, buff = MED_LARGE_BUFF)
anti_domain_words = TextMobject("""
Not so much...
""")
anti_domain_words.next_to(ORIGIN, LEFT, buff = LARGE_BUFF)
anti_domain_words.shift(1.5*DOWN)
self.play(FadeIn(domain))
self.play(Write(domain_words))
self.wait()
self.play(FadeIn(anti_domain))
self.play(Write(anti_domain_words))
self.wait(2)
self.play(*list(map(FadeOut, [
anti_domain, anti_domain_words,
])))
self.domain_words = domain_words
def ask_about_visualizing_all(self):
morty = Mortimer().flip()
morty.scale(0.7)
morty.to_corner(DOWN+LEFT)
bubble = morty.get_bubble(SpeechBubble, height = 4)
bubble.set_fill(BLACK, opacity = 0.5)
bubble.write("""
How can we visualize
this for all inputs?
""")
self.play(FadeIn(morty))
self.play(
morty.change_mode, "speaking",
ShowCreation(bubble),
Write(bubble.content)
)
self.play(Blink(morty))
self.wait(3)
self.play(
morty.change_mode, "pondering",
morty.look_at, self.input_dot,
*list(map(FadeOut, [
bubble, bubble.content, self.domain_words
]))
)
arrow = Arrow(self.input_dot, self.output_dot, buff = SMALL_BUFF)
arrow.set_color(WHITE)
self.play(ShowCreation(arrow))
self.play(Blink(morty))
self.wait()
def get_zeta_definition(self, input_string, output_string, input_color = YELLOW):
inputs = VGroup()
num_shown_terms = 4
n_input_chars = len(input_string)
zeta_s_eq = TexMobject("\\zeta(%s) = "%input_string)
zeta_s_eq.to_edge(LEFT, buff = LARGE_BUFF)
zeta_s_eq.shift(0.5*UP)
inputs.add(*zeta_s_eq[2:2+n_input_chars])
raw_sum_terms = TexMobject(*[
"\\frac{1}{%d^{%s}} + "%(d, input_string)
for d in range(1, 1+num_shown_terms)
])
sum_terms = VGroup(*it.chain(*[
[
VGroup(*term[:3]),
VGroup(*term[3:-1]),
term[-1],
]
for term in raw_sum_terms
]))
sum_terms.add(TexMobject("\\cdots").next_to(sum_terms[-1]))
sum_terms.next_to(zeta_s_eq, RIGHT)
for x in range(num_shown_terms):
inputs.add(*sum_terms[3*x+1])
output = TexMobject("= \\," + output_string)
output.next_to(sum_terms, RIGHT)
output.set_color(self.output_color)
inputs.set_color(input_color)
group = VGroup(zeta_s_eq, sum_terms, output)
group.to_edge(UP)
group.add_to_back(BackgroundRectangle(group))
return group
def get_sum_lines(self, exponent, line_thickness = 6):
powers = [0] + [
x**(-exponent)
for x in range(1, self.num_lines_in_spiral_sum)
]
power_sums = np.cumsum(powers)
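#Same partial-sum picture as on the real line, but for complex s each
#term n**(-s) shrinks *and* rotates, so the chain of segments traces a
#spiral in the output plane.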
lines = VGroup(*[
Line(*list(map(self.z_to_point, z_pair)))
for z_pair in zip(power_sums, power_sums[1:])
])
widths = np.linspace(line_thickness, 0, len(list(lines)))
for line, width in zip(lines, widths):
line.set_stroke(width = width)
VGroup(*lines[::2]).set_color(MAROON_B)
VGroup(*lines[1::2]).set_color(RED)
final_dot = Dot(
# self.z_to_point(power_sums[-1]),
self.z_to_point(zeta(exponent)),
color = self.output_color
)
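#The endpoint is placed at the exact value zeta(s) rather than at the
#truncated sum, whose leftover error can be visible for Re(s) near 1.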
return lines, final_dot
class TerritoryOfExponents(ComplexTransformationScene):
def construct(self):
self.add_title()
familiar_territory = TextMobject("Familiar territory")
familiar_territory.set_color(YELLOW)
familiar_territory.next_to(ORIGIN, UP+RIGHT)
familiar_territory.shift(2*UP)
real_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
real_line.set_color(YELLOW)
arrow1 = Arrow(familiar_territory.get_bottom(), real_line.get_left())
arrow2 = Arrow(familiar_territory.get_bottom(), real_line.get_right())
VGroup(arrow1, arrow2).set_color(WHITE)
extended_realm = TextMobject("Extended realm")
extended_realm.move_to(familiar_territory)
full_plane = Rectangle(
width = FRAME_WIDTH,
height = FRAME_HEIGHT,
fill_color = YELLOW,
fill_opacity = 0.3
)
self.add(familiar_territory)
self.play(ShowCreation(arrow1))
self.play(
Transform(arrow1, arrow2),
ShowCreation(real_line)
)
self.play(FadeOut(arrow1))
self.play(
FadeIn(full_plane),
Transform(familiar_territory, extended_realm),
Animation(real_line)
)
def add_title(self):
exponent = TexMobject(
"\\left(\\frac{1}{2}\\right)^s"
)
exponent[-1].set_color(YELLOW)
exponent.next_to(ORIGIN, LEFT, MED_LARGE_BUFF).to_edge(UP)
self.add_foreground_mobjects(exponent)
class ComplexExponentiation(Scene):
def construct(self):
self.extract_pure_imaginary_part()
self.add_on_planes()
self.show_imaginary_powers()
def extract_pure_imaginary_part(self):
original = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2+i}"
)
split = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2}",
"\\left(\\frac{1}{2}\\right)", "^{i}",
)
VGroup(original[-1], split[1], split[3]).set_color(YELLOW)
VGroup(original, split).shift(UP)
real_part = VGroup(*split[:2])
imag_part = VGroup(*split[2:])
brace = Brace(real_part)
we_understand = brace.get_text(
"We understand this"
)
VGroup(brace, we_understand).set_color(GREEN_B)
self.add(original)
self.wait()
self.play(*[
Transform(*pair)
for pair in [
(original[0], split[0]),
(original[1][0], split[1]),
(original[0].copy(), split[2]),
(VGroup(*original[1][1:]), split[3]),
]
])
self.remove(*self.get_mobjects_from_last_animation())
self.add(real_part, imag_part)
self.wait()
self.play(
GrowFromCenter(brace),
FadeIn(we_understand),
real_part.set_color, GREEN_B
)
self.wait()
self.play(
imag_part.move_to, imag_part.get_left(),
*list(map(FadeOut, [brace, we_understand, real_part]))
)
self.wait()
self.imag_exponent = imag_part
def add_on_planes(self):
left_plane = NumberPlane(x_radius = (FRAME_X_RADIUS-1)/2)
left_plane.to_edge(LEFT, buff = 0)
imag_line = Line(DOWN, UP).scale(FRAME_Y_RADIUS)
imag_line.set_color(YELLOW).fade(0.3)
imag_line.move_to(left_plane.get_center())
left_plane.add(imag_line)
left_title = TextMobject("Input space")
left_title.add_background_rectangle()
left_title.set_color(YELLOW)
left_title.next_to(left_plane.get_top(), DOWN)
right_plane = NumberPlane(x_radius = (FRAME_X_RADIUS-1)/2)
right_plane.to_edge(RIGHT, buff = 0)
unit_circle = Circle()
unit_circle.set_color(MAROON_B).fade(0.3)
unit_circle.shift(right_plane.get_center())
right_plane.add(unit_circle)
right_title = TextMobject("Output space")
right_title.add_background_rectangle()
right_title.set_color(MAROON_B)
right_title.next_to(right_plane.get_top(), DOWN)
for plane in left_plane, right_plane:
labels = VGroup()
for x in range(-2, 3):
label = TexMobject(str(x))
label.move_to(plane.num_pair_to_point((x, 0)))
labels.add(label)
for y in range(-3, 4):
if y == 0:
continue
label = TexMobject(str(y) + "i")
label.move_to(plane.num_pair_to_point((0, y)))
labels.add(label)
for label in labels:
label.scale_in_place(0.5)
label.next_to(
label.get_center(), DOWN+RIGHT,
buff = SMALL_BUFF
)
plane.add(labels)
arrow = Arrow(LEFT, RIGHT)
self.play(
ShowCreation(left_plane),
Write(left_title),
run_time = 3
)
self.play(
ShowCreation(right_plane),
Write(right_title),
run_time = 3
)
self.play(ShowCreation(arrow))
self.wait()
self.left_plane = left_plane
self.right_plane = right_plane
def show_imaginary_powers(self):
i = complex(0, 1)
input_dot = Dot(self.z_to_point(i))
input_dot.set_color(YELLOW)
output_dot = Dot(self.z_to_point(0.5**(i), is_input = False))
output_dot.set_color(MAROON_B)
output_dot.save_state()
output_dot.move_to(input_dot)
output_dot.set_color(input_dot.get_color())
curr_base = 0.5
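#A purely imaginary exponent only rotates: b**(1j*y) = exp(1j*y*ln(b))
#has modulus 1, so the output dot rides the unit circle as the input dot
#slides along the imaginary axis.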
def output_dot_update(ouput_dot):
y = input_dot.get_center()[1]
output_dot.move_to(self.z_to_point(
curr_base**complex(0, y), is_input = False
))
return output_dot
def walk_up_and_down():
for vect in 3*DOWN, 5*UP, 5*DOWN, 2*UP:
self.play(
input_dot.shift, vect,
UpdateFromFunc(output_dot, output_dot_update),
run_time = 3
)
exp = self.imag_exponent[-1]
new_exp = TexMobject("ti")
new_exp.set_color(exp.get_color())
new_exp.set_height(exp.get_height())
new_exp.move_to(exp, LEFT)
nine = TexMobject("9")
nine.set_color(BLUE)
denom = self.imag_exponent[0][3]
denom.save_state()
nine.replace(denom)
self.play(Transform(exp, new_exp))
self.play(input_dot.shift, 2*UP)
self.play(input_dot.shift, 2*DOWN)
self.wait()
self.play(output_dot.restore)
self.wait()
walk_up_and_down()
self.wait()
curr_base = 1./9
self.play(Transform(denom, nine))
walk_up_and_down()
self.wait()
def z_to_point(self, z, is_input = True):
if is_input:
plane = self.left_plane
else:
plane = self.right_plane
return plane.num_pair_to_point((z.real, z.imag))
class SizeAndRotationBreakdown(Scene):
def construct(self):
original = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2+i}"
)
split = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2}",
"\\left(\\frac{1}{2}\\right)", "^{i}",
)
VGroup(original[-1], split[1], split[3]).set_color(YELLOW)
VGroup(original, split).shift(UP)
real_part = VGroup(*split[:2])
imag_part = VGroup(*split[2:])
size_brace = Brace(real_part)
size = size_brace.get_text("Size")
rotation_brace = Brace(imag_part, UP)
rotation = rotation_brace.get_text("Rotation")
self.add(original)
self.wait()
self.play(*[
Transform(*pair)
for pair in [
(original[0], split[0]),
(original[1][0], split[1]),
(original[0].copy(), split[2]),
(VGroup(*original[1][1:]), split[3]),
]
])
self.play(
GrowFromCenter(size_brace),
Write(size)
)
self.play(
GrowFromCenter(rotation_brace),
Write(rotation)
)
self.wait()
class SeeLinksInDescription(TeacherStudentsScene):
def construct(self):
self.teacher_says("""
See links in the
description for more.
""")
self.play(*it.chain(*[
[pi.change_mode, "hooray", pi.look, DOWN]
for pi in self.get_students()
]))
self.random_blink(3)
class ShowMultiplicationOfRealAndImaginaryExponentialParts(FromRealToComplex):
def construct(self):
self.break_up_exponent()
self.show_multiplication()
def break_up_exponent(self):
original = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2+i}"
)
split = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2}",
"\\left(\\frac{1}{2}\\right)", "^{i}",
)
VGroup(original[-1], split[1], split[3]).set_color(YELLOW)
VGroup(original, split).to_corner(UP+LEFT)
rect = BackgroundRectangle(split)
real_part = VGroup(*split[:2])
imag_part = VGroup(*split[2:])
self.add(rect, original)
self.wait()
self.play(*[
Transform(*pair)
for pair in [
(original[0], split[0]),
(original[1][0], split[1]),
(original[0].copy(), split[2]),
(VGroup(*original[1][1:]), split[3]),
]
])
self.remove(*self.get_mobjects_from_last_animation())
self.add(real_part, imag_part)
self.wait()
self.real_part = real_part
self.imag_part = imag_part
def show_multiplication(self):
real_part = self.real_part.copy()
imag_part = self.imag_part.copy()
for part in real_part, imag_part:
part.add_to_back(BackgroundRectangle(part))
fourth_point = self.z_to_point(0.25)
fourth_line = Line(ORIGIN, fourth_point)
brace = Brace(fourth_line, UP, buff = SMALL_BUFF)
fourth_dot = Dot(fourth_point)
fourth_group = VGroup(fourth_line, brace, fourth_dot)
fourth_group.set_color(RED)
circle = Circle(radius = 2, color = MAROON_B)
circle.fade(0.3)
imag_power_point = self.z_to_point(0.5**complex(0, 1))
imag_power_dot = Dot(imag_power_point)
imag_power_line = Line(ORIGIN, imag_power_point)
VGroup(imag_power_dot, imag_power_line).set_color(MAROON_B)
full_power_tex = TexMobject(
"\\left(\\frac{1}{2}\\right)", "^{2+i}"
)
full_power_tex[-1].set_color(YELLOW)
full_power_tex.add_background_rectangle()
full_power_tex.scale(0.7)
full_power_tex.next_to(
0.5*self.z_to_point(0.5**complex(2, 1)),
UP+RIGHT
)
self.play(
real_part.scale, 0.7,
real_part.next_to, brace, UP, SMALL_BUFF, LEFT,
ShowCreation(fourth_dot)
)
self.play(
GrowFromCenter(brace),
ShowCreation(fourth_line),
)
self.wait()
self.play(
imag_part.scale, 0.7,
imag_part.next_to, imag_power_dot, DOWN+RIGHT, SMALL_BUFF,
ShowCreation(imag_power_dot)
)
self.play(ShowCreation(circle), Animation(imag_power_dot))
self.play(ShowCreation(imag_power_line))
self.wait(2)
self.play(
fourth_group.rotate, imag_power_line.get_angle()
)
real_part.generate_target()
imag_part.generate_target()
real_part.target.next_to(brace, UP+RIGHT, buff = 0)
imag_part.target.next_to(real_part.target, buff = 0)
self.play(*list(map(MoveToTarget, [real_part, imag_part])))
self.wait()
class ComplexFunctionsAsTransformations(ComplexTransformationScene):
def construct(self):
self.add_title()
input_dots, output_dots, arrows = self.get_dots()
self.play(FadeIn(
input_dots,
run_time = 2,
submobject_mode = "lagged_start"
))
for in_dot, out_dot, arrow in zip(input_dots, output_dots, arrows):
self.play(
Transform(in_dot.copy(), out_dot),
ShowCreation(arrow)
)
self.wait()
self.wait()
def add_title(self):
title = TextMobject("Complex functions as transformations")
title.add_background_rectangle()
title.to_edge(UP)
self.add(title)
def get_dots(self):
input_points = [
RIGHT+2*UP,
4*RIGHT+DOWN,
2*LEFT+2*UP,
LEFT+DOWN,
6*LEFT+DOWN,
]
output_nudges = [
DOWN+RIGHT,
2*UP+RIGHT,
2*RIGHT+2*DOWN,
2*RIGHT+DOWN,
RIGHT+2*UP,
]
input_dots = VGroup(*list(map(Dot, input_points)))
input_dots.set_color(YELLOW)
output_dots = VGroup(*[
Dot(ip + on)
for ip, on in zip(input_points, output_nudges)
])
output_dots.set_color(MAROON_B)
arrows = VGroup(*[
Arrow(in_dot, out_dot, buff = 0.1, color = WHITE)
for in_dot, out_dot in zip(input_dots, output_dots)
])
for i, dot in enumerate(input_dots):
label = TexMobject("s_%d"%i)
label.set_color(dot.get_color())
label.next_to(dot, DOWN+LEFT, buff = SMALL_BUFF)
dot.add(label)
for i, dot in enumerate(output_dots):
label = TexMobject("f(s_%d)"%i)
label.set_color(dot.get_color())
label.next_to(dot, UP+RIGHT, buff = SMALL_BUFF)
dot.add(label)
return input_dots, output_dots, arrows
class VisualizingSSquared(ComplexTransformationScene):
CONFIG = {
"num_anchors_to_add_per_line" : 100,
"horiz_end_color" : GOLD,
"y_min" : 0,
}
def construct(self):
self.add_title()
self.plug_in_specific_values()
self.show_transformation()
self.comment_on_two_dimensions()
def add_title(self):
title = TexMobject("f(", "s", ") = ", "s", "^2")
title.set_color_by_tex("s", YELLOW)
title.add_background_rectangle()
title.scale(1.5)
title.to_corner(UP+LEFT)
self.play(Write(title))
self.add_foreground_mobject(title)
self.wait()
self.title = title
def plug_in_specific_values(self):
inputs = list(map(complex, [2, -1, complex(0, 1)]))
input_dots = VGroup(*[
Dot(self.z_to_point(z), color = YELLOW)
for z in inputs
])
output_dots = VGroup(*[
Dot(self.z_to_point(z**2), color = BLUE)
for z in inputs
])
arrows = VGroup()
for z, dot in zip(inputs, input_dots):
path = ParametricFunction(
lambda t : self.z_to_point(z**(1+t))
)
dot.path = path
arrow = ParametricFunction(
lambda t : self.z_to_point(z**(1.1+0.8*t))
)
stand_in_arrow = Arrow(
arrow.points[-2], arrow.points[-1],
tip_length = 0.2
)
arrow.add(stand_in_arrow.tip)
arrows.add(arrow)
arrows.set_color(WHITE)
for input_dot, output_dot, arrow in zip(input_dots, output_dots, arrows):
input_dot.save_state()
input_dot.move_to(self.title[1][1])
input_dot.set_fill(opacity = 0)
self.play(input_dot.restore)
self.wait()
self.play(ShowCreation(arrow))
self.play(ShowCreation(output_dot))
self.wait()
self.add_foreground_mobjects(arrows, output_dots, input_dots)
self.input_dots = input_dots
self.output_dots = output_dots
def add_transformable_plane(self, **kwargs):
ComplexTransformationScene.add_transformable_plane(self, **kwargs)
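#With y_min = 0 in CONFIG the parent builds only the upper half plane;
#a mirrored copy and two colored horizontal rays are added by hand.
#Keeping the halves separate matters because z -> z**2 maps each half
#onto the full plane, so a single grid would fold over itself.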
self.plane.next_to(ORIGIN, UP, buff = 0.01)
self.plane.add(self.plane.copy().rotate(np.pi, RIGHT))
self.plane.add(
Line(ORIGIN, FRAME_X_RADIUS*RIGHT, color = self.horiz_end_color),
Line(ORIGIN, FRAME_X_RADIUS*LEFT, color = self.horiz_end_color),
)
self.add(self.plane)
def show_transformation(self):
self.add_transformable_plane()
self.play(ShowCreation(self.plane, run_time = 3))
self.wait()
self.apply_complex_homotopy(
lambda z, t : z**(1+t),
added_anims = [
MoveAlongPath(dot, dot.path, run_time = 5)
for dot in self.input_dots
],
run_time = 5
)
self.wait(2)
def comment_on_two_dimensions(self):
morty = Mortimer().flip()
morty.scale(0.7)
morty.to_corner(DOWN+LEFT)
bubble = morty.get_bubble(SpeechBubble, height = 2, width = 4)
bubble.set_fill(BLACK, opacity = 0.9)
bubble.write("""
It all happens
in two dimensions!
""")
self.foreground_mobjects = []
self.play(FadeIn(morty))
self.play(
morty.change_mode, "hooray",
ShowCreation(bubble),
Write(bubble.content),
)
self.play(Blink(morty))
self.wait(2)
class ShowZetaOnHalfPlane(ZetaTransformationScene):
CONFIG = {
"x_min" : 1,
"x_max" : int(FRAME_X_RADIUS+2),
}
def construct(self):
self.add_title()
self.initial_transformation()
self.react_to_transformation()
self.show_cutoff()
self.set_color_i_line()
self.show_continuation()
self.emphasize_sum_doesnt_make_sense()
def add_title(self):
zeta = TexMobject(
"\\zeta(", "s", ")=",
*[
"\\frac{1}{%d^s} + "%d
for d in range(1, 5)
] + ["\\cdots"]
)
zeta[1].set_color(YELLOW)
for mob in zeta[3:3+4]:
mob[-2].set_color(YELLOW)
zeta.add_background_rectangle()
zeta.scale(0.8)
zeta.to_corner(UP+LEFT)
self.add_foreground_mobjects(zeta)
self.zeta = zeta
def initial_transformation(self):
self.add_transformable_plane()
self.wait()
self.add_extra_plane_lines_for_zeta(animate = True)
self.wait(2)
self.plane.save_state()
self.apply_zeta_function()
self.wait(2)
def react_to_transformation(self):
morty = Mortimer().flip()
morty.to_corner(DOWN+LEFT)
bubble = morty.get_bubble(SpeechBubble)
bubble.set_fill(BLACK, 0.5)
bubble.write("\\emph{Damn}!")
bubble.resize_to_content()
bubble.pin_to(morty)
self.play(FadeIn(morty))
self.play(
morty.change_mode, "surprised",
ShowCreation(bubble),
Write(bubble.content)
)
self.play(Blink(morty))
self.play(morty.look_at, self.plane.get_top())
self.wait()
self.play(
morty.look_at, self.plane.get_bottom(),
*list(map(FadeOut, [bubble, bubble.content]))
)
self.play(Blink(morty))
self.play(FadeOut(morty))
def show_cutoff(self):
words = TextMobject("Such an abrupt stop...")
words.add_background_rectangle()
words.next_to(ORIGIN, UP+LEFT)
words.shift(LEFT+UP)
line = Line(*list(map(self.z_to_point, [
complex(np.euler_gamma, u*FRAME_Y_RADIUS)
for u in (1, -1)
])))
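#The cutoff is marked at Re = euler_gamma; near the pole one has
#zeta(s) ~ 1/(s-1) + euler_gamma, which is presumably how this constant
#was chosen to eyeball the transformed grid's left edge.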
line.set_color(YELLOW)
arrows = [
Arrow(words.get_right(), point)
for point in line.get_start_and_end()
]
self.play(Write(words, run_time = 2))
self.play(ShowCreation(arrows[0]))
self.play(
Transform(*arrows),
ShowCreation(line),
run_time = 2
)
self.play(FadeOut(arrows[0]))
self.wait(2)
self.play(*list(map(FadeOut, [words, line])))
def set_color_i_line(self):
right_i_lines, left_i_lines = [
VGroup(*[
Line(
vert_vect+RIGHT,
vert_vect+(FRAME_X_RADIUS+1)*horiz_vect
)
for vert_vect in (UP, DOWN)
])
for horiz_vect in (RIGHT, LEFT)
]
right_i_lines.set_color(YELLOW)
left_i_lines.set_color(BLUE)
for lines in right_i_lines, left_i_lines:
self.prepare_for_transformation(lines)
self.restore_mobjects(self.plane)
self.plane.add(*right_i_lines)
colored_plane = self.plane.copy()
right_i_lines.set_stroke(width = 0)
self.play(
self.plane.set_stroke, GREY, 1,
)
right_i_lines.set_stroke(YELLOW, width = 3)
self.play(ShowCreation(right_i_lines))
self.plane.save_state()
self.wait(2)
self.apply_zeta_function()
self.wait(2)
left_i_lines.save_state()
left_i_lines.apply_complex_function(zeta)
self.play(ShowCreation(left_i_lines, run_time = 5))
self.wait()
self.restore_mobjects(self.plane, left_i_lines)
self.play(Transform(self.plane, colored_plane))
self.wait()
self.left_i_lines = left_i_lines
def show_continuation(self):
reflected_plane = self.get_reflected_plane()
self.play(ShowCreation(reflected_plane, run_time = 2))
self.plane.add(reflected_plane)
self.remove(self.left_i_lines)
self.wait()
self.apply_zeta_function()
self.wait(2)
self.play(ShowCreation(
reflected_plane,
run_time = 6,
rate_func = lambda t : 1-there_and_back(t)
))
self.wait(2)
def emphasize_sum_doesnt_make_sense(self):
brace = Brace(VGroup(*self.zeta[1][3:]))
words = brace.get_text("""
Still fails to converge
when Re$(s) < 1$
""", buff = SMALL_BUFF)
words.add_background_rectangle()
words.scale_in_place(0.8)
divergent_sum = TexMobject("1+2+3+4+\\cdots")
divergent_sum.next_to(ORIGIN, UP)
divergent_sum.to_edge(LEFT)
divergent_sum.add_background_rectangle()
self.play(
GrowFromCenter(brace),
Write(words)
)
self.wait(2)
self.play(Write(divergent_sum))
self.wait(2)
def restore_mobjects(self, *mobjects):
self.play(*it.chain(*[
[m.restore, m.make_smooth]
for m in mobjects
]), run_time = 2)
for m in mobjects:
self.remove(m)
m.restore()
self.add(m)
class ShowConditionalDefinition(Scene):
def construct(self):
zeta = TexMobject("\\zeta(s)=")
zeta[2].set_color(YELLOW)
sigma = TexMobject("\\sum_{n=1}^\\infty \\frac{1}{n^s}")
sigma[-1].set_color(YELLOW)
something_else = TextMobject("Something else...")
conditions = VGroup(*[
TextMobject("if Re$(s) %s 1$"%s)
for s in (">", "\\le")
])
definitions = VGroup(sigma, something_else)
definitions.arrange_submobjects(DOWN, buff = MED_LARGE_BUFF, aligned_edge = LEFT)
conditions.arrange_submobjects(DOWN, buff = LARGE_BUFF)
definitions.shift(2*LEFT+2*UP)
conditions.next_to(definitions, RIGHT, buff = LARGE_BUFF, aligned_edge = DOWN)
brace = Brace(definitions, LEFT)
zeta.next_to(brace, LEFT)
sigma.save_state()
sigma.next_to(zeta)
self.add(zeta, sigma)
self.wait()
self.play(
sigma.restore,
GrowFromCenter(brace),
FadeIn(something_else)
)
self.play(Write(conditions))
self.wait()
underbrace = Brace(something_else)
question = underbrace.get_text("""
What to put here?
""")
VGroup(underbrace, question).set_color(GREEN_B)
self.play(
GrowFromCenter(underbrace),
Write(question),
something_else.set_color, GREEN_B
)
self.wait(2)
class SquiggleOnExtensions(ZetaTransformationScene):
CONFIG = {
"x_min" : 1,
"x_max" : int(FRAME_X_RADIUS+2),
}
def construct(self):
self.show_negative_one()
self.cycle_through_options()
self.lock_into_place()
def show_negative_one(self):
self.add_transformable_plane()
thin_plane = self.plane.copy()
thin_plane.add(self.get_reflected_plane())
self.remove(self.plane)
self.add_extra_plane_lines_for_zeta()
reflected_plane = self.get_reflected_plane()
self.plane.add(reflected_plane)
self.remove(self.plane)
self.add(thin_plane)
dot = self.note_point(-1, "-1")
self.play(
ShowCreation(self.plane, run_time = 2),
Animation(dot),
run_time = 2
)
self.remove(thin_plane)
self.apply_zeta_function(added_anims = [
ApplyMethod(
dot.move_to, self.z_to_point(-1./12),
run_time = 5
)
])
dot_to_remove = self.note_point(-1./12, "-\\frac{1}{12}")
self.remove(dot_to_remove)
self.left_plane = reflected_plane
self.dot = dot
def note_point(self, z, label_tex):
dot = Dot(self.z_to_point(z))
dot.set_color(YELLOW)
label = TexMobject(label_tex)
label.add_background_rectangle()
label.next_to(dot, UP+LEFT, buff = SMALL_BUFF)
label.shift(LEFT)
arrow = Arrow(label.get_right(), dot, buff = SMALL_BUFF)
self.play(Write(label, run_time = 1))
self.play(*list(map(ShowCreation, [arrow, dot])))
self.wait()
self.play(*list(map(FadeOut, [arrow, label])))
return dot
def cycle_through_options(self):
gamma = np.euler_gamma
def shear(point):
x, y, z = point
return np.array([
x,
y+0.25*(1-x)**2,
0
])
def mixed_scalar_func(point):
x, y, z = point
scalar = 1 + (gamma-x)/(gamma+FRAME_X_RADIUS)
return np.array([
(scalar**2)*x,
(scalar**3)*y,
0
])
def alt_mixed_scalar_func(point):
x, y, z = point
scalar = 1 + (gamma-x)/(gamma+FRAME_X_RADIUS)
return np.array([
(scalar**5)*x,
(scalar**2)*y,
0
])
def sinusoidal_func(point):
x, y, z = point
freq = np.pi/gamma
return np.array([
x-0.2*np.sin(x*freq)*np.sin(y),
y-0.2*np.sin(x*freq)*np.sin(y),
0
])
funcs = [
shear,
mixed_scalar_func,
alt_mixed_scalar_func,
sinusoidal_func,
]
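#Four ad hoc continuous ways to fill in the left half plane; the point
#of the scene is that none of these alternatives is differentiable
#everywhere, unlike the true continuation restored in lock_into_place.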
for mob in self.left_plane.family_members_with_points():
if np.all(np.abs(mob.points[:,1]) < 0.1):
self.left_plane.remove(mob)
new_left_planes = [
self.left_plane.copy().apply_function(func)
for func in funcs
]
new_dots = [
self.dot.copy().move_to(func(self.dot.get_center()))
for func in funcs
]
self.left_plane.save_state()
for plane, dot in zip(new_left_planes, new_dots):
self.play(
Transform(self.left_plane, plane),
Transform(self.dot, dot),
run_time = 3
)
self.wait()
self.play(FadeOut(self.dot))
#Squiggle on example
self.wait()
self.play(FadeOut(self.left_plane))
self.play(ShowCreation(
self.left_plane,
run_time = 5,
rate_func = None
))
self.wait()
def lock_into_place(self):
words = TextMobject(
"""Only one extension
has a """,
"\\emph{derivative}",
"everywhere",
alignment = ""
)
words.to_corner(UP+LEFT)
words.set_color_by_tex("\\emph{derivative}", YELLOW)
words.add_background_rectangle()
self.play(Write(words))
self.add_foreground_mobjects(words)
self.play(self.left_plane.restore)
self.wait()
class DontKnowDerivatives(TeacherStudentsScene):
def construct(self):
self.student_says(
"""
You said we don't
need derivatives!
""",
target_mode = "pleading"
)
self.random_blink(2)
self.student_says(
"""
I get $\\frac{df}{dx}$, just not
for complex functions
""",
target_mode = "confused",
student_index = 2
)
self.random_blink(2)
self.teacher_says(
"""
Luckily, there's a purely
geometric intuition here.
""",
target_mode = "hooray"
)
self.change_student_modes(*["happy"]*3)
self.random_blink(3)
class IntroduceAnglePreservation(VisualizingSSquared):
CONFIG = {
"num_anchors_to_add_per_line" : 50,
"use_homotopy" : True,
}
def construct(self):
self.add_title()
self.show_initial_transformation()
self.talk_about_derivative()
self.cycle_through_line_pairs()
self.note_grid_lines()
self.name_analytic()
def add_title(self):
title = TexMobject("f(", "s", ")=", "s", "^2")
title.set_color_by_tex("s", YELLOW)
title.scale(1.5)
title.to_corner(UP+LEFT)
title.add_background_rectangle()
self.title = title
self.add_transformable_plane()
self.play(Write(title))
self.add_foreground_mobjects(title)
self.wait()
def show_initial_transformation(self):
self.apply_function()
self.wait(2)
self.reset()
def talk_about_derivative(self):
randy = Randolph().scale(0.8)
randy.to_corner(DOWN+LEFT)
morty = Mortimer()
morty.to_corner(DOWN+RIGHT)
randy.make_eye_contact(morty)
for pi, words in (randy, "$f'(s) = 2s$"), (morty, "Here's some \\\\ related geometry..."):
pi.bubble = pi.get_bubble(SpeechBubble)
pi.bubble.set_fill(BLACK, opacity = 0.7)
pi.bubble.write(words)
pi.bubble.resize_to_content()
pi.bubble.pin_to(pi)
for index in 3, 7:
randy.bubble.content[index].set_color(YELLOW)
self.play(*list(map(FadeIn, [randy, morty])))
self.play(
randy.change_mode, "speaking",
ShowCreation(randy.bubble),
Write(randy.bubble.content)
)
self.play(Blink(morty))
self.wait()
self.play(
morty.change_mode, "speaking",
randy.change_mode, "pondering",
ShowCreation(morty.bubble),
Write(morty.bubble.content),
)
self.play(Blink(randy))
self.wait()
self.play(*list(map(FadeOut, [
randy, morty,
randy.bubble, randy.bubble.content,
morty.bubble, morty.bubble.content,
])))
def cycle_through_line_pairs(self):
line_pairs = [
(
Line(3*DOWN+3*RIGHT, 2*UP),
Line(DOWN+RIGHT, 3*UP+4*RIGHT)
),
(
Line(RIGHT+3.5*DOWN, RIGHT+2.5*UP),
Line(3*LEFT+0.5*UP, 3*RIGHT+0.5*UP),
),
(
Line(4*RIGHT+4*DOWN, RIGHT+2*UP),
Line(4*DOWN+RIGHT, 2*UP+2*RIGHT)
),
]
for lines in line_pairs:
self.show_angle_preservation_between_lines(*lines)
self.reset()
def note_grid_lines(self):
intersection_inputs = [
complex(x, y)
for x in np.arange(-5, 5, 0.5)
for y in np.arange(0, 3, 0.5)
if not (x <= 0 and y == 0)
]
brackets = VGroup(*list(map(
self.get_right_angle_bracket,
intersection_inputs
)))
self.apply_function()
self.wait()
self.play(
ShowCreation(brackets, run_time = 5),
Animation(self.plane)
)
self.wait()
def name_analytic(self):
equiv = TextMobject("``Analytic'' $\\Leftrightarrow$ Angle-preserving")
kind_of = TextMobject("...kind of")
for text in equiv, kind_of:
text.scale(1.2)
text.add_background_rectangle()
equiv.set_color(YELLOW)
kind_of.set_color(RED)
kind_of.next_to(equiv, RIGHT)
VGroup(equiv, kind_of).next_to(ORIGIN, UP, buff = 1)
self.play(Write(equiv))
self.wait(2)
self.play(Write(kind_of, run_time = 1))
self.wait(2)
def reset(self, faded = True):
self.play(FadeOut(self.plane))
self.add_transformable_plane()
if faded:
self.plane.fade()
self.play(FadeIn(self.plane))
def apply_function(self, **kwargs):
if self.use_homotopy:
self.apply_complex_homotopy(
lambda z, t : z**(1+t),
run_time = 5,
**kwargs
)
else:
self.apply_complex_function(
lambda z : z**2,
**kwargs
)
def show_angle_preservation_between_lines(self, *lines):
R2_endpoints = [
[l.get_start()[:2], l.get_end()[:2]]
for l in lines
]
R2_intersection_point = intersection(*R2_endpoints)
intersection_point = np.array(list(R2_intersection_point) + [0])
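#intersection() works on 2D endpoints, hence stripping the z-coordinate
#going in and padding it back on here.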
angle1, angle2 = [l.get_angle() for l in lines]
arc = Arc(
start_angle = angle1,
angle = angle2-angle1,
radius = 0.4,
color = YELLOW
)
arc.shift(intersection_point)
arc.insert_n_anchor_points(10)
arc.generate_target()
input_z = complex(*arc.get_center()[:2])
scale_factor = abs(2*input_z)
arc.target.scale_about_point(1./scale_factor, intersection_point)
arc.target.apply_complex_function(lambda z : z**2)
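#Pre-shrinking by 1/|2z| compensates for the local stretch of z -> z**2,
#whose derivative has magnitude |2z| at the intersection, so the
#transformed arc comes out at roughly its original size.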
angle_tex = TexMobject(
"%d^\\circ"%abs(int((angle2-angle1)*180/np.pi))
)
angle_tex.set_color(arc.get_color())
angle_tex.add_background_rectangle()
self.put_angle_tex_next_to_arc(angle_tex, arc)
angle_arrow = Arrow(
angle_tex, arc,
color = arc.get_color(),
buff = 0.1,
)
angle_group = VGroup(angle_tex, angle_arrow)
self.play(*list(map(ShowCreation, lines)))
self.play(
Write(angle_tex),
ShowCreation(angle_arrow),
ShowCreation(arc)
)
self.wait()
self.play(FadeOut(angle_group))
self.plane.add(*lines)
self.apply_function(added_anims = [
MoveToTarget(arc, run_time = 5)
])
self.put_angle_tex_next_to_arc(angle_tex, arc)
arrow = Arrow(angle_tex, arc, buff = 0.1)
arrow.set_color(arc.get_color())
self.play(
Write(angle_tex),
ShowCreation(arrow)
)
self.wait(2)
self.play(*list(map(FadeOut, [arc, angle_tex, arrow])))
def put_angle_tex_next_to_arc(self, angle_tex, arc):
vect = arc.point_from_proportion(0.5)-interpolate(
arc.points[0], arc.points[-1], 0.5
)
unit_vect = vect/get_norm(vect)
angle_tex.move_to(arc.get_center() + 1.7*unit_vect)
def get_right_angle_bracket(self, input_z):
output_z = input_z**2
derivative = 2*input_z
rotation = np.log(derivative).imag
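#Im(log f'(z)) = arg(f'(z)) is the local rotation the map applies near
#input_z; rotating the bracket by it keeps the right angle aligned with
#the transformed grid lines.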
brackets = VGroup(
Line(RIGHT, RIGHT+UP),
Line(RIGHT+UP, UP)
)
brackets.scale(0.15)
brackets.set_stroke(width = 2)
brackets.set_color(YELLOW)
brackets.shift(0.02*UP) ##Why???
brackets.rotate(rotation, about_point = ORIGIN)
brackets.shift(self.z_to_point(output_z))
return brackets
class AngleAtZeroDerivativePoints(IntroduceAnglePreservation):
CONFIG = {
"use_homotopy" : True
}
def construct(self):
self.add_title()
self.is_before_transformation = True
self.add_transformable_plane()
self.plane.fade()
line = Line(3*LEFT+0.5*UP, 3*RIGHT+0.5*DOWN)
self.show_angle_preservation_between_lines(
line, line.copy().rotate(np.pi/5)
)
self.wait()
def add_title(self):
title = TexMobject("f(", "s", ")=", "s", "^2")
title.set_color_by_tex("s", YELLOW)
title.scale(1.5)
title.to_corner(UP+LEFT)
title.add_background_rectangle()
derivative = TexMobject("f'(0) = 0")
derivative.set_color(RED)
derivative.scale(1.2)
derivative.add_background_rectangle()
derivative.next_to(title, DOWN)
self.add_foreground_mobjects(title, derivative)
def put_angle_tex_next_to_arc(self, angle_tex, arc):
IntroduceAnglePreservation.put_angle_tex_next_to_arc(
self, angle_tex, arc
)
if not self.is_before_transformation:
two_dot = TexMobject("2 \\times ")
two_dot.set_color(angle_tex.get_color())
two_dot.next_to(angle_tex, LEFT, buff = SMALL_BUFF)
two_dot.add_background_rectangle()
center = angle_tex.get_center()
angle_tex.add_to_back(two_dot)
angle_tex.move_to(center)
else:
self.is_before_transformation = False
class AnglePreservationAtAnyPairOfPoints(IntroduceAnglePreservation):
def construct(self):
self.add_transformable_plane()
self.plane.fade()
line_pairs = self.get_line_pairs()
line_pair = line_pairs[0]
for target_pair in line_pairs[1:]:
self.play(Transform(
line_pair, target_pair,
run_time = 2,
path_arc = np.pi
))
self.wait()
self.show_angle_preservation_between_lines(*line_pair)
self.show_example_analytic_functions()
def get_line_pairs(self):
return list(it.starmap(VGroup, [
(
Line(3*DOWN, 3*LEFT+2*UP),
Line(2*LEFT+DOWN, 3*UP+RIGHT)
),
(
Line(2*RIGHT+DOWN, 3*LEFT+2*UP),
Line(LEFT+3*DOWN, 4*RIGHT+3*UP),
),
(
Line(LEFT+3*DOWN, LEFT+3*UP),
Line(5*LEFT+UP, 3*RIGHT+UP)
),
(
Line(4*RIGHT+3*DOWN, RIGHT+2*UP),
Line(3*DOWN+RIGHT, 2*UP+2*RIGHT)
),
]))
def show_example_analytic_functions(self):
words = TextMobject("Examples of analytic functions:")
words.shift(2*UP)
words.set_color(YELLOW)
words.add_background_rectangle()
words.next_to(UP, UP).to_edge(LEFT)
functions = TextMobject(
"$e^x$, ",
"$\\sin(x)$, ",
"any polynomial, "
"$\\log(x)$, ",
"\\dots",
)
functions.next_to(ORIGIN, UP).to_edge(LEFT)
for function in functions:
function.add_to_back(BackgroundRectangle(function))
self.play(Write(words))
for function in functions:
self.play(FadeIn(function))
self.wait()
class NoteZetaFunctionAnalyticOnRightHalf(ZetaTransformationScene):
CONFIG = {
"anchor_density" : 35,
}
def construct(self):
self.add_title()
self.add_transformable_plane(animate = False)
self.add_extra_plane_lines_for_zeta(animate = True)
self.apply_zeta_function()
self.note_right_angles()
def add_title(self):
title = TexMobject(
"\\zeta(s) = \\sum_{n=1}^\\infty \\frac{1}{n^s}"
)
title[2].set_color(YELLOW)
title[-1].set_color(YELLOW)
title.add_background_rectangle()
title.to_corner(UP+LEFT)
self.add_foreground_mobjects(title)
def note_right_angles(self):
intersection_inputs = [
complex(x, y)
for x in np.arange(1+2./16, 1.4, 1./16)
for y in np.arange(-0.5, 0.5, 1./16)
if abs(y) > 1./16
]
brackets = VGroup(*list(map(
self.get_right_angle_bracket,
intersection_inputs
)))
self.play(ShowCreation(brackets, run_time = 3))
self.wait()
def get_right_angle_bracket(self, input_z):
output_z = zeta(input_z)
derivative = d_zeta(input_z)
rotation = np.log(derivative).imag
brackets = VGroup(
Line(RIGHT, RIGHT+UP),
Line(RIGHT+UP, UP)
)
brackets.scale(0.1)
brackets.set_stroke(width = 2)
brackets.set_color(YELLOW)
brackets.rotate(rotation, about_point = ORIGIN)
brackets.shift(self.z_to_point(output_z))
return brackets
class InfiniteContinuousJigsawPuzzle(ZetaTransformationScene):
CONFIG = {
"anchor_density" : 35,
}
def construct(self):
self.set_stage()
self.add_title()
self.show_jigsaw()
self.name_analytic_continuation()
def set_stage(self):
self.plane = self.get_dense_grid()
left_plane = self.get_reflected_plane()
self.plane.add(left_plane)
self.apply_zeta_function(run_time = 0)
self.remove(left_plane)
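#Chop the transformed left-half grid into 5-line chunks and shuffle
#them, so show_jigsaw can fade the "puzzle pieces" in at random.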
lines_per_piece = 5
pieces = [
VGroup(*left_plane[lines_per_piece*i:lines_per_piece*(i+1)])
for i in range(len(list(left_plane)) // lines_per_piece)
]
random.shuffle(pieces)
self.pieces = pieces
def add_title(self):
title = TextMobject("Infinite ", "continuous ", "jigsaw puzzle")
title.scale(1.5)
title.to_edge(UP)
for word in title:
word.add_to_back(BackgroundRectangle(word))
self.play(FadeIn(word))
self.wait()
self.add_foreground_mobjects(title)
self.title = title
def show_jigsaw(self):
for piece in self.pieces:
self.play(FadeIn(piece, run_time = 0.5))
self.wait()
def name_analytic_continuation(self):
words = TextMobject("``Analytic continuation''")
words.set_color(YELLOW)
words.scale(1.5)
words.next_to(self.title, DOWN, buff = LARGE_BUFF)
words.add_background_rectangle()
self.play(Write(words))
self.wait()
class ThatsHowZetaIsDefined(TeacherStudentsScene):
def construct(self):
self.add_zeta_definition()
self.teacher_says("""
So that's how
$\\zeta(s)$ is defined
""")
self.change_student_modes(*["hooray"]*3)
self.random_blink(2)
def add_zeta_definition(self):
zeta = TexMobject(
"\\zeta(s) = \\sum_{n=1}^\\infty \\frac{1}{n^s}"
)
VGroup(zeta[2], zeta[-1]).set_color(YELLOW)
zeta.to_corner(UP+LEFT)
self.add(zeta)
class ManyIntersectingLinesPreZeta(ZetaTransformationScene):
CONFIG = {
"apply_zeta" : False,
"lines_center" : RIGHT,
"nudge_size" : 0.9,
"function" : zeta,
"angle" : np.pi/5,
"arc_scale_factor" : 0.3,
"shift_directions" : [LEFT, RIGHT],
}
def construct(self):
self.establish_plane()
self.add_title()
line = Line(DOWN+2*LEFT, UP+2*RIGHT)
lines = VGroup(line, line.copy().rotate(self.angle))
arc = Arc(start_angle = line.get_angle(), angle = self.angle)
arc.scale(self.arc_scale_factor)
arc.set_color(YELLOW)
lines.add(arc)
# lines.set_stroke(WHITE, width = 5)
lines.shift(self.lines_center + self.nudge_size*RIGHT)
if self.apply_zeta:
self.apply_zeta_function(run_time = 0)
lines.set_stroke(width = 0)
added_anims = self.get_modified_line_anims(lines)
for vect in self.shift_directions:
self.play(
ApplyMethod(lines.shift, 2*self.nudge_size*vect, path_arc = np.pi),
*added_anims,
run_time = 3
)
def establish_plane(self):
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.add_reflected_plane()
self.plane.fade()
def add_title(self):
if self.apply_zeta:
title = TextMobject("After \\\\ transformation")
else:
title = TextMobject("Before \\\\ transformation")
title.add_background_rectangle()
title.to_edge(UP)
self.add_foreground_mobjects(title)
def get_modified_line_anims(self, lines):
return []
class ManyIntersectingLinesPostZeta(ManyIntersectingLinesPreZeta):
CONFIG = {
"apply_zeta" : True,
# "anchor_density" : 5
}
def get_modified_line_anims(self, lines):
n_inserted_points = 30
new_lines = lines.copy()
new_lines.set_stroke(width = 5)
def update_new_lines(lines_to_update):
transformed = lines.copy()
self.prepare_for_transformation(transformed)
transformed.apply_complex_function(self.function)
transformed.make_smooth()
transformed.set_stroke(width = 5)
for start, end in zip(lines_to_update, transformed):
if start.get_num_points() > 0:
start.points = np.array(end.points)
return [UpdateFromFunc(new_lines, update_new_lines)]
class ManyIntersectingLinesPreSSquared(ManyIntersectingLinesPreZeta):
CONFIG = {
"x_min" : -int(FRAME_X_RADIUS),
"apply_zeta" : False,
"lines_center" : ORIGIN,
"nudge_size" : 0.9,
"function" : lambda z : z**2,
"shift_directions" : [LEFT, RIGHT, UP, DOWN, DOWN+LEFT, UP+RIGHT],
}
def establish_plane(self):
self.add_transformable_plane()
self.plane.fade()
def apply_zeta_function(self, **kwargs):
self.apply_complex_function(self.function, **kwargs)
class ManyIntersectingLinesPostSSquared(ManyIntersectingLinesPreSSquared):
CONFIG = {
"apply_zeta" : True,
}
def get_modified_line_anims(self, lines):
n_inserted_points = 30
new_lines = lines.copy()
new_lines.set_stroke(width = 5)
def update_new_lines(lines_to_update):
transformed = lines.copy()
self.prepare_for_transformation(transformed)
transformed.apply_complex_function(self.function)
transformed.make_smooth()
transformed.set_stroke(width = 5)
for start, end in zip(lines_to_update, transformed):
if start.get_num_points() > 0:
start.points = np.array(end.points)
return [UpdateFromFunc(new_lines, update_new_lines)]
class ButWhatIsTheExtensions(TeacherStudentsScene):
def construct(self):
self.student_says(
"""
But what exactly \\emph{is}
that continuation?
""",
target_mode = "sassy"
)
self.change_student_modes("confused", "sassy", "confused")
self.random_blink(2)
self.teacher_says("""
You're $\\$1{,}000{,}000$ richer
if you can answer
that fully
""", target_mode = "shruggie")
self.change_student_modes(*["pondering"]*3)
self.random_blink(3)
class MathematiciansLookingAtFunctionEquation(Scene):
def construct(self):
equation = TexMobject(
"\\zeta(s)",
"= 2^s \\pi ^{s-1}",
"\\sin\\left(\\frac{\\pi s}{2}\\right)",
"\\Gamma(1-s)",
"\\zeta(1-s)",
)
equation.shift(UP)
mathy = Mathematician().to_corner(DOWN+LEFT)
mathys = VGroup(mathy)
for x in range(2):
mathys.add(Mathematician().next_to(mathys))
for mathy in mathys:
mathy.change_mode("pondering")
mathy.look_at(equation)
self.add(mathys)
self.play(Write(VGroup(*equation[:-1])))
self.play(Transform(
equation[0].copy(),
equation[-1],
path_arc = -np.pi/3,
run_time = 2
))
for mathy in mathys:
self.play(Blink(mathy))
self.wait()
class DiscussZeros(ZetaTransformationScene):
def construct(self):
self.establish_plane()
self.ask_about_zeros()
self.show_trivial_zeros()
self.show_critical_strip()
self.transform_bit_of_critical_line()
self.extend_transformed_critical_line()
def establish_plane(self):
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.add_reflected_plane()
self.plane.fade()
def ask_about_zeros(self):
dots = VGroup(*[
Dot(
(2+np.sin(12*alpha))*\
rotate_vector(RIGHT, alpha+nudge)
)
for alpha in np.arange(3*np.pi/20, 2*np.pi, 2*np.pi/5)
for nudge in [random.random()*np.pi/6]
])
dots.set_color(YELLOW)
q_marks = VGroup(*[
TexMobject("?").next_to(dot, UP)
for dot in dots
])
arrows = VGroup(*[
Arrow(dot, ORIGIN, buff = 0.2, tip_length = 0.1)
for dot in dots
])
question = TextMobject("Which numbers go to $0$?")
question.add_background_rectangle()
question.to_edge(UP)
for mob in dots, arrows, q_marks:
self.play(ShowCreation(mob))
self.play(Write(question))
self.wait(2)
dots.generate_target()
for i, dot in enumerate(dots.target):
dot.move_to(2*(i+1)*LEFT)
self.play(
FadeOut(arrows),
FadeOut(q_marks),
FadeOut(question),
MoveToTarget(dots),
)
self.wait()
self.dots = dots
def show_trivial_zeros(self):
trivial_zero_words = TextMobject("``Trivial'' zeros")
trivial_zero_words.next_to(ORIGIN, UP)
trivial_zero_words.to_edge(LEFT)
randy = Randolph().flip()
randy.to_corner(DOWN+RIGHT)
bubble = randy.get_bubble()
bubble.set_fill(BLACK, opacity = 0.8)
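        # The "trivial" zeros sit at the negative even integers; naively
        # plugging s = -2 into the series reads 1^2 + 2^2 + 3^2 + ... = 0.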
bubble.write("$1^1 + 2^2 + 3^2 + \\cdots = 0$")
bubble.resize_to_content()
bubble.pin_to(randy)
self.plane.save_state()
self.dots.save_state()
for dot in self.dots.target:
dot.move_to(ORIGIN)
self.apply_zeta_function(
added_anims = [MoveToTarget(self.dots, run_time = 3)],
run_time = 3
)
self.wait(3)
self.play(
self.plane.restore,
self.plane.make_smooth,
self.dots.restore,
run_time = 2
)
self.remove(*self.get_mobjects_from_last_animation())
self.plane.restore()
self.dots.restore()
self.add(self.plane, self.dots)
self.play(Write(trivial_zero_words))
self.wait()
self.play(FadeIn(randy))
self.play(
randy.change_mode, "confused",
ShowCreation(bubble),
Write(bubble.content)
)
self.play(Blink(randy))
self.wait()
self.play(Blink(randy))
self.play(*list(map(FadeOut, [
randy, bubble, bubble.content, trivial_zero_words
])))
def show_critical_strip(self):
strip = Rectangle(
height = FRAME_HEIGHT,
width = 1
)
strip.next_to(ORIGIN, RIGHT, buff = 0)
strip.set_stroke(width = 0)
strip.set_fill(YELLOW, opacity = 0.3)
name = TextMobject("Critical strip")
name.add_background_rectangle()
name.next_to(ORIGIN, LEFT)
name.to_edge(UP)
arrow = Arrow(name.get_bottom(), 0.5*RIGHT+UP)
primes = TexMobject("2, 3, 5, 7, 11, 13, 17, \\dots")
primes.to_corner(UP+RIGHT)
# photo = Square()
photo = ImageMobject("Riemann", invert = False)
photo.set_width(5)
photo.to_corner(UP+LEFT)
new_dots = VGroup(*[
Dot(0.5*RIGHT + y*UP)
for y in np.linspace(-2.5, 3.2, 5)
])
new_dots.set_color(YELLOW)
critical_line = Line(
0.5*RIGHT+FRAME_Y_RADIUS*DOWN,
0.5*RIGHT+FRAME_Y_RADIUS*UP,
color = YELLOW
)
self.give_dots_wandering_anims()
self.play(FadeIn(strip), *self.get_dot_wandering_anims())
self.play(
Write(name, run_time = 1),
ShowCreation(arrow),
*self.get_dot_wandering_anims()
)
self.play(*self.get_dot_wandering_anims())
self.play(
FadeIn(primes),
*self.get_dot_wandering_anims()
)
for x in range(7):
self.play(*self.get_dot_wandering_anims())
self.play(
GrowFromCenter(photo),
FadeOut(name),
FadeOut(arrow),
*self.get_dot_wandering_anims()
)
self.play(Transform(self.dots, new_dots))
self.play(ShowCreation(critical_line))
self.wait(3)
self.play(
photo.shift, 7*LEFT,
*list(map(FadeOut, [
primes, self.dots, strip
]))
)
self.remove(photo)
self.critical_line = critical_line
def give_dots_wandering_anims(self):
def func(t):
result = (np.sin(6*2*np.pi*t) + 1)*RIGHT/2
result += 3*np.cos(2*2*np.pi*t)*UP
return result
self.wandering_path = ParametricFunction(func)
for i, dot in enumerate(self.dots):
dot.target = dot.copy()
q_mark = TexMobject("?")
q_mark.next_to(dot.target, UP)
dot.target.add(q_mark)
dot.target.move_to(self.wandering_path.point_from_proportion(
(float(2+2*i)/(4*len(list(self.dots))))%1
))
self.dot_anim_count = 0
def get_dot_wandering_anims(self):
self.dot_anim_count += 1
if self.dot_anim_count == 1:
return list(map(MoveToTarget, self.dots))
denom = 4*(len(list(self.dots)))
def get_rate_func(index):
return lambda t : (float(self.dot_anim_count + 2*index + t)/denom)%1
return [
MoveAlongPath(
dot, self.wandering_path,
rate_func = get_rate_func(i)
)
for i, dot in enumerate(self.dots)
]
def transform_bit_of_critical_line(self):
self.play(
self.plane.scale, 0.8,
self.critical_line.scale, 0.8,
rate_func = there_and_back,
run_time = 2
)
self.wait()
self.play(
self.plane.set_stroke, GREY, 1,
Animation(self.critical_line)
)
self.plane.add(self.critical_line)
self.apply_zeta_function()
self.wait(2)
self.play(
self.plane.fade,
Animation(self.critical_line)
)
def extend_transformed_critical_line(self):
def func(t):
z = zeta(complex(0.5, t))
return z.real*RIGHT + z.imag*UP
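        # func traces the image of the critical line Re(s) = 1/2 under zeta;
        # it is drawn below in 100 unit-length parameter segments.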
full_line = VGroup(*[
ParametricFunction(func, t_min = t0, t_max = t0+1)
for t0 in range(100)
])
full_line.set_color_by_gradient(
YELLOW, BLUE, GREEN, RED, YELLOW, BLUE, GREEN, RED,
)
self.play(ShowCreation(full_line, run_time = 20, rate_func = None))
self.wait()
class AskAboutRelationToPrimes(TeacherStudentsScene):
def construct(self):
self.student_says("""
Whoa! Where the heck
do primes come in here?
""", target_mode = "confused")
self.random_blink(3)
self.teacher_says("""
Perhaps in a
different video.
""", target_mode = "hesitant")
self.random_blink(3)
class HighlightCriticalLineAgain(DiscussZeros):
def construct(self):
self.establish_plane()
title = TexMobject("\\zeta(", "s", ") = 0")
title.set_color_by_tex("s", YELLOW)
title.add_background_rectangle()
title.to_corner(UP+LEFT)
self.add(title)
strip = Rectangle(
height = FRAME_HEIGHT,
width = 1
)
strip.next_to(ORIGIN, RIGHT, buff = 0)
strip.set_stroke(width = 0)
strip.set_fill(YELLOW, opacity = 0.3)
line = Line(
0.5*RIGHT+FRAME_Y_RADIUS*UP,
0.5*RIGHT+FRAME_Y_RADIUS*DOWN,
color = YELLOW
)
randy = Randolph().to_corner(DOWN+LEFT)
million = TexMobject("\\$1{,}000{,}000")
million.set_color(GREEN_B)
million.next_to(ORIGIN, UP+LEFT)
million.shift(2*LEFT)
arrow1 = Arrow(million.get_right(), line.get_top())
arrow2 = Arrow(million.get_right(), line.get_bottom())
self.add(randy, strip)
self.play(Write(million))
self.play(
randy.change_mode, "pondering",
randy.look_at, line.get_top(),
ShowCreation(arrow1),
run_time = 3
)
self.play(
randy.look_at, line.get_bottom(),
ShowCreation(line),
Transform(arrow1, arrow2)
)
self.play(FadeOut(arrow1))
self.play(Blink(randy))
self.wait()
self.play(randy.look_at, line.get_center())
self.play(randy.change_mode, "confused")
self.play(Blink(randy))
self.wait()
self.play(randy.change_mode, "pondering")
self.wait()
class DiscussSumOfNaturals(Scene):
def construct(self):
title = TexMobject(
"\\zeta(s) = \\sum_{n=1}^\\infty \\frac{1}{n^s}"
)
VGroup(title[2], title[-1]).set_color(YELLOW)
title.to_corner(UP+LEFT)
neg_twelfth, eq, zeta_neg_1, sum_naturals = equation = TexMobject(
"-\\frac{1}{12}",
"=",
"\\zeta(-1)",
"= 1 + 2 + 3 + 4 + \\cdots"
)
neg_twelfth.set_color(GREEN_B)
VGroup(*zeta_neg_1[2:4]).set_color(YELLOW)
q_mark = TexMobject("?").next_to(sum_naturals[0], UP)
q_mark.set_color(RED)
randy = Randolph()
randy.to_corner(DOWN+LEFT)
analytic_continuation = TextMobject("Analytic continuation")
analytic_continuation.next_to(title, RIGHT, 3*LARGE_BUFF)
sum_to_zeta = Arrow(title.get_corner(DOWN+RIGHT), zeta_neg_1)
sum_to_ac = Arrow(title.get_right(), analytic_continuation)
ac_to_zeta = Arrow(analytic_continuation.get_bottom(), zeta_neg_1.get_top())
cross = TexMobject("\\times")
cross.scale(2)
cross.set_color(RED)
cross.rotate(np.pi/6)
cross.move_to(sum_to_zeta.get_center())
brace = Brace(VGroup(zeta_neg_1, sum_naturals))
words = TextMobject(
"If not equal, at least connected",
"\\\\(see links in description)"
)
words.next_to(brace, DOWN)
self.add(neg_twelfth, eq, zeta_neg_1, randy, title)
self.wait()
self.play(
Write(sum_naturals),
Write(q_mark),
randy.change_mode, "confused"
)
self.play(Blink(randy))
self.wait()
self.play(randy.change_mode, "angry")
self.play(
ShowCreation(sum_to_zeta),
Write(cross)
)
self.play(Blink(randy))
self.wait()
self.play(
Transform(sum_to_zeta, sum_to_ac),
FadeOut(cross),
Write(analytic_continuation),
randy.change_mode, "pondering",
randy.look_at, analytic_continuation,
)
self.play(ShowCreation(ac_to_zeta))
self.play(Blink(randy))
self.wait()
self.play(
GrowFromCenter(brace),
Write(words[0]),
randy.look_at, words[0],
)
self.wait()
self.play(FadeIn(words[1]))
self.play(Blink(randy))
self.wait()
class InventingMathPreview(Scene):
def construct(self):
rect = Rectangle(height = 9, width = 16)
rect.set_height(4)
title = TextMobject("What does it feel like to invent math?")
title.next_to(rect, UP)
sum_tex = TexMobject("1+2+4+8+\\cdots = -1")
sum_tex.set_width(rect.get_width()-1)
self.play(
ShowCreation(rect),
Write(title)
)
self.play(Write(sum_tex))
self.wait()
class FinalAnimationTease(Scene):
def construct(self):
morty = Mortimer().shift(2*(DOWN+RIGHT))
bubble = morty.get_bubble(SpeechBubble)
bubble.write("""
Want to know what
$\\zeta'(s)$ looks like?
""")
self.add(morty)
self.play(
morty.change_mode, "hooray",
morty.look_at, bubble.content,
ShowCreation(bubble),
Write(bubble.content)
)
self.play(Blink(morty))
self.wait()
class PatreonThanks(Scene):
CONFIG = {
"specific_patrons" : [
"CrypticSwarm",
"Ali Yahya",
"Damion Kistler",
"Juan Batiz-Benet",
"Yu Jun",
"Othman Alikhan",
"Markus Persson",
"Joseph John Cox",
"Luc Ritchie",
"Shimin Kuang",
"Einar Johansen",
"Rish Kundalia",
"Achille Brighton",
"Kirk Werklund",
"Ripta Pasay",
"Felipe Diniz",
]
}
def construct(self):
morty = Mortimer()
morty.next_to(ORIGIN, DOWN)
n_patrons = len(self.specific_patrons)
special_thanks = TextMobject("Special thanks to:")
special_thanks.set_color(YELLOW)
special_thanks.shift(3*UP)
patreon_logo = ImageMobject("patreon", invert = False)
patreon_logo.set_height(1.5)
patreon_logo.next_to(special_thanks, DOWN)
left_patrons = VGroup(*list(map(TextMobject,
            self.specific_patrons[:n_patrons // 2]
)))
right_patrons = VGroup(*list(map(TextMobject,
            self.specific_patrons[n_patrons // 2:]
)))
for patrons, vect in (left_patrons, LEFT), (right_patrons, RIGHT):
patrons.arrange_submobjects(DOWN, aligned_edge = LEFT)
patrons.next_to(special_thanks, DOWN)
patrons.to_edge(vect, buff = LARGE_BUFF)
self.add(patreon_logo)
self.play(morty.change_mode, "gracious")
self.play(Write(special_thanks, run_time = 1))
self.play(
Write(left_patrons),
morty.look_at, left_patrons
)
self.play(
Write(right_patrons),
morty.look_at, right_patrons
)
self.play(Blink(morty))
for patrons in left_patrons, right_patrons:
for index in 0, -1:
self.play(morty.look_at, patrons[index])
self.wait()
class CreditTwo(Scene):
def construct(self):
morty = Mortimer()
morty.next_to(ORIGIN, DOWN)
morty.to_edge(RIGHT)
brother = PiCreature(color = GOLD_E)
brother.next_to(morty, LEFT)
brother.look_at(morty.eyes)
headphones = Headphones(height = 1)
headphones.move_to(morty.eyes, aligned_edge = DOWN)
headphones.shift(0.1*DOWN)
url = TextMobject("www.audible.com/3blue1brown")
url.to_corner(UP+RIGHT, buff = LARGE_BUFF)
self.add(morty)
self.play(Blink(morty))
self.play(
FadeIn(headphones),
Write(url),
Animation(morty)
)
self.play(morty.change_mode, "happy")
for x in range(4):
self.wait()
self.play(Blink(morty))
self.wait()
self.play(
FadeIn(brother),
morty.look_at, brother.eyes
)
self.play(brother.change_mode, "surprised")
self.play(Blink(brother))
self.wait()
self.play(
morty.look, LEFT,
brother.change_mode, "happy",
brother.look, LEFT
)
for x in range(10):
self.play(Blink(morty))
self.wait()
self.play(Blink(brother))
self.wait()
class FinalAnimation(ZetaTransformationScene):
CONFIG = {
"min_added_anchors" : 100,
}
def construct(self):
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.add_reflected_plane()
title = TexMobject("s", "\\to \\frac{d\\zeta}{ds}(", "s", ")")
title.set_color_by_tex("s", YELLOW)
title.add_background_rectangle()
title.scale(1.5)
title.to_corner(UP+LEFT)
self.play(Write(title))
self.add_foreground_mobjects(title)
self.wait()
self.apply_complex_function(d_zeta, run_time = 8)
self.wait()
class Thumbnail(ZetaTransformationScene):
CONFIG = {
"anchor_density" : 35
}
def construct(self):
self.y_min = -4
self.y_max = 4
self.x_min = 1
self.x_max = int(FRAME_X_RADIUS+2)
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.add_reflected_plane()
# self.apply_zeta_function()
self.plane.set_stroke(width = 4)
div_sum = TexMobject("-\\frac{1}{12} = ", "1+2+3+4+\\cdots")
div_sum.set_width(FRAME_WIDTH-1)
div_sum.to_edge(DOWN)
div_sum.set_color(YELLOW)
div_sum.set_background_stroke(width=8)
# for mob in div_sum.submobjects:
# mob.add_to_back(BackgroundRectangle(mob))
zeta = TexMobject("\\zeta(s)")
zeta.set_height(FRAME_Y_RADIUS-1)
zeta.to_corner(UP+LEFT)
million = TexMobject("\\$1{,}000{,}000")
million.set_width(FRAME_X_RADIUS+1)
million.to_edge(UP+RIGHT)
million.set_color(GREEN_B)
million.set_background_stroke(width=8)
self.add(div_sum, million, zeta)
class ZetaPartialSums(ZetaTransformationScene):
CONFIG = {
"anchor_density" : 35,
"num_partial_sums" : 12,
}
def construct(self):
self.add_transformable_plane()
self.add_extra_plane_lines_for_zeta()
self.prepare_for_transformation(self.plane)
N_list = [2**k for k in range(self.num_partial_sums)]
sigma = TexMobject(
"\\sum_{n = 1}^N \\frac{1}{n^s}"
)
sigmas = []
for N in N_list + ["\\infty"]:
tex = TexMobject(str(N))
tex.set_color(YELLOW)
new_sigma = sigma.copy()
top = new_sigma[0]
tex.move_to(top, DOWN)
new_sigma.remove(top)
new_sigma.add(tex)
new_sigma.to_corner(UP+LEFT)
sigmas.append(new_sigma)
def get_partial_sum_func(n_terms):
return lambda s : sum([1./(n**s) for n in range(1, n_terms+1)])
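        # Each partial sum of 1/n^s approximates zeta(s) (the series converges
        # for Re(s) > 1), so the planes below step toward the full zeta image.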
interim_planes = [
self.plane.copy().apply_complex_function(
get_partial_sum_func(N)
)
for N in N_list
]
interim_planes.append(self.plane.copy().apply_complex_function(zeta))
symbol = VGroup(TexMobject("s"))
symbol.scale(2)
symbol.set_color(YELLOW)
symbol.to_corner(UP+LEFT)
for plane, sigma in zip(interim_planes, sigmas):
self.play(
Transform(self.plane, plane),
Transform(symbol, sigma)
)
self.wait()
|
from django.db import models
from django.utils import timezone
from datetime import date
import shared
# Create your models here.
## EP Project
class Project(models.Model):
jobnumber = models.CharField(max_length=15, default='', unique=True)
projectname = models.CharField(max_length=100, default='')
projectmanager = models.CharField(max_length=25, default='', choices=shared.PROJECT_MANAGERS)
projectdescription = models.CharField(max_length=1000, default='', blank=True)
client = models.CharField(max_length=30, default='', blank=True)
county = models.CharField(max_length=15, default='', choices=shared.COUNTY_NAMES)
# relatedprojects = models.ManyToManyField('self', null=True)
env_cert_row = models.DateField(null=True, blank=True)
env_cert_let = models.DateField(null=True, blank=True)
row_auth = models.DateField(null=True, blank=True)
let_cert = models.DateField(null=True, blank=True)
pfpr = models.DateField(null=True, blank=True)
ffpr = models.DateField(null=True, blank=True)
comments = models.CharField(max_length=2000, default='', blank=True)
    def is_complete(self):
        pass
def gdot_district(self):
if self.county:
return '{}'.format(shared.COUNTIES[self.county])
else:
return 'Unassigned'
def __str__(self):
return self.jobnumber
class PINumbers(models.Model):
projects = models.ManyToManyField(Project, related_name='pis')
pi_number = models.CharField(max_length=7, null=True, unique=True)
class Meta:
verbose_name_plural = 'PI Numbers'
def __str__(self):
return self.pi_number
class ProjectNumbers(models.Model):
projects = models.ManyToManyField(Project, related_name='projectnumbers')
project_number = models.CharField(max_length=20, null=True, unique=True)
class Meta:
verbose_name_plural = 'Project Numbers'
def __str__(self):
return self.project_number
class SpecialStudy(models.Model):
""" Base class for special studies documents """
project = models.ForeignKey(Project, default='')
specialist = models.CharField(max_length=50, default='', choices=shared.EMPLOYEES)
gdot_specialist = models.CharField(max_length=50, default='')
title = models.CharField(max_length=50, default='')
documenttype = models.CharField(max_length=15, default='')
draftsubmittal = models.DateField(null=True, blank=True)
draftapproval = models.DateField(null=True, blank=True)
duedate = models.DateField(null=True, blank=True)
comments = models.CharField(max_length=1000, default='', blank=True)
class Meta:
abstract = True
def __str__(self):
return '{}'.format(self.documenttype)
class Nepa(models.Model):
project = models.ForeignKey(Project, default='')
specialist = models.CharField(max_length=50, default='', choices=shared.NEPA_PLANNERS)
stateplanner = models.CharField(max_length=50, default='')
documenttype = models.CharField(max_length=15, default='', choices=shared.ENVIRONMENTAL_DOCUMENTS)
##Submittals
earlycoordination = models.DateField(null=True, blank=True)
statedraft = models.DateField(null=True, blank=True)
stateapproval = models.DateField(null=True, blank=True)
fhwadraft = models.DateField(null=True, blank=True)
fhwaapproval = models.DateField(null=True, blank=True)
##Due Dates
statedraftdue = models.DateField(null=True, blank=True)
fhwadraftdue = models.DateField(null=True, blank=True)
comments = models.CharField(max_length=1000, default='', blank=True)
    def is_gepa(self):
        return 'GEPA' in self.documenttype
def statedraft_due_in(self):
if self.stateapproval:
return 'Approved'
if self.statedraftdue:
date_diff = self.statedraftdue - date.today()
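            # date_diff is a datetime.timedelta, and str(timedelta) renders as
            # 'N days, 0:00:00'; the replace below strips the time portion.
            # (date_diff.days would give the same number directly.)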
if not date_diff:
return 'Due Today'
days = '{}'.format(date_diff)
days_stripped = days.replace(', 0:00:00', '')
return days_stripped
return 'No Date'
def fhwadraft_due_in(self):
if 'GEPA' in self.documenttype:
return 'Not Applicable'
if self.fhwaapproval:
return 'Approved'
if self.fhwadraftdue:
date_diff = self.fhwadraftdue - date.today()
if not date_diff:
return 'Due Today'
            days = '{}'.format(date_diff)
days_stripped = days.replace(', 0:00:00', '')
return days_stripped
return 'No Date'
def __str__(self):
return '{}'.format(self.documenttype)
class Air(SpecialStudy):
pass
class Noise(SpecialStudy):
pass
class Ecology(SpecialStudy):
pass
class Aquatics(SpecialStudy):
pass
class Archaeology(SpecialStudy):
pass
class History(SpecialStudy):
pass
|
#!/usr/bin/python
from __future__ import print_function
import sys
import rubrik_cdm
import getopt
import getpass
import urllib3
urllib3.disable_warnings()
def usage():
sys.stderr.write("Usage: rbk_share_grab.py [-h] [-c creds] [-p protocol] [-t token] [-o outfile] rubrik\n")
sys.stderr.write("-h | --help: Prints this message\n")
sys.stderr.write("-c | --creds : Enter cluster credentials on the CLI [user:password]\n")
sys.stderr.write("-p | --protocol : Only grab shares of the given protocol [NFS | SMB]\n")
sys.stderr.write("-t | --token : Authenticate via token\n")
sys.stderr.write("-o | --output : Write output to a file\n")
sys.stderr.write("rubrik : Hostname or IP of a Rubrik Cluster\n")
exit(0)
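# Read a line of input portably: input() on Python 3, raw_input() on Python 2.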
def python_input(message):
if int(sys.version[0]) > 2:
value = input(message)
else:
value = raw_input(message)
return(value)
if __name__ == "__main__":
user = ""
password = ""
token = ""
protocol = ""
outfile = ""
timeout = 60
optlist, args = getopt.getopt(sys.argv[1:], 'c:t:p:ho:', ['creds=', 'token=', 'protocol=', 'help', 'output='])
for opt, a in optlist:
if opt in ('-c', '--creds'):
(user, password) = a.split(':')
if opt in ('-t', '--token'):
token = a
if opt in ('-p', '--protocol'):
protocol = a.upper()
if opt in ('-h', '--help'):
usage()
if opt in ('-o', '--output'):
outfile = a
    try:
        rubrik_node = args[0]
    except IndexError:
        usage()
    # Only prompt for credentials when not authenticating with an API token.
    if not token and not user:
        user = python_input("User: ")
    if not token and not password:
        password = getpass.getpass("Password: ")
if token != "":
rubrik = rubrik_cdm.Connect(rubrik_node, api_token=token)
else:
rubrik = rubrik_cdm.Connect(rubrik_node, user, password)
hs_data = rubrik.get('internal', '/host/share', timeout=timeout)
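    # One record per host share; shares whose status is REPLICATION_TARGET
    # (replicas sourced from another cluster) are filtered out below.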
if outfile:
fp = open(outfile, "w")
for hs in hs_data['data']:
if protocol != "" and protocol != hs['shareType']:
continue
if hs['status'] != "REPLICATION_TARGET":
if outfile:
fp.write(hs['hostname'] + ":" + hs['exportPoint'] + "\n")
else:
print(hs['hostname'] + ":" + hs['exportPoint'])
if outfile:
fp.close()
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'includes': [
'breakpad_sender.gypi',
'breakpad_handler.gypi',
],
'conditions': [
# minidump_stackwalk and minidump_dump are tool-type executables that do
# not build on iOS with Xcode (but do build on iOS with ninja.)
['(OS!="ios" or "<(GENERATOR)"!="xcode" or "<(GENERATOR_FLAVOR)"=="ninja") and OS!="win"', {
'targets': [
{
# code shared by both {micro,mini}dump_stackwalk
# GN version: //breakpad:stackwalk_common
'target_name': 'stackwalk_common',
'type': 'static_library',
'includes': ['breakpad_tools.gypi'],
'defines': ['BPLOG_MINIMUM_SEVERITY=SEVERITY_ERROR'],
'sources': [
'src/processor/basic_code_module.h',
'src/processor/basic_code_modules.cc',
'src/processor/basic_code_modules.h',
'src/processor/basic_source_line_resolver.cc',
'src/processor/call_stack.cc',
'src/processor/cfi_frame_info.cc',
'src/processor/cfi_frame_info.h',
'src/processor/disassembler_x86.cc',
'src/processor/disassembler_x86.h',
'src/processor/dump_context.cc',
'src/processor/dump_object.cc',
'src/processor/logging.cc',
'src/processor/logging.h',
'src/processor/pathname_stripper.cc',
'src/processor/pathname_stripper.h',
'src/processor/process_state.cc',
'src/processor/proc_maps_linux.cc',
'src/processor/simple_symbol_supplier.cc',
'src/processor/simple_symbol_supplier.h',
'src/processor/source_line_resolver_base.cc',
'src/processor/stack_frame_cpu.cc',
'src/processor/stack_frame_symbolizer.cc',
'src/processor/stackwalk_common.cc',
'src/processor/stackwalker.cc',
'src/processor/stackwalker_amd64.cc',
'src/processor/stackwalker_amd64.h',
'src/processor/stackwalker_arm.cc',
'src/processor/stackwalker_arm.h',
'src/processor/stackwalker_arm64.cc',
'src/processor/stackwalker_arm64.h',
'src/processor/stackwalker_mips.cc',
'src/processor/stackwalker_mips.h',
'src/processor/stackwalker_ppc.cc',
'src/processor/stackwalker_ppc.h',
'src/processor/stackwalker_ppc64.cc',
'src/processor/stackwalker_ppc64.h',
'src/processor/stackwalker_sparc.cc',
'src/processor/stackwalker_sparc.h',
'src/processor/stackwalker_x86.cc',
'src/processor/stackwalker_x86.h',
'src/processor/tokenize.cc',
'src/processor/tokenize.h',
# libdisasm
'src/third_party/libdisasm/ia32_implicit.c',
'src/third_party/libdisasm/ia32_implicit.h',
'src/third_party/libdisasm/ia32_insn.c',
'src/third_party/libdisasm/ia32_insn.h',
'src/third_party/libdisasm/ia32_invariant.c',
'src/third_party/libdisasm/ia32_invariant.h',
'src/third_party/libdisasm/ia32_modrm.c',
'src/third_party/libdisasm/ia32_modrm.h',
'src/third_party/libdisasm/ia32_opcode_tables.c',
'src/third_party/libdisasm/ia32_opcode_tables.h',
'src/third_party/libdisasm/ia32_operand.c',
'src/third_party/libdisasm/ia32_operand.h',
'src/third_party/libdisasm/ia32_reg.c',
'src/third_party/libdisasm/ia32_reg.h',
'src/third_party/libdisasm/ia32_settings.c',
'src/third_party/libdisasm/ia32_settings.h',
'src/third_party/libdisasm/libdis.h',
'src/third_party/libdisasm/qword.h',
'src/third_party/libdisasm/x86_disasm.c',
'src/third_party/libdisasm/x86_format.c',
'src/third_party/libdisasm/x86_imm.c',
'src/third_party/libdisasm/x86_imm.h',
'src/third_party/libdisasm/x86_insn.c',
'src/third_party/libdisasm/x86_misc.c',
'src/third_party/libdisasm/x86_operand_list.c',
'src/third_party/libdisasm/x86_operand_list.h',
],
'conditions': [
['OS=="ios"', {
'toolsets': ['host'],
}],
],
},
{
# GN version: //breakpad:microdump_stackwalk
'target_name': 'microdump_stackwalk',
'type': 'executable',
'dependencies': ['stackwalk_common'],
'includes': ['breakpad_tools.gypi'],
'defines': ['BPLOG_MINIMUM_SEVERITY=SEVERITY_ERROR'],
'sources': [
'src/processor/microdump.cc',
'src/processor/microdump_processor.cc',
'src/processor/microdump_stackwalk.cc',
],
'conditions': [
['OS=="ios"', {
'toolsets': ['host'],
}],
],
},
{
# GN version: //breakpad:minidump_stackwalk
'target_name': 'minidump_stackwalk',
'type': 'executable',
'dependencies': ['stackwalk_common'],
'includes': ['breakpad_tools.gypi'],
'defines': ['BPLOG_MINIMUM_SEVERITY=SEVERITY_ERROR'],
'sources': [
'src/processor/exploitability.cc',
'src/processor/exploitability_linux.cc',
'src/processor/exploitability_linux.h',
'src/processor/exploitability_win.cc',
'src/processor/exploitability_win.h',
'src/processor/minidump.cc',
'src/processor/minidump_processor.cc',
'src/processor/minidump_stackwalk.cc',
'src/processor/symbolic_constants_win.cc',
'src/processor/symbolic_constants_win.h',
],
'conditions': [
['OS=="ios"', {
'toolsets': ['host'],
}],
],
},
{
# GN version: //breakpad:minidump_dump
'target_name': 'minidump_dump',
'type': 'executable',
'includes': ['breakpad_tools.gypi'],
'sources': [
'src/processor/basic_code_module.h',
'src/processor/basic_code_modules.cc',
'src/processor/basic_code_modules.h',
'src/processor/dump_context.cc',
'src/processor/dump_object.cc',
'src/processor/logging.cc',
'src/processor/logging.h',
'src/processor/minidump.cc',
'src/processor/minidump_dump.cc',
'src/processor/pathname_stripper.cc',
'src/processor/pathname_stripper.h',
'src/processor/proc_maps_linux.cc',
],
'conditions': [
['OS=="ios"', {
'toolsets': ['host'],
}],
],
},
],
}],
['OS=="mac" or (OS=="ios" and ("<(GENERATOR)"!="xcode" or "<(GENERATOR_FLAVOR)"=="ninja"))', {
'target_defaults': {
'include_dirs': [
'src',
],
'configurations': {
'Debug_Base': {
'defines': [
# This is needed for GTMLogger to work correctly.
'DEBUG',
],
},
},
},
'targets': [
{
# GN version: //breakpad:dump_syms
'target_name': 'dump_syms',
'type': 'executable',
'toolsets': ['host'],
'include_dirs': [
'src/common/mac',
],
'sources': [
'src/common/dwarf/bytereader.cc',
'src/common/dwarf_cfi_to_module.cc',
'src/common/dwarf_cu_to_module.cc',
'src/common/dwarf/dwarf2diehandler.cc',
'src/common/dwarf/dwarf2reader.cc',
'src/common/dwarf_line_to_module.cc',
'src/common/language.cc',
'src/common/mac/arch_utilities.cc',
'src/common/mac/arch_utilities.h',
'src/common/mac/dump_syms.cc',
'src/common/mac/file_id.cc',
'src/common/mac/macho_id.cc',
'src/common/mac/macho_reader.cc',
'src/common/mac/macho_utilities.cc',
'src/common/mac/macho_walker.cc',
'src/common/md5.cc',
'src/common/module.cc',
'src/common/stabs_reader.cc',
'src/common/stabs_to_module.cc',
'src/tools/mac/dump_syms/dump_syms_tool.cc',
],
'defines': [
# For src/common/stabs_reader.h.
'HAVE_MACH_O_NLIST_H',
],
'xcode_settings': {
# Like ld, dump_syms needs to operate on enough data that it may
# actually need to be able to address more than 4GB. Use x86_64.
# Don't worry! An x86_64 dump_syms is perfectly able to dump
# 32-bit files.
'ARCHS': [
'x86_64',
],
# The DWARF utilities require -funsigned-char.
'GCC_CHAR_IS_UNSIGNED_CHAR': 'YES',
# dwarf2reader.cc uses dynamic_cast.
'GCC_ENABLE_CPP_RTTI': 'YES',
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
},
'configurations': {
'Release_Base': {
'xcode_settings': {
# dump_syms crashes when built at -O1, -O2, and -O3. It does
# not crash at -Os. To play it safe, dump_syms is always built
# at -O0 until this can be sorted out.
# http://code.google.com/p/google-breakpad/issues/detail?id=329
'GCC_OPTIMIZATION_LEVEL': '0', # -O0
},
},
},
},
{
# GN version: //breakpad:symupload
'target_name': 'symupload',
'type': 'executable',
'toolsets': ['host'],
'include_dirs': [
'src/common/mac',
],
'sources': [
'src/common/mac/HTTPMultipartUpload.m',
'src/tools/mac/symupload/symupload.m',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
}
},
],
}],
['OS=="mac"', {
'target_defaults': {
'include_dirs': [
'src',
],
'configurations': {
'Debug_Base': {
'defines': [
# This is needed for GTMLogger to work correctly.
'DEBUG',
],
},
},
},
'targets': [
{
# GN version: //breakpad:utilities
'target_name': 'breakpad_utilities',
'type': 'static_library',
'sources': [
'src/client/mac/crash_generation/ConfigFile.mm',
'src/client/mac/handler/breakpad_nlist_64.cc',
'src/client/mac/handler/dynamic_images.cc',
'src/client/mac/handler/minidump_generator.cc',
'src/client/minidump_file_writer.cc',
'src/common/convert_UTF.c',
'src/common/mac/MachIPC.mm',
'src/common/mac/arch_utilities.cc',
'src/common/mac/bootstrap_compat.cc',
'src/common/mac/file_id.cc',
'src/common/mac/launch_reporter.cc',
'src/common/mac/macho_id.cc',
'src/common/mac/macho_utilities.cc',
'src/common/mac/macho_walker.cc',
'src/common/mac/string_utilities.cc',
'src/common/md5.cc',
'src/common/simple_string_dictionary.cc',
'src/common/string_conversion.cc',
],
},
{
# GN version: //breakpad:crash_inspector
'target_name': 'crash_inspector',
'type': 'executable',
'variables': {
'mac_real_dsym': 1,
},
'dependencies': [
'breakpad_utilities',
],
'include_dirs': [
'src/client/apple/Framework',
'src/common/mac',
],
'sources': [
'src/client/mac/crash_generation/Inspector.mm',
'src/client/mac/crash_generation/InspectorMain.mm',
],
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/CoreServices.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
],
}
},
{
# GN version: //breakpad:crash_report_sender
'target_name': 'crash_report_sender',
'type': 'executable',
'mac_bundle': 1,
'variables': {
'mac_real_dsym': 1,
},
'include_dirs': [
'src/common/mac',
],
'sources': [
'src/common/mac/HTTPMultipartUpload.m',
'src/client/mac/sender/crash_report_sender.m',
'src/client/mac/sender/uploader.mm',
'src/common/mac/GTMLogger.m',
],
'mac_bundle_resources': [
'src/client/mac/sender/English.lproj/Localizable.strings',
'src/client/mac/sender/crash_report_sender.icns',
'src/client/mac/sender/Breakpad.xib',
'src/client/mac/sender/crash_report_sender-Info.plist',
],
'mac_bundle_resources!': [
'src/client/mac/sender/crash_report_sender-Info.plist',
],
'xcode_settings': {
'INFOPLIST_FILE': 'src/client/mac/sender/crash_report_sender-Info.plist',
},
'link_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
'$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
'$(SDKROOT)/System/Library/Frameworks/SystemConfiguration.framework',
],
}
},
{
# GN version: //breakpad
'target_name': 'breakpad',
'type': 'static_library',
'dependencies': [
'breakpad_utilities',
'crash_inspector',
'crash_report_sender',
],
'include_dirs': [
'src/client/apple/Framework',
],
'direct_dependent_settings': {
'include_dirs': [
'src/client/apple/Framework',
],
},
'defines': [
'USE_PROTECTED_ALLOCATIONS=1',
],
'sources': [
'src/client/mac/crash_generation/crash_generation_client.cc',
'src/client/mac/crash_generation/crash_generation_client.h',
'src/client/mac/handler/protected_memory_allocator.cc',
'src/client/mac/handler/exception_handler.cc',
'src/client/mac/Framework/Breakpad.mm',
'src/client/mac/Framework/OnDemandServer.mm',
],
},
],
}],
[ 'OS=="linux" or OS=="android" or os_bsd==1', {
'conditions': [
['OS=="android"', {
'defines': [
'__ANDROID__',
],
}],
],
# Tools needed for archiving build symbols.
'targets': [
{
# GN version: //breakpad:symupload
'target_name': 'symupload',
'type': 'executable',
'includes': ['breakpad_tools.gypi'],
'sources': [
'src/tools/linux/symupload/sym_upload.cc',
'src/common/linux/http_upload.cc',
'src/common/linux/http_upload.h',
],
'include_dirs': [
'src',
'src/third_party',
],
'link_settings': {
'libraries': [
'-ldl',
],
},
},
{
# GN version: //breakpad:dump_syms
'target_name': 'dump_syms',
'type': 'executable',
'toolsets': ['host'],
        # dwarf2reader.cc uses dynamic_cast. Because we typically don't
        # support RTTI, we enable it for this single target. Since
        # dump_syms doesn't share any object files with anything else,
        # this doesn't end up polluting Chrome itself.
'cflags_cc!': ['-fno-rtti'],
'sources': [
'src/common/dwarf/bytereader.cc',
'src/common/dwarf_cfi_to_module.cc',
'src/common/dwarf_cfi_to_module.h',
'src/common/dwarf_cu_to_module.cc',
'src/common/dwarf_cu_to_module.h',
'src/common/dwarf/dwarf2diehandler.cc',
'src/common/dwarf/dwarf2reader.cc',
'src/common/dwarf_line_to_module.cc',
'src/common/dwarf_line_to_module.h',
'src/common/language.cc',
'src/common/language.h',
'src/common/linux/crc32.cc',
'src/common/linux/crc32.h',
'src/common/linux/dump_symbols.cc',
'src/common/linux/dump_symbols.h',
'src/common/linux/elf_symbols_to_module.cc',
'src/common/linux/elf_symbols_to_module.h',
'src/common/linux/elfutils.cc',
'src/common/linux/elfutils.h',
'src/common/linux/file_id.cc',
'src/common/linux/file_id.h',
'src/common/linux/linux_libc_support.cc',
'src/common/linux/linux_libc_support.h',
'src/common/linux/memory_mapped_file.cc',
'src/common/linux/memory_mapped_file.h',
'src/common/linux/guid_creator.h',
'src/common/module.cc',
'src/common/module.h',
'src/common/stabs_reader.cc',
'src/common/stabs_reader.h',
'src/common/stabs_to_module.cc',
'src/common/stabs_to_module.h',
'src/tools/linux/dump_syms/dump_syms.cc',
],
# Breakpad rev 583 introduced this flag.
# Using this define, stabs_reader.h will include a.out.h to
# build on Linux.
'defines': [
'HAVE_A_OUT_H',
],
'include_dirs': [
'src',
'..',
],
},
{
# GN version: //breakpad:client
'target_name': 'breakpad_client',
'type': 'static_library',
'sources': [
'src/client/linux/crash_generation/crash_generation_client.cc',
'src/client/linux/crash_generation/crash_generation_client.h',
'src/client/linux/handler/exception_handler.cc',
'src/client/linux/handler/exception_handler.h',
'src/client/linux/handler/minidump_descriptor.cc',
'src/client/linux/handler/minidump_descriptor.h',
'src/client/linux/log/log.cc',
'src/client/linux/log/log.h',
'src/client/linux/dump_writer_common/mapping_info.h',
'src/client/linux/dump_writer_common/thread_info.cc',
'src/client/linux/dump_writer_common/thread_info.h',
'src/client/linux/dump_writer_common/ucontext_reader.cc',
'src/client/linux/dump_writer_common/ucontext_reader.h',
'src/client/linux/microdump_writer/microdump_writer.cc',
'src/client/linux/microdump_writer/microdump_writer.h',
'src/client/linux/minidump_writer/cpu_set.h',
'src/client/linux/minidump_writer/directory_reader.h',
'src/client/linux/minidump_writer/line_reader.h',
'src/client/linux/minidump_writer/linux_core_dumper.cc',
'src/client/linux/minidump_writer/linux_core_dumper.h',
'src/client/linux/minidump_writer/linux_dumper.cc',
'src/client/linux/minidump_writer/linux_dumper.h',
'src/client/linux/minidump_writer/linux_ptrace_dumper.cc',
'src/client/linux/minidump_writer/linux_ptrace_dumper.h',
'src/client/linux/minidump_writer/minidump_writer.cc',
'src/client/linux/minidump_writer/minidump_writer.h',
'src/client/linux/minidump_writer/proc_cpuinfo_reader.h',
'src/client/minidump_file_writer-inl.h',
'src/client/minidump_file_writer.cc',
'src/client/minidump_file_writer.h',
'src/common/convert_UTF.c',
'src/common/convert_UTF.h',
'src/common/linux/elf_core_dump.cc',
'src/common/linux/elf_core_dump.h',
'src/common/linux/elfutils.cc',
'src/common/linux/elfutils.h',
'src/common/linux/file_id.cc',
'src/common/linux/file_id.h',
'src/common/linux/google_crashdump_uploader.cc',
'src/common/linux/google_crashdump_uploader.h',
'src/common/linux/guid_creator.cc',
'src/common/linux/guid_creator.h',
'src/common/linux/libcurl_wrapper.cc',
'src/common/linux/libcurl_wrapper.h',
'src/common/linux/linux_libc_support.cc',
'src/common/linux/linux_libc_support.h',
'src/common/linux/memory_mapped_file.cc',
'src/common/linux/memory_mapped_file.h',
'src/common/linux/safe_readlink.cc',
'src/common/linux/safe_readlink.h',
'src/common/memory.h',
'src/common/simple_string_dictionary.cc',
'src/common/simple_string_dictionary.h',
'src/common/string_conversion.cc',
'src/common/string_conversion.h',
],
'conditions': [
['target_arch=="arm" and chromeos==1', {
# Avoid running out of registers in
# linux_syscall_support.h:sys_clone()'s inline assembly.
'cflags': ['-marm'],
}],
['OS=="android"', {
'include_dirs': [
'src/common/android/include',
],
'direct_dependent_settings': {
'include_dirs': [
'src/common/android/include',
],
},
'sources': [
'src/common/android/breakpad_getcontext.S',
],
}],
['OS!="android"', {
'link_settings': {
'libraries': [
            # In case of Android, '-ldl' is added in common.gypi, since it
            # is needed for stlport_static. For LD, the order of libraries
            # is important, and thus we skip adding it here.
'-ldl',
],
},
}],
['clang==1 and target_arch=="ia32"', {
'cflags!': [
# Clang's -mstackrealign doesn't work well with
# linux_syscall_support.h hand written asm syscalls.
# See https://crbug.com/556393
'-mstackrealign',
],
}],
],
'include_dirs': [
'src',
'src/client',
'src/third_party/linux/include',
'..',
'.',
],
},
{
# Breakpad r693 uses some files from src/processor in unit tests.
# GN version: //breakpad:processor_support
'target_name': 'breakpad_processor_support',
'type': 'static_library',
'sources': [
'src/common/scoped_ptr.h',
'src/processor/basic_code_modules.cc',
'src/processor/basic_code_modules.h',
'src/processor/dump_context.cc',
'src/processor/dump_object.cc',
'src/processor/logging.cc',
'src/processor/logging.h',
'src/processor/minidump.cc',
'src/processor/pathname_stripper.cc',
'src/processor/pathname_stripper.h',
'src/processor/proc_maps_linux.cc',
],
'include_dirs': [
'src',
'src/client',
'src/third_party/linux/include',
'..',
'.',
],
},
{
# GN version: //breakpad:breakpad_unittests
'target_name': 'breakpad_unittests',
'type': 'executable',
'dependencies': [
'../testing/gtest.gyp:gtest',
'../testing/gtest.gyp:gtest_main',
'../testing/gmock.gyp:gmock',
'breakpad_client',
'breakpad_processor_support',
'linux_dumper_unittest_helper',
],
'variables': {
'clang_warning_flags': [
# See http://crbug.com/138571#c18
'-Wno-unused-value',
],
},
'sources': [
'linux/breakpad_googletest_includes.h',
'src/client/linux/handler/exception_handler_unittest.cc',
'src/client/linux/minidump_writer/cpu_set_unittest.cc',
'src/client/linux/minidump_writer/directory_reader_unittest.cc',
'src/client/linux/minidump_writer/line_reader_unittest.cc',
'src/client/linux/minidump_writer/linux_core_dumper_unittest.cc',
'src/client/linux/minidump_writer/linux_ptrace_dumper_unittest.cc',
'src/client/linux/minidump_writer/minidump_writer_unittest.cc',
'src/client/linux/minidump_writer/minidump_writer_unittest_utils.cc',
'src/client/linux/minidump_writer/proc_cpuinfo_reader_unittest.cc',
'src/common/linux/elf_core_dump_unittest.cc',
'src/common/linux/file_id_unittest.cc',
'src/common/linux/linux_libc_support_unittest.cc',
'src/common/linux/synth_elf.cc',
'src/common/linux/tests/auto_testfile.h',
'src/common/linux/tests/crash_generator.cc',
'src/common/linux/tests/crash_generator.h',
'src/common/memory_range.h',
'src/common/memory_unittest.cc',
'src/common/simple_string_dictionary_unittest.cc',
'src/common/test_assembler.cc',
'src/common/tests/file_utils.cc',
'src/common/tests/file_utils.h',
'src/tools/linux/md2core/minidump_memory_range.h',
'src/tools/linux/md2core/minidump_memory_range_unittest.cc',
],
'include_dirs': [
'linux', # Use our copy of breakpad_googletest_includes.h
'src',
'..',
'.',
],
'conditions': [
['OS=="android"', {
'libraries': [
'-llog',
],
'include_dirs': [
'src/common/android/include',
],
'sources': [
'src/common/android/breakpad_getcontext_unittest.cc',
],
'variables': {
'test_type': 'gtest',
'test_suite_name': '<(_target_name)',
'isolate_file': 'breakpad_unittests.isolate',
},
'includes': [ '../build/android/test_runner.gypi' ],
}],
['clang==1 and target_arch=="ia32"', {
'cflags!': [
# Clang's -mstackrealign doesn't work well with
# linux_syscall_support.h hand written asm syscalls.
# See https://crbug.com/556393
'-mstackrealign',
],
}],
],
},
{
# GN version: //breakpad:linux_dumper_unittest_helper
'target_name': 'linux_dumper_unittest_helper',
'type': 'executable',
'dependencies': [
'breakpad_processor_support',
],
'sources': [
'src/client/linux/minidump_writer/linux_dumper_unittest_helper.cc',
],
'include_dirs': [
'src',
'..',
],
'conditions': [
['target_arch=="mipsel" and OS=="android"', {
'include_dirs': [
'src/common/android/include',
],
}],
],
},
{
# GN version: //breakpad:generate_test_dump
'target_name': 'generate_test_dump',
'type': 'executable',
'sources': [
'linux/generate-test-dump.cc',
],
'dependencies': [
'breakpad_client',
],
'include_dirs': [
'..',
'src',
],
'conditions': [
['OS=="android"', {
'libraries': [
'-llog',
],
'include_dirs': [
'src/common/android/include',
],
}],
],
},
{
# GN version: //breakpad:minidump-2-core
'target_name': 'minidump-2-core',
'type': 'executable',
'sources': [
'src/tools/linux/md2core/minidump-2-core.cc'
],
'dependencies': [
'breakpad_client',
],
'include_dirs': [
'..',
'src',
],
},
{
# GN version: //breakpad:core-2-minidump
'target_name': 'core-2-minidump',
'type': 'executable',
'sources': [
'src/tools/linux/core2md/core2md.cc'
],
'dependencies': [
'breakpad_client',
],
'include_dirs': [
'..',
'src',
],
},
],
}],
['OS=="ios"', {
'targets': [
{
# GN version: //breakpad:client
'target_name': 'breakpad_client',
'type': 'static_library',
'sources': [
'src/client/ios/Breakpad.h',
'src/client/ios/Breakpad.mm',
'src/client/ios/BreakpadController.h',
'src/client/ios/BreakpadController.mm',
'src/client/ios/handler/ios_exception_minidump_generator.mm',
'src/client/ios/handler/ios_exception_minidump_generator.h',
'src/client/mac/crash_generation/ConfigFile.h',
'src/client/mac/crash_generation/ConfigFile.mm',
'src/client/mac/handler/breakpad_nlist_64.cc',
'src/client/mac/handler/breakpad_nlist_64.h',
'src/client/mac/handler/dynamic_images.cc',
'src/client/mac/handler/dynamic_images.h',
'src/client/mac/handler/protected_memory_allocator.cc',
'src/client/mac/handler/protected_memory_allocator.h',
'src/client/mac/handler/exception_handler.cc',
'src/client/mac/handler/exception_handler.h',
'src/client/mac/handler/minidump_generator.cc',
'src/client/mac/handler/minidump_generator.h',
'src/client/mac/sender/uploader.h',
'src/client/mac/sender/uploader.mm',
'src/client/minidump_file_writer.cc',
'src/client/minidump_file_writer.h',
'src/client/minidump_file_writer-inl.h',
'src/common/convert_UTF.c',
'src/common/convert_UTF.h',
'src/common/mac/file_id.cc',
'src/common/mac/file_id.h',
'src/common/mac/HTTPMultipartUpload.m',
'src/common/mac/macho_id.cc',
'src/common/mac/macho_id.h',
'src/common/mac/macho_utilities.cc',
'src/common/mac/macho_utilities.h',
'src/common/mac/macho_walker.cc',
'src/common/mac/macho_walker.h',
'src/common/mac/string_utilities.cc',
'src/common/mac/string_utilities.h',
'src/common/md5.cc',
'src/common/md5.h',
'src/common/simple_string_dictionary.cc',
'src/common/simple_string_dictionary.h',
'src/common/string_conversion.cc',
'src/common/string_conversion.h',
'src/google_breakpad/common/minidump_format.h',
],
'include_dirs': [
'src',
'src/client/mac/Framework',
'src/common/mac',
],
'direct_dependent_settings': {
'include_dirs': [
'src',
],
},
}
]
}],
['OS=="ios" and "<(GENERATOR)"=="xcode" and "<(GENERATOR_FLAVOR)"!="ninja"', {
'variables': {
'ninja_output_dir': 'ninja-breakpad',
'ninja_product_dir':
'<(DEPTH)/xcodebuild/<(ninja_output_dir)/<(CONFIGURATION_NAME)',
},
# Generation is done via two actions: (1) compiling the executable with
# ninja, and (2) copying the executable into a location that is shared
# with other projects. These actions are separated into two targets in
# order to be able to specify that the second action should not run until
# the first action finishes (since the ordering of multiple actions in
# one target is defined only by inputs and outputs, and it's impossible
# to set correct inputs for the ninja build, so setting all the inputs
# and outputs isn't an option).
'targets': [
{
'target_name': 'compile_breakpad_utilities',
'type': 'none',
'variables': {
# Gyp to rerun
're_run_targets': [
'breakpad/breakpad.gyp',
],
},
'includes': ['../build/ios/mac_build.gypi'],
'actions': [
{
'action_name': 'compile breakpad utilities',
'inputs': [],
'outputs': [],
'action': [
'<@(ninja_cmd)',
'dump_syms',
'symupload',
],
'message': 'Generating the breakpad executables',
},
],
},
{
'target_name': 'breakpad_utilities',
'type': 'none',
'dependencies': [
'compile_breakpad_utilities',
],
'actions': [
{
'action_name': 'copy dump_syms',
'inputs': [
'<(ninja_product_dir)/dump_syms',
],
'outputs': [
'<(PRODUCT_DIR)/dump_syms',
],
'action': [
'cp',
'<(ninja_product_dir)/dump_syms',
'<(PRODUCT_DIR)/dump_syms',
],
},
{
'action_name': 'copy symupload',
'inputs': [
'<(ninja_product_dir)/symupload',
],
'outputs': [
'<(PRODUCT_DIR)/symupload',
],
'action': [
'cp',
'<(ninja_product_dir)/symupload',
'<(PRODUCT_DIR)/symupload',
],
},
],
},
{
'target_name': 'dump_syms',
'type': 'none',
'dependencies': [
'breakpad_utilities',
],
},
{
'target_name': 'symupload',
'type': 'none',
'dependencies': [
'breakpad_utilities',
],
}
],
}],
['OS=="android"', {
'targets': [
{
'target_name': 'breakpad_unittests_stripped',
'type': 'none',
'dependencies': [ 'breakpad_unittests' ],
'actions': [{
'action_name': 'strip breakpad_unittests',
'inputs': [ '<(PRODUCT_DIR)/breakpad_unittests' ],
'outputs': [ '<(PRODUCT_DIR)/breakpad_unittests_stripped' ],
'action': [ '<(android_strip)', '<@(_inputs)', '-o', '<@(_outputs)' ],
}],
},
{
'target_name': 'breakpad_unittests_deps',
'type': 'none',
'dependencies': [
'breakpad_unittests_stripped',
],
# For the component build, ensure dependent shared libraries are
# stripped and put alongside breakpad_unittest to simplify pushing to
# the device.
'variables': {
'output_dir': '<(PRODUCT_DIR)/breakpad_unittests_deps/',
'native_binary': '<(PRODUCT_DIR)/breakpad_unittests_stripped',
'include_main_binary': 0,
},
'includes': [
'../build/android/native_app_dependencies.gypi'
],
}
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'breakpad_unittests_apk_run',
'type': 'none',
'dependencies': [
'breakpad_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'breakpad_unittests_apk.isolate',
],
},
]
}
],
],
}],
],
}
|
import copy
import numpy as np
from dowhy.causal_refuter import CausalRefutation
from dowhy.causal_refuter import CausalRefuter
class PlaceboTreatmentRefuter(CausalRefuter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._placebo_type = kwargs["placebo_type"]
def refute_estimate(self):
num_rows = self._data.shape[0]
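        # "permute" shuffles the observed treatment column; any other
        # placebo_type replaces the treatment with standard-normal noise.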
if self._placebo_type == "permute":
new_treatment = self._data[self._treatment_name].sample(frac=1).values
else:
new_treatment = np.random.randn(num_rows)
new_data = self._data.assign(placebo=new_treatment)
self.logger.debug(new_data[0:10])
estimator_class = self._estimate.params['estimator_class']
identified_estimand = copy.deepcopy(self._target_estimand)
identified_estimand.treatment_variable = "placebo"
new_estimator = estimator_class(
new_data,
identified_estimand,
"placebo", self._outcome_name,
test_significance=None
)
new_effect = new_estimator.estimate_effect()
refute = CausalRefutation(self._estimate.value, new_effect.value,
refutation_type="Refute: Use a Placebo Treatment")
return refute
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: This package is not named "flask" because of
# https://github.com/PyCQA/pylint/issues/2648
"""
This library builds on the OpenTelemetry WSGI middleware to track web requests
in Flask applications. In addition to opentelemetry-util-http, it
supports Flask-specific features such as:
* The Flask url rule pattern is used as the Span name.
* The ``http.route`` Span attribute is set so that one can see which URL rule
matched a request.
Usage
-----
.. code-block:: python
from flask import Flask
from opentelemetry.instrumentation.flask import FlaskInstrumentor
app = Flask(__name__)
FlaskInstrumentor().instrument_app(app)
@app.route("/")
def hello():
return "Hello!"
if __name__ == "__main__":
app.run(debug=True)
API
---
"""
from logging import getLogger
import flask
import opentelemetry.instrumentation.wsgi as otel_wsgi
from opentelemetry import context, trace
from opentelemetry.instrumentation.flask.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.propagators import (
get_global_response_propagator,
)
from opentelemetry.propagate import extract
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.util._time import _time_ns
from opentelemetry.util.http import get_excluded_urls
_logger = getLogger(__name__)
_ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
_ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
_ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
_ENVIRON_TOKEN = "opentelemetry-flask.token"
_excluded_urls = get_excluded_urls("FLASK")
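# Prefer the matched Flask URL rule pattern as the span name; when no rule
# matched (e.g. a 404), fall back to the generic WSGI span name.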
def get_default_span_name():
span_name = ""
try:
span_name = flask.request.url_rule.rule
except AttributeError:
span_name = otel_wsgi.get_default_span_name(flask.request.environ)
return span_name
def _rewrapped_app(wsgi_app):
def _wrapped_app(wrapped_app_environ, start_response):
# We want to measure the time for route matching, etc.
# In theory, we could start the span here and use
# update_name later but that API is "highly discouraged" so
# we better avoid it.
wrapped_app_environ[_ENVIRON_STARTTIME_KEY] = _time_ns()
def _start_response(status, response_headers, *args, **kwargs):
if not _excluded_urls.url_disabled(flask.request.url):
span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
propagator = get_global_response_propagator()
if propagator:
propagator.inject(
response_headers,
setter=otel_wsgi.default_response_propagation_setter,
)
if span:
otel_wsgi.add_response_attributes(
span, status, response_headers
)
else:
_logger.warning(
"Flask environ's OpenTelemetry span "
"missing at _start_response(%s)",
status,
)
return start_response(status, response_headers, *args, **kwargs)
return wsgi_app(wrapped_app_environ, _start_response)
return _wrapped_app
def _wrapped_before_request(name_callback):
def _before_request():
if _excluded_urls.url_disabled(flask.request.url):
return
flask_request_environ = flask.request.environ
span_name = name_callback()
token = context.attach(
extract(flask_request_environ, getter=otel_wsgi.wsgi_getter)
)
tracer = trace.get_tracer(__name__, __version__)
span = tracer.start_span(
span_name,
kind=trace.SpanKind.SERVER,
start_time=flask_request_environ.get(_ENVIRON_STARTTIME_KEY),
)
if span.is_recording():
attributes = otel_wsgi.collect_request_attributes(
flask_request_environ
)
if flask.request.url_rule:
# For 404 that result from no route found, etc, we
# don't have a url_rule.
attributes[
SpanAttributes.HTTP_ROUTE
] = flask.request.url_rule.rule
for key, value in attributes.items():
span.set_attribute(key, value)
activation = trace.use_span(span, end_on_exit=True)
activation.__enter__() # pylint: disable=E1101
flask_request_environ[_ENVIRON_ACTIVATION_KEY] = activation
flask_request_environ[_ENVIRON_SPAN_KEY] = span
flask_request_environ[_ENVIRON_TOKEN] = token
return _before_request
def _teardown_request(exc):
# pylint: disable=E1101
if _excluded_urls.url_disabled(flask.request.url):
return
activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
if not activation:
# This request didn't start a span, maybe because it was created in a
# way that doesn't run `before_request`, like when it is created with
# `app.test_request_context`.
return
if exc is None:
activation.__exit__(None, None, None)
else:
activation.__exit__(
type(exc), exc, getattr(exc, "__traceback__", None)
)
context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
class _InstrumentedFlask(flask.Flask):
name_callback = get_default_span_name
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._original_wsgi_ = self.wsgi_app
self.wsgi_app = _rewrapped_app(self.wsgi_app)
_before_request = _wrapped_before_request(
_InstrumentedFlask.name_callback
)
self._before_request = _before_request
self.before_request(_before_request)
self.teardown_request(_teardown_request)
class FlaskInstrumentor(BaseInstrumentor):
# pylint: disable=protected-access,attribute-defined-outside-init
"""An instrumentor for flask.Flask
See `BaseInstrumentor`
"""
def _instrument(self, **kwargs):
self._original_flask = flask.Flask
name_callback = kwargs.get("name_callback")
if callable(name_callback):
_InstrumentedFlask.name_callback = name_callback
flask.Flask = _InstrumentedFlask
def instrument_app(
self, app, name_callback=get_default_span_name
): # pylint: disable=no-self-use
if not hasattr(app, "_is_instrumented"):
app._is_instrumented = False
if not app._is_instrumented:
app._original_wsgi_app = app.wsgi_app
app.wsgi_app = _rewrapped_app(app.wsgi_app)
_before_request = _wrapped_before_request(name_callback)
app._before_request = _before_request
app.before_request(_before_request)
app.teardown_request(_teardown_request)
app._is_instrumented = True
else:
_logger.warning(
"Attempting to instrument Flask app while already instrumented"
)
def _uninstrument(self, **kwargs):
flask.Flask = self._original_flask
def uninstrument_app(self, app): # pylint: disable=no-self-use
if not hasattr(app, "_is_instrumented"):
app._is_instrumented = False
if app._is_instrumented:
app.wsgi_app = app._original_wsgi_app
# FIXME add support for other Flask blueprints that are not None
app.before_request_funcs[None].remove(app._before_request)
app.teardown_request_funcs[None].remove(_teardown_request)
del app._original_wsgi_app
app._is_instrumented = False
else:
_logger.warning(
"Attempting to uninstrument Flask "
"app while already uninstrumented"
)
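# A short usage sketch for the global patching path, complementing the
# per-app example in the module docstring. BaseInstrumentor provides
# instrument()/uninstrument(), which swap flask.Flask for the instrumented
# subclass and back; this demo block is an illustration, not part of the API.
if __name__ == "__main__":
    FlaskInstrumentor().instrument()
    instrumented_app = flask.Flask(__name__)  # now an _InstrumentedFlask
    FlaskInstrumentor().uninstrument()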
|
def fit(X_train, y_train):
    """Learn the majority class for every feature value seen in training and
    return the training accuracy together with the value->class mapping."""
    from sklearn.metrics import accuracy_score
    import pandas as pd
    data = X_train.copy()
    assert len(data) == len(y_train)
    y_train = pd.DataFrame(list(y_train))
    data.index = range(len(data))
    concated_data = pd.concat([data, y_train], axis=1)
    majority_dict = {}
    predictions = []
    concated_data_copy = concated_data.copy()
    for i in concated_data.columns[:-1]:
        # Map every value in this feature column to the majority class of
        # the rows holding that value.
        column_dict = {}
        for element in concated_data[i].value_counts().index:
            temp_data = concated_data[concated_data[i].isin([element])]
            modes = temp_data.iloc[:, -1].mode()
            # mode() can return an empty Series; fall back to None then
            column_dict[element] = modes[0] if len(modes) > 0 else None
        # Replace the raw values in one pass so earlier replacements are not
        # clobbered by later single-key map() calls.
        concated_data_copy[i] = concated_data_copy[i].map(column_dict)
        majority_dict.update(column_dict)
    # Predict each row as the mode of its per-column majority classes,
    # excluding the label column to avoid leaking the target.
    for ind in concated_data_copy.index:
        row_prediction = concated_data_copy.iloc[ind, :-1].mode()[0]
        predictions.append(row_prediction)
    return accuracy_score(y_train, predictions), majority_dict
def predict(X_test,y_test,majority_dict):
import pandas as pd
from sklearn.metrics import accuracy_score
predictions = []
data = X_test.copy()
data.index = range(len(data))
for i in data.columns:
data[i] = data[i].map(majority_dict)
for ind in data.index:
row_prediction = data.iloc[ind].mode()[0]
predictions.append(row_prediction)
return accuracy_score(y_test, predictions)
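# A minimal, hypothetical smoke test for fit()/predict() above; the tiny
# DataFrame and its column names are illustrative only, assuming categorical
# string features.
if __name__ == "__main__":
    import pandas as pd
    X = pd.DataFrame({"color": ["red", "red", "blue", "blue"],
                      "size": ["S", "M", "S", "M"]})
    y = ["yes", "yes", "no", "no"]
    train_acc, majority_dict = fit(X, y)
    test_acc = predict(X, y, majority_dict)
    print(train_acc, test_acc)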
|
"""
Pomice
~~~~~~
The modern Lavalink wrapper designed for discord.py.
:copyright: 2021, cloudwithax
:license: GPL-3.0
"""
import discord
if not discord.__version__.startswith("2"):
class DiscordPyOutdated(Exception):
pass
    raise DiscordPyOutdated(
        "You must have discord.py 2.0 or greater to use this library"
    )
__version__ = "1.1.7"
__title__ = "pomice"
__author__ = "cloudwithax"
from .enums import SearchType
from .events import *
from .exceptions import *
from .filters import *
from .objects import *
from .player import Player
from .pool import *
|
from django.contrib import admin
from .models import Fixation, FixationAnswer, FixationQuestion, FixationSession, FixationSessionPlayer, FixationSessionSettings
# Register your models here.
admin.site.register(Fixation)
admin.site.register(FixationQuestion)
admin.site.register(FixationAnswer)
admin.site.register(FixationSession)
admin.site.register(FixationSessionPlayer)
admin.site.register(FixationSessionSettings)
|
"""
PERIODS
"""
numPeriods = 180
"""
STOPS
"""
numStations = 13
station_names = (
"Hamburg Hbf", # 0
"Landwehr", # 1
"Hasselbrook", # 2
"Wansbeker Chaussee*", # 3
"Friedrichsberg*", # 4
"Barmbek*", # 5
"Alte Woehr (Stadtpark)", # 6
"Ruebenkamp (City Nord)", # 7
"Ohlsdorf*", # 8
"Kornweg", # 9
"Hoheneichen", # 10
"Wellingsbuettel", # 11
"Poppenbuettel*", # 12
)
numStops = 26
stops_position = (
(0, 0), # Stop 0
(2, 0), # Stop 1
(3, 0), # Stop 2
(4, 0), # Stop 3
(5, 0), # Stop 4
(6, 0), # Stop 5
(7, 0), # Stop 6
(8, 0), # Stop 7
(9, 0), # Stop 8
(11, 0), # Stop 9
(13, 0), # Stop 10
(14, 0), # Stop 11
(15, 0), # Stop 12
(15, 1), # Stop 13
(15, 1), # Stop 14
(13, 1), # Stop 15
(12, 1), # Stop 16
(11, 1), # Stop 17
(10, 1), # Stop 18
(9, 1), # Stop 19
(8, 1), # Stop 20
(7, 1), # Stop 21
(6, 1), # Stop 22
(4, 1), # Stop 23
(2, 1), # Stop 24
(1, 1), # Stop 25
)
stops_distance = (
(0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 0
(0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 1
(0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 2
(0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 3
(0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 4
(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 5
(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 6
(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 7
(0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 8
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 9
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 10
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 11
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 12
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 13
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 14
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 15
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 16
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0), # Stop 17
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0), # Stop 18
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0), # Stop 19
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0), # Stop 20
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0), # Stop 21
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0), # Stop 22
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0), # Stop 23
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2), # Stop 24
(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), # Stop 25
)
station_start = 0
"""
TRAMS
"""
numTrams = 18
tram_capacity = 514
tram_capacity_cargo = 304
tram_capacity_min_passenger = 208
tram_capacity_min_cargo = 0
tram_speed = 1
tram_headway = 1
tram_min_service = 1
tram_max_service = 10
min_time_next_tram = 0.333
tram_travel_deviation = 0.167
"""
PASSENGERS
"""
passenger_set = "pas-20210422-1717-int6000000000000001e-1"
passenger_service_time_board = 0.0145
passenger_service_time_alight = 0.0145
"""
CARGO
"""
numCargo = 10
cargo_size = 4
cargo_station_destination = (
5, # 0
12, # 1
8, # 2
3, # 3
12, # 4
4, # 5
12, # 6
12, # 7
12, # 8
3, # 9
)
cargo_release = (
24, # 0
25, # 1
26, # 2
33, # 3
34, # 4
37, # 5
57, # 6
70, # 7
70, # 8
71, # 9
)
cargo_station_deadline = (
126, # 0
91, # 1
36, # 2
163, # 3
108, # 4
47, # 5
139, # 6
169, # 7
80, # 8
129, # 9
)
cargo_max_delay = 3
cargo_service_time_load = 0.3333333333333333
cargo_service_time_unload = 0.25
"""
parameters for reproducibility. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
# initial entropy
entropy = 8991598675325360468762009371570610170
# index for seed sequence child
child_seed_index = (
0, # 0
)
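# A minimal sketch (an assumption, not part of this configuration file) of
# how the entropy and child seed index above plug into numpy's SeedSequence
# API from the link in the docstring.
if __name__ == "__main__":
    import numpy as np
    root_seed = np.random.SeedSequence(entropy)
    # spawn enough children to cover every index listed in child_seed_index
    children = root_seed.spawn(max(child_seed_index) + 1)
    rng = np.random.default_rng(children[child_seed_index[0]])
    print(rng.random(3))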
|
import datetime
import os
from scrappers.scrappers.config.config import configuration
current_date = datetime.datetime.now().date()
def write_image(func):
    """A decorator that writes image bytes to the default output directory.
    Note: ``writer`` replaces the wrapped function entirely, so the decorated
    function's own body is never executed; only ``data`` is persisted.
    """
    default_path = configuration.default_output_dir
    def writer(self, data, celebrity=None, *args):
        new_file_name = f'{celebrity}_{current_date.year}_{current_date.month}'
        if data:
            with open(os.path.join(default_path, new_file_name), 'wb') as f:
                print('[IMAGE]: Creating image %s' % new_file_name)
                f.write(data)
    return writer
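# A hypothetical usage sketch (the class name and byte string are
# illustrative only): because writer() replaces the wrapped function, the
# decorated method's body is skipped and only the passed bytes are written.
class _ExampleScraper:
    @write_image
    def save_portrait(self, data, celebrity=None):
        """Persist raw image bytes for a celebrity."""
if __name__ == "__main__":
    _ExampleScraper().save_portrait(b"\x89PNG...", celebrity="ada_lovelace")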
|
"""
Copyright (C) 2017-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from mo.front.caffe.extractors.utils import get_canonical_axis_index
from mo.graph.graph import Node, Graph
from mo.ops.op import Op, PermuteAttrs
class ArgMaxOp(Op):
op = 'ArgMax'
def __init__(self, graph: Graph, attrs: dict):
mandatory_props = {
'type': __class__.op,
'op': __class__.op,
'infer': ArgMaxOp.argmax_infer,
'in_ports_count': 2,
'out_ports_count': 1,
}
super().__init__(graph, mandatory_props, attrs)
def supported_attrs(self):
return [
'out_max_val',
'top_k',
'axis',
]
@staticmethod
def argmax_infer(node: Node):
shape = node.in_node(0).shape
if shape is None:
return
# there are two inputs in TensorFlow. The second input is the axis for ArgMax
if len(node.in_nodes()) == 2:
if node.in_node(1).value is None:
log.debug('The second argument to ArgMax is None')
return
node.axis = node.in_node(1).value.item()
# remove the unnecessary input
node.graph.remove_edge(node.in_node(1).id, node.id)
num_top_axes = shape.size
if num_top_axes < 3:
num_top_axes = 3
out_shape = np.ones(num_top_axes, dtype=int)
if node.has_valid('axis'):
axis = get_canonical_axis_index(shape, node.axis)
node.axis = axis
out_shape = np.array(shape)
out_shape[axis] = node.top_k
PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')])
else:
out_shape[0] = shape[0]
out_shape[2] = node.top_k
if node.out_max_val:
out_shape[1] = 2
node.out_node().shape = out_shape
|
from datetime import datetime
import pytest
from pygrocy.data_models.chore import AssignmentType, Chore, PeriodType
from pygrocy.data_models.user import User
from pygrocy.errors.grocy_error import GrocyError
class TestChores:
@pytest.mark.vcr
def test_get_chores_valid(self, grocy):
chores = grocy.chores(get_details=True)
assert isinstance(chores, list)
assert len(chores) == 6
for chore in chores:
assert isinstance(chore, Chore)
assert isinstance(chore.id, int)
assert isinstance(chore.last_tracked_time, datetime)
assert isinstance(chore.next_estimated_execution_time, datetime)
assert isinstance(chore.name, str)
assert isinstance(chore.last_done_by, User)
chore = next(chore for chore in chores if chore.id == 6)
assert chore.name == "Change the bed sheets"
assert chore.period_config == "monday"
@pytest.mark.vcr
def test_get_chore_details(self, grocy):
chore_details = grocy.chore(3)
assert isinstance(chore_details, Chore)
assert chore_details.name == "Take out the trash"
assert chore_details.assignment_type == AssignmentType.RANDOM
assert chore_details.last_done_by.id == 1
assert chore_details.period_type == PeriodType.HOURLY
assert chore_details.period_days is None
assert chore_details.period_config is None
assert chore_details.track_date_only is True
assert chore_details.rollover is False
assert chore_details.assignment_config == "1,2,3,4"
assert chore_details.next_execution_assigned_user.id == 1
assert chore_details.next_execution_assigned_to_user_id == 1
assert chore_details.userfields is None
@pytest.mark.vcr
def test_execute_chore_valid(self, grocy):
result = grocy.execute_chore(1)
assert not isinstance(result, GrocyError)
@pytest.mark.vcr
def test_execute_chore_valid_with_data(self, grocy):
result = grocy.execute_chore(1, done_by=1, tracked_time=datetime.now())
assert not isinstance(result, GrocyError)
@pytest.mark.vcr
def test_execute_chore_invalid(self, grocy):
with pytest.raises(GrocyError) as exc_info:
grocy.execute_chore(1000)
error = exc_info.value
assert error.status_code == 400
|
# coding: utf-8
"""
=======================================================
Feature Extraction for Denoising: Clean and Noisy Audio
=======================================================
Extract acoustic features from clean and noisy datasets for
training a denoising model, e.g. a denoising autoencoder.
To see how soundpy implements this, see `soundpy.builtin.denoiser_feats`.
"""
###############################################################################################
#
#####################################################################
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
parparentdir = os.path.dirname(parentdir)
packagedir = os.path.dirname(parparentdir)
sys.path.insert(0, packagedir)
import soundpy as sp
import IPython.display as ipd
package_dir = '../../../'
os.chdir(package_dir)
sp_dir = package_dir
######################################################
# Prepare for Extraction: Data Organization
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
######################################################
# I will use a mini denoising dataset as an example
# Example noisy data:
data_noisy_dir = '{}../mini-audio-datasets/denoise/noisy'.format(sp_dir)
# Example clean data:
data_clean_dir = '{}../mini-audio-datasets/denoise/clean'.format(sp_dir)
# Where to save extracted features:
data_features_dir = './audiodata/example_feats_models/denoiser/'
######################################################
# Choose Feature Type
# ~~~~~~~~~~~~~~~~~~~
# We can extract 'mfcc', 'fbank', 'powspec', and 'stft'.
# If you are working with speech, I suggest 'fbank', 'powspec', or 'stft'.
feature_type = 'stft'
sr = 22050
######################################################
# Set Duration of Audio
# ~~~~~~~~~~~~~~~~~~~~~
# How much audio, in seconds, to use from each audio file.
# The speech samples are about 3 seconds long.
dur_sec = 3
#######################################################################
# Option 1: Built-In Functionality: soundpy does everything for you
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
############################################################
# Define which data to use and which features to extract.
# NOTE: because the dataset is very small, we set
# `perc_train` lower than the usual 0.8 (otherwise an error is raised).
# Everything else is based on defaults. A feature folder with
# the feature data will be created in the current working directory.
# (Although you can set this via the parameter `data_features_dir`.)
# `visualize` saves periodic images of the features extracted.
# This is useful if you want to know what's going on during the process.
perc_train = 0.6 # with larger datasets this would be around 0.8
extraction_dir = sp.denoiser_feats(
data_clean_dir = data_clean_dir,
data_noisy_dir = data_noisy_dir,
sr = sr,
feature_type = feature_type,
dur_sec = dur_sec,
perc_train = perc_train,
visualize=True);
extraction_dir
################################################################
# The extracted features, extraction settings applied, and
# which audio files were assigned to which datasets
# will be saved in the `extraction_dir` directory
############################################################
# Logged Information
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Let's have a look at the files in the extraction_dir. The files ending
# with .npy extension contain the feature data; the .csv files contain
# logged information.
featfiles = list(extraction_dir.glob('*.*'))
for f in featfiles:
print(f.name)
############################################################
# Feature Settings
# ~~~~~~~~~~~~~~~~~~
# Since much was conducted behind the scenes, it's nice to know how the features
# were extracted, for example, the sample rate and number of frequency bins applied, etc.
feat_settings = sp.utils.load_dict(
extraction_dir.joinpath('log_extraction_settings.csv'))
for key, value in feat_settings.items():
print(key, ' ---> ', value)
|
from busy_beaver.apps.upcoming_events.workflow import generate_upcoming_events_message
from busy_beaver.toolbox.slack_block_kit import Block, Divider, Section
APP_HOME_HEADER_INSTALLED = (
"*Welcome!* Busy Beaver is a community engagement bot.\n\n"
"Join <#{channel}> to see daily GitHub summaries for registered users. "
"Wanna join the fun? `/busybeaver connect` to link your GitHub account!"
)
APP_HOME_HEADER = (
"*Welcome!* Busy Beaver is a community engagement bot.\n\n"
"Please contact the Slack workspace admin to complete installation."
)
class AppHome:
def __init__(self, *, channel=None, meetup_group=None):
if channel:
header = APP_HOME_HEADER_INSTALLED.format(channel=channel)
else:
header = APP_HOME_HEADER
blocks = [Section(header)]
if meetup_group:
blocks.extend([Divider(), Section("\n\n\n\n")])
blocks.extend(generate_upcoming_events_message(meetup_group, count=5))
self.blocks = blocks
def __repr__(self): # pragma: no cover
return "<AppHome>"
def __len__(self):
return len(self.blocks)
def __getitem__(self, i):
return self.blocks[i]
def to_dict(self) -> dict:
blocks = [
block.to_dict() if isinstance(block, Block) else block
for block in self.blocks
]
return {"type": "home", "blocks": blocks}
|
#
# CSE.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Container that holds references to instances of various managing entities.
#
import atexit, argparse, os, threading, time
from Constants import Constants as C
from AnnouncementManager import AnnouncementManager
from Configuration import Configuration, defaultConfigFile
from Dispatcher import Dispatcher
from EventManager import EventManager
from GroupManager import GroupManager
from HttpServer import HttpServer
from Importer import Importer
from Logging import Logging
from NotificationManager import NotificationManager
from RegistrationManager import RegistrationManager
from RemoteCSEManager import RemoteCSEManager
from SecurityManager import SecurityManager
from Statistics import Statistics
from Storage import Storage
from AEStatistics import AEStatistics
from CSENode import CSENode
# singleton main components. These variables will hold all the various manager
# components that are used throughout the CSE implementation.
announce = None
dispatcher = None
event = None
group = None
httpServer = None
notification = None
registration = None
remote = None
security = None
statistics = None
storage = None
rootDirectory = None
aeCSENode = None
aeStatistics = None
appsStarted = False
aeStartupDelay = 5 # seconds
# TODO make AE registering a bit more generic
##############################################################################
#def startup(args=None, configfile=None, resetdb=None, loglevel=None):
def startup(args, **kwargs):
global announce, dispatcher, group, httpServer, notification, registration, remote, security, statistics, storage, event
global rootDirectory
global aeStatistics
rootDirectory = os.getcwd() # get the root directory
# Handle command line arguments and load the configuration
if args is None:
args = argparse.Namespace() # In case args is None create a new args object and populate it
args.configfile = None
args.resetdb = False
args.loglevel = None
for key, value in kwargs.items():
args.__setattr__(key, value)
if not Configuration.init(args):
return
# init Logging
Logging.init()
Logging.log('============')
Logging.log('Starting CSE')
Logging.log('CSE-Type: %s' % C.cseTypes[Configuration.get('cse.type')])
Logging.log(Configuration.print())
	# Initialize the resource storage
storage = Storage()
# Initialize the event manager
event = EventManager()
# Initialize the statistics system
statistics = Statistics()
# Initialize the registration manager
registration = RegistrationManager()
# Initialize the resource dispatcher
dispatcher = Dispatcher()
# Initialize the security manager
security = SecurityManager()
# Initialize the HTTP server
httpServer = HttpServer()
# Initialize the notification manager
notification = NotificationManager()
# Initialize the announcement manager
announce = AnnouncementManager()
# Initialize the group manager
group = GroupManager()
# Import a default set of resources, e.g. the CSE, first ACP or resource structure
importer = Importer()
if not importer.importResources():
return
# Initialize the remote CSE manager
remote = RemoteCSEManager()
remote.start()
# Start AEs
startAppsDelayed() # the Apps are actually started after the CSE finished the startup
	# Send the CSE startup event
	event.cseStartup()
	Logging.log('CSE started')
	# Start the HTTP server
	httpServer.run() # This does NOT return
# Gracefully shutdown the CSE, e.g. when receiving a keyboard interrupt
@atexit.register
def shutdown():
if appsStarted:
stopApps()
if remote is not None:
remote.shutdown()
if group is not None:
group.shutdown()
if announce is not None:
announce.shutdown()
if notification is not None:
notification.shutdown()
if dispatcher is not None:
dispatcher.shutdown()
if security is not None:
security.shutdown()
if registration is not None:
registration.shutdown()
if statistics is not None:
statistics.shutdown()
if event is not None:
event.shutdown()
if storage is not None:
storage.shutdown()
# Delay starting the AEs in the background. This is needed because the CSE
# has not yet started. This will be called when the cseStartup event is raised.
def startAppsDelayed():
event.addHandler(event.cseStartup, startApps)
def startApps():
global appsStarted, aeStatistics, aeCSENode
if not Configuration.get('cse.enableApplications'):
return
time.sleep(aeStartupDelay)
Logging.log('Starting Apps')
appsStarted = True
if Configuration.get('app.csenode.enable'):
aeCSENode = CSENode()
if Configuration.get('app.statistics.enable'):
aeStatistics = AEStatistics()
# Add more apps here
def stopApps():
global appsStarted
if appsStarted:
Logging.log('Stopping Apps')
appsStarted = False
if aeStatistics is not None:
aeStatistics.shutdown()
if aeCSENode is not None:
aeCSENode.shutdown()
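# A minimal launch sketch; the keyword values are illustrative, and note that
# httpServer.run() inside startup() does not return.
if __name__ == '__main__':
	startup(None, configfile=defaultConfigFile, resetdb=False)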
|
# https://github.com/andy-gh/prettyjson/blob/master/prettyjson.py
def prettyjson(obj, indent=2, maxlinelength=80):
"""Renders JSON content with indentation and line splits/concatenations to fit maxlinelength.
Only dicts, lists and basic types are supported"""
items, _ = getsubitems(obj, itemkey="", islast=True, maxlinelength=maxlinelength - indent, indent=indent)
return indentitems(items, indent, level=0)
def getsubitems(obj, itemkey, islast, maxlinelength, indent):
items = []
is_inline = True # at first, assume we can concatenate the inner tokens into one line
isdict = isinstance(obj, dict)
islist = isinstance(obj, list)
istuple = isinstance(obj, tuple)
isbasictype = not (isdict or islist or istuple)
maxlinelength = max(0, maxlinelength)
# build json content as a list of strings or child lists
if isbasictype:
# render basic type
keyseparator = "" if itemkey == "" else ": "
itemseparator = "" if islast else ","
items.append(itemkey + keyseparator + basictype2str(obj) + itemseparator)
else:
# render lists/dicts/tuples
if isdict: opening, closing, keys = ("{", "}", iter(obj.keys()))
elif islist: opening, closing, keys = ("[", "]", range(0, len(obj)))
elif istuple: opening, closing, keys = ("[", "]", range(0, len(obj))) # tuples are converted into json arrays
if itemkey != "": opening = itemkey + ": " + opening
if not islast: closing += ","
count = 0
itemkey = ""
subitems = []
# get the list of inner tokens
for (i, k) in enumerate(keys):
islast_ = i == len(obj)-1
itemkey_ = ""
if isdict: itemkey_ = basictype2str(k)
inner, is_inner_inline = getsubitems(obj[k], itemkey_, islast_, maxlinelength - indent, indent)
subitems.extend(inner) # inner can be a string or a list
is_inline = is_inline and is_inner_inline # if a child couldn't be rendered inline, then we are not able either
# fit inner tokens into one or multiple lines, each no longer than maxlinelength
if is_inline:
multiline = True
# in Multi-line mode items of a list/dict/tuple can be rendered in multiple lines if they don't fit on one.
# suitable for large lists holding data that's not manually editable.
# in Single-line mode items are rendered inline if all fit in one line, otherwise each is rendered in a separate line.
# suitable for smaller lists or dicts where manual editing of individual items is preferred.
# this logic may need to be customized based on visualization requirements:
if (isdict): multiline = False
if (islist): multiline = True
if (multiline):
lines = []
current_line = ""
current_index = 0
for (i, item) in enumerate(subitems):
item_text = item
if i < len(inner)-1: item_text = item + ","
                    if len(current_line) > 0:
                        try_inline = current_line + " " + item_text
                    else:
                        try_inline = item_text
if (len(try_inline) > maxlinelength):
# push the current line to the list if maxlinelength is reached
if len(current_line) > 0: lines.append(current_line)
current_line = item_text
else:
# keep fitting all to one line if still below maxlinelength
current_line = try_inline
# Push the remainder of the content if end of list is reached
                    if i == len(subitems) - 1: lines.append(current_line)
subitems = lines
if len(subitems) > 1: is_inline = False
            else:  # single-line mode
                totallength = len(subitems) - 1  # spaces between items
                for item in subitems: totallength += len(item)
                if totallength <= maxlinelength:
                    joined = ""
                    for item in subitems: joined += item + " "  # insert space between items, comma is already there
                    subitems = [joined.strip()]  # wrap concatenated content in a new list
                else:
                    is_inline = False
# attempt to render the outer brackets + inner tokens in one line
if is_inline:
item_text = ""
if len(subitems) > 0: item_text = subitems[0]
if len(opening) + len(item_text) + len(closing) <= maxlinelength:
items.append(opening + item_text + closing)
else:
is_inline = False
# if inner tokens are rendered in multiple lines already, then the outer brackets remain in separate lines
if not is_inline:
items.append(opening) # opening brackets
items.append(subitems) # Append children to parent list as a nested list
items.append(closing) # closing brackets
return items, is_inline
def basictype2str(obj):
if isinstance (obj, str):
strobj = "\"" + str(obj) + "\""
elif isinstance(obj, bool):
strobj = { True: "true", False: "false" }[obj]
else:
strobj = str(obj)
return strobj
def indentitems(items, indent, level):
"""Recursively traverses the list of json lines, adds indentation based on the current depth"""
res = ""
indentstr = " " * (indent * level)
for (i, item) in enumerate(items):
if isinstance(item, list):
res += indentitems(item, indent, level+1)
else:
islast = (i==len(items)-1)
# no new line character after the last rendered line
if level==0 and islast:
res += indentstr + item
else:
res += indentstr + item + "\n"
return res
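# A quick usage sketch: render a small structure with the helpers above.
if __name__ == "__main__":
    sample = {"name": "demo", "values": [1, 2, 3], "nested": {"ok": True}}
    print(prettyjson(sample, indent=2, maxlinelength=40))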
|
# coding:utf-8
import requests
import re
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
}
r = requests.get('http://www.zhihu.com/explore', headers=headers)
pattern = re.compile('explore-feed.*?question_link.*?>(.*?)</a>', re.S)
titles = re.findall(pattern, r.text)
print(titles)
|
"""
@author: Yifei Ji
@contact: jiyf990330@163.com
"""
import torch
import torch.nn as nn
__all__ = ['BatchSpectralShrinkage']
class BatchSpectralShrinkage(nn.Module):
r"""
The regularization term in `Catastrophic Forgetting Meets Negative Transfer:
Batch Spectral Shrinkage for Safe Transfer Learning (NIPS 2019) <https://proceedings.neurips.cc/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf>`_.
The BSS regularization of feature matrix :math:`F` can be described as:
.. math::
L_{bss}(F) = \sum_{i=1}^{k} \sigma_{-i}^2 ,
where :math:`k` is the number of singular values to be penalized, :math:`\sigma_{-i}` is the :math:`i`-th smallest singular value of feature matrix :math:`F`.
All the singular values of feature matrix :math:`F` are computed by `SVD`:
.. math::
F = U\Sigma V^T,
where the main diagonal elements of the singular value matrix :math:`\Sigma` is :math:`[\sigma_1, \sigma_2, ..., \sigma_b]`.
Args:
k (int): The number of singular values to be penalized. Default: 1
Shape:
- Input: :math:`(b, |\mathcal{f}|)` where :math:`b` is the batch size and :math:`|\mathcal{f}|` is feature dimension.
- Output: scalar.
"""
def __init__(self, k=1):
super(BatchSpectralShrinkage, self).__init__()
self.k = k
def forward(self, feature):
result = 0
u, s, v = torch.svd(feature.t())
num = s.size(0)
for i in range(self.k):
result += torch.pow(s[num-1-i], 2)
return result
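# A minimal usage sketch: penalize the squared smallest singular value of a
# random feature batch (the shapes are illustrative only).
if __name__ == "__main__":
    bss = BatchSpectralShrinkage(k=1)
    features = torch.randn(32, 256)  # (batch_size, feature_dim)
    print(bss(features).item())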
|
import abc
from ofx2xlsmbr.reader.IReaderBankStatement import IReaderBankStatement
from ofx2xlsmbr.reader.IReaderCashFlow import IReaderCashFlow
class ReaderAbstractFactory(metaclass=abc.ABCMeta):
@abc.abstractmethod
def createReaderBankStatement(self) -> IReaderBankStatement:
pass
@abc.abstractmethod
def createReaderCashFlow(self) -> IReaderCashFlow:
pass
|
# Script that calls out lotería cards.
# The cards must be presented in random order.
# On each iteration, ask whether there was a winner ("buena"); if so, stop.
# If the cards run out and nobody has won, announce the end of the game.
import random
print("Welcome")
listaDeCartasDisponibles = ["El gallo", "El diablito", "La dama", "El catrín", "El paraguas", "La sirena", "La escalera", "La botella", "El barril",
                            "El árbol", "El melón", "El valiente", "El gorrito", "La muerte", "La pera", "La bandera", "El bandolón",
                            "El violoncello", "La garza", "El pájaro", "La mano", "La bota",
                            "La luna", "El cotorro", "El borracho", "El negrito", "El corazón", "La sandía", "El tambor", "El camarón",
                            "Las jaras", "El músico", "La araña", "El soldado", "La estrella", "El cazo", "El mundo", "El apache", "El nopal",
                            "El alacrán", "La rosa", "La calavera", "La campana", "El cantarito", "El venado", "El sol", "La corona", "La chalupa",
                            "El pino", "El pescado", "La palma", "La maceta", "El arpa", "La rana"]
print("Card list:", listaDeCartasDisponibles)
print("------------------------------------")
random.shuffle(listaDeCartasDisponibles)  # Shuffle the cards
contador = 0
cantidadTotalCartas = len(listaDeCartasDisponibles)  # 54 cards in a lotería deck
agotaCartas = 1
while cantidadTotalCartas > contador:
    print("Here it comes running:", listaDeCartasDisponibles[contador])
    contador = contador + 1
    huboBuena = input("Was there a winner? (Yes/No): ")
    if huboBuena == "Yes":
        agotaCartas = 0
        break
if agotaCartas == 1:
    print("> The cards ran out")
print("Thanks for playing")
|
# Generated by Django 3.2 on 2021-04-28 15:23
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0003_auto_20210428_1522'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='color',
),
migrations.RemoveField(
model_name='book',
name='price',
),
migrations.RemoveField(
model_name='book',
name='publish_date',
),
]
|
# Scraper for Texas 9th Court of Appeals
# CourtID: texapp9
# Court Short Name: TX
# Author: Andrei Chelaru
# Reviewer: mlr
# Date: 2014-07-10
from juriscraper.opinions.united_states.state import tex
class Site(tex.Site):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.court_id = self.__module__
self.court_name = "capp_9"
|
#!/usr/bin/python3
import time
from web3 import Web3, KeepAliveRPCProvider, IPCProvider
web3 = Web3(KeepAliveRPCProvider(host='127.0.0.1', port='8545'))
# Global Declarations
global true
global false
global myst_account_0_a
global myst_account_1_a
global myst_account_2_a
global myst_account_3_a
global myst_account_4_a
global myst_account_5_a
global myst_account_6_a
global myst_address
global myst_abi
global myst
global myst_call_0
global myst_call_1
global myst_call_2
global myst_call_3
global myst_call_4
global myst_call_5
global myst_call_6
global myst_call_ab
global myst_accounts
global myst_account_0_pw
global myst_account_1_pw
global myst_account_2_pw
global myst_account_3_pw
global myst_account_4_pw
global myst_account_5_pw
global myst_account_6_pw
global myst_account_0_n
global myst_account_1_n
global myst_account_2_n
global myst_account_3_n
global myst_account_4_n
global myst_account_5_n
global myst_account_6_n
global myst_account1pw
global myst_account2pw
global myst_account3pw
global myst_account4pw
global myst_account5pw
global myst_account6pw
global myst_last_price
global myst_accounts_range
global myst_tokenName
global myst_last_ethereum_price
global myst_unlockTime
global myst_balance
global myst_balanceOf
global myst_unlock
global myst_token_d
global _e_d
# Internal Variable Setup (Leave Alone Unless You Understand What They Do.)
true = True
false = False
myst_token_d = 1e8
_e_d = 1e18
myst_accounts_range = '[0, 6]'
myst_unlock = web3.personal.unlockAccount
myst_last_ethereum_price = 370.00
myst_last_price = 1.53
myst_accounts = web3.personal.listAccounts # Accounts Can Also Be Supplied As String Addresses ('0x..').
myst_balance = web3.eth.getBalance
# User Choice Variables (You May Change These Pretty Easily Without Breaking Anything.)
myst_tokenName = 'Mysterium Token'
myst_unlockTime = hex(10000) # Must be hex()
myst_account_0_a = myst_accounts[0]
myst_account_1_a = myst_accounts[1]
myst_account_2_a = myst_accounts[2]
myst_account_3_a = myst_accounts[3]
myst_account_4_a = myst_accounts[4]
myst_account_5_a = '0xFBb1b73C4f0BDa4f67dcA266ce6Ef42f520fBB98'
myst_account_6_a = myst_accounts[6]
# Supply Unlock Passwords For Transactions Below
myst_account_0_pw = 'GuildSkrypt2017!@#'
myst_account_1_pw = ''
myst_account_2_pw = 'GuildSkrypt2017!@#'
myst_account_3_pw = ''
myst_account_4_pw = ''
myst_account_5_pw = ''
myst_account_6_pw = ''
# Supply Names Below Standard Is 'Unknown'
myst_account_0_n = 'Skotys Bittrex Account'
myst_account_1_n = 'Jeffs Account'
myst_account_2_n = 'Skrypts Bittrex Account'
myst_account_3_n = 'Skotys Personal Account'
myst_account_4_n = 'Unknown'
myst_account_5_n = 'Watched \'Bittrex\' Account.'
myst_account_6_n = 'Watched Account (1)'
# Contract Information Below :
myst_address = '0xa645264C5603E96c3b0B078cdab68733794B0A71'
myst_abi = [{"constant":false,"inputs":[{"name":"addr","type":"address"},{"name":"state","type":"bool"}],"name":"setTransferAgent","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"mintingFinished","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_value","type":"uint256"}],"name":"approve","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"}],"name":"setReleaseAgent","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"receiver","type":"address"},{"name":"amount","type":"uint256"}],"name":"mint","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"mintAgents","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"},{"name":"state","type":"bool"}],"name":"setMintAgent","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"value","type":"uint256"}],"name":"upgrade","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_symbol","type":"string"}],"name":"setTokenInformation","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"upgradeAgent","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"releaseTokenTransfer","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"upgradeMaster","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"balanceOf","outputs":[{"name":"balance","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getUpgradeState","outputs":[{"name":"","type":"uint8"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"transferAgents","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"released","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"canUpgrade","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_spender","type"
:"address"},{"name":"_addedValue","type":"uint256"}],"name":"addApproval","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"totalUpgraded","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"releaseAgent","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"agent","type":"address"}],"name":"setUpgradeAgent","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"},{"name":"_spender","type":"address"}],"name":"allowance","outputs":[{"name":"remaining","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_subtractedValue","type":"uint256"}],"name":"subApproval","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"master","type":"address"}],"name":"setUpgradeMaster","outputs":[],"payable":false,"type":"function"},{"inputs":[{"name":"_name","type":"string"},{"name":"_symbol","type":"string"},{"name":"_initialSupply","type":"uint256"},{"name":"_decimals","type":"uint256"}],"payable":false,"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"name":"newName","type":"string"},{"indexed":false,"name":"newSymbol","type":"string"}],"name":"UpdatedTokenInformation","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_from","type":"address"},{"indexed":true,"name":"_to","type":"address"},{"indexed":false,"name":"_value","type":"uint256"}],"name":"Upgrade","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"agent","type":"address"}],"name":"UpgradeAgentSet","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"receiver","type":"address"},{"indexed":false,"name":"amount","type":"uint256"}],"name":"Minted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"owner","type":"address"},{"indexed":true,"name":"spender","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Approval","type":"event"}]
myst = web3.eth.contract(abi=myst_abi, address=myst_address)
myst_balanceOf = myst.call().balanceOf
# End Contract Information
def myst_update_accounts():
global myst_account0
global myst_account1
global myst_account2
global myst_account3
global myst_account4
global myst_account5
global myst_account6
global myst_account0_n
global myst_account1_n
global myst_account2_n
global myst_account3_n
global myst_account4_n
global myst_account5_n
global myst_account6_n
global myst_account0pw
global myst_account1pw
global myst_account2pw
global myst_account3pw
global myst_account4pw
global myst_account5pw
global myst_account6pw
myst_account0 = myst_account_0_a
myst_account1 = myst_account_1_a
myst_account2 = myst_account_2_a
myst_account3 = myst_account_3_a
myst_account4 = myst_account_4_a
myst_account5 = myst_account_5_a
myst_account6 = myst_account_6_a
myst_account0_n = myst_account_0_n
myst_account1_n = myst_account_1_n
myst_account2_n = myst_account_2_n
myst_account3_n = myst_account_3_n
myst_account4_n = myst_account_4_n
myst_account5_n = myst_account_5_n
myst_account6_n = myst_account_6_n
myst_account0pw = myst_account_0_pw
myst_account1pw = myst_account_1_pw
myst_account2pw = myst_account_2_pw
myst_account3pw = myst_account_3_pw
myst_account4pw = myst_account_4_pw
myst_account5pw = myst_account_5_pw
myst_account6pw = myst_account_6_pw
print(myst_tokenName+' Accounts Updated.')
def myst_update_balances():
global myst_call_0
global myst_call_1
global myst_call_2
global myst_call_3
global myst_call_4
global myst_call_5
global myst_call_6
global myst_w_call_0
global myst_w_call_1
global myst_w_call_2
global myst_w_call_3
global myst_w_call_4
global myst_w_call_5
global myst_w_call_6
myst_update_accounts()
print('Updating '+myst_tokenName+' Balances Please Wait...')
myst_call_0 = myst_balanceOf(myst_account0)
myst_call_1 = myst_balanceOf(myst_account1)
myst_call_2 = myst_balanceOf(myst_account2)
myst_call_3 = myst_balanceOf(myst_account3)
myst_call_4 = myst_balanceOf(myst_account4)
myst_call_5 = myst_balanceOf(myst_account5)
myst_call_6 = myst_balanceOf(myst_account6)
myst_w_call_0 = myst_balance(myst_account0)
myst_w_call_1 = myst_balance(myst_account1)
myst_w_call_2 = myst_balance(myst_account2)
myst_w_call_3 = myst_balance(myst_account3)
myst_w_call_4 = myst_balance(myst_account4)
myst_w_call_5 = myst_balance(myst_account5)
myst_w_call_6 = myst_balance(myst_account6)
print(myst_tokenName+' Balances Updated.')
def myst_list_all_accounts():
myst_update_accounts()
print(myst_tokenName+' '+myst_account0_n+': '+myst_account0)
print(myst_tokenName+' '+myst_account1_n+': '+myst_account1)
print(myst_tokenName+' '+myst_account2_n+': '+myst_account2)
print(myst_tokenName+' '+myst_account3_n+': '+myst_account3)
print(myst_tokenName+' '+myst_account4_n+': '+myst_account4)
print(myst_tokenName+' '+myst_account5_n+': '+myst_account5)
print(myst_tokenName+' '+myst_account6_n+': '+myst_account6)
def myst_account_balance(accountNumber):
myst_update_balances()
myst_ab_account_number = accountNumber
myst_ab_input = [0, 1, 2, 3, 4, 5, 6]
if myst_ab_account_number == myst_ab_input[0]:
print('Calling '+myst_account0_n+' '+myst_tokenName+' Balance: ')
print(myst_account0_n+': '+myst_tokenName+' Balance: '+str(myst_call_0 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_0 / myst_token_d * myst_last_price))
if myst_ab_account_number == myst_ab_input[1]:
print('Calling '+myst_account1_n+' '+myst_tokenName+' Balance: ')
print(myst_account1_n+': '+myst_tokenName+' Balance: '+str(myst_call_1 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_1 / myst_token_d * myst_last_price))
if myst_ab_account_number == myst_ab_input[2]:
print('Calling '+myst_account2_n+' '+myst_tokenName+' Balance: ')
print(myst_account2_n+': '+myst_tokenName+' Balance: '+str(myst_call_2 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_2 / myst_token_d * myst_last_price))
if myst_ab_account_number == myst_ab_input[3]:
print('Calling '+myst_account3_n+' '+myst_tokenName+' Balance: ')
print(myst_account3_n+': '+myst_tokenName+' Balance: '+str(myst_call_3 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_3 / myst_token_d * myst_last_price))
if myst_ab_account_number == myst_ab_input[4]:
print('Calling '+myst_account4_n+' '+myst_tokenName+' Balance: ')
print(myst_account4_n+': '+myst_tokenName+' Balance: '+str(myst_call_4 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_4 / myst_token_d * myst_last_price))
if myst_ab_account_number == myst_ab_input[5]:
print('Calling '+myst_account5_n+' '+myst_tokenName+' Balance: ')
print(myst_account5_n+': '+myst_tokenName+' Balance: '+str(myst_call_5 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_5 / myst_token_d * myst_last_price))
if myst_ab_account_number == myst_ab_input[6]:
print('Calling '+myst_account6_n+' '+myst_tokenName+' Balance: ')
print(myst_account6_n+': '+myst_tokenName+' Balance: '+str(myst_call_6 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_6 / myst_token_d * myst_last_price))
if myst_ab_account_number not in myst_ab_input:
        print('Must Be An Integer Within Range '+myst_accounts_range+'.')
def myst_list_all_account_balances():
myst_update_balances()
print('Loading Account Data...')
#Account 0 Data
print('Calling Account_0 '+myst_tokenName+' Balance: ')
print(myst_account0_n+': '+myst_tokenName+' Balance: '+str(myst_call_0 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_0 / myst_token_d * myst_last_price))
print('Calling Account_0 Ethereum Balance: ')
print(myst_account0_n+': Ethereum Balance '+str(myst_w_call_0 / _e_d)+' $'+str(myst_w_call_0 / _e_d * myst_last_ethereum_price))
#Account 1 Data
print('Calling Account_1 '+myst_tokenName+' Balance: ')
print(myst_account1_n+': '+myst_tokenName+' Balance: '+str(myst_call_1 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_1 / myst_token_d * myst_last_price))
print('Calling Account_1 Ethereum Balance: ')
print(myst_account1_n+': Ethereum Balance '+str(myst_w_call_1 / _e_d)+' $'+str(myst_w_call_1 / _e_d * myst_last_ethereum_price))
#Account 2 Data
print('Calling Account_2 '+myst_tokenName+' Balance: ')
print(myst_account2_n+': '+myst_tokenName+' Balance: '+str(myst_call_2 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_2 / myst_token_d * myst_last_price))
print('Calling Account_2 Ethereum Balance: ')
print(myst_account2_n+': Ethereum Balance '+str(myst_w_call_2 / _e_d)+' $'+str(myst_w_call_2 / _e_d * myst_last_ethereum_price))
#Account 3 Data
print('Calling Account_3 '+myst_tokenName+' Balance: ')
print(myst_account3_n+': '+myst_tokenName+' Balance: '+str(myst_call_3 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_3 / myst_token_d * myst_last_price))
print('Calling Account_3 Ethereum Balance: ')
print(myst_account3_n+': Ethereum Balance '+str(myst_w_call_3 / _e_d)+' $'+str(myst_w_call_3 / _e_d * myst_last_ethereum_price))
#Account 4 Data
print('Calling Account_4 '+myst_tokenName+' Balance: ')
print(myst_account4_n+': '+myst_tokenName+' Balance: '+str(myst_call_4 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_4 / myst_token_d * myst_last_price))
print('Calling Account_4 Ethereum Balance: ')
print(myst_account4_n+': Ethereum Balance '+str(myst_w_call_4 / _e_d)+' $'+str(myst_w_call_4 / _e_d * myst_last_ethereum_price))
#Account 5 Data
print('Calling Account_5 '+myst_tokenName+' Balance: ')
print(myst_account5_n+': '+myst_tokenName+' Balance: '+str(myst_call_5 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_5 / myst_token_d * myst_last_price))
print('Calling Account_5 Ethereum Balance: ')
print(myst_account5_n+': Ethereum Balance '+str(myst_w_call_5 / _e_d)+' $'+str(myst_w_call_5 /_e_d * myst_last_ethereum_price))
    #Account 6 Data
print('Calling Account_6 '+myst_tokenName+' Balance: ')
print(myst_account6_n+': '+myst_tokenName+' Balance: '+str(myst_call_6 / myst_token_d)+' Usd/'+myst_tokenName+' Balance: $'+str(myst_call_6 / myst_token_d * myst_last_price))
print('Calling Account_6 Ethereum Balance: ')
print(myst_account6_n+': Ethereum Balance '+str(myst_w_call_6 / _e_d)+' $'+str(myst_w_call_6 / _e_d * myst_last_ethereum_price))
def myst_unlock_all_accounts():
myst_unlock_account_0()
myst_unlock_account_1()
myst_unlock_account_2()
myst_unlock_account_3()
myst_unlock_account_4()
myst_unlock_account_5()
myst_unlock_account_6()
def myst_unlock_account_0():
global myst_account0pw
global myst_account0
global myst_account0_n
myst_update_accounts()
    myst_u_a_0 = myst_unlock(myst_account0, myst_account0pw, myst_unlockTime)
    if myst_u_a_0 == False:
        if myst_account0pw == '':
            myst_account0pw_r = 'UnAssigned (Blank String (\'\')'
            print('Unlock Failure With Account '+myst_account0_n+' Passphrase Denied: '+myst_account0pw_r)
        elif myst_account0pw != '':
            print('Unlock Failure With Account '+myst_account0_n+' Passphrase Denied: '+myst_account0pw)
if myst_u_a_0 == True:
print(myst_account0_n+' Unlocked')
def myst_unlock_account_1():
global myst_account1pw
global myst_account1
global myst_account1_n
myst_update_accounts()
myst_u_a_1 = myst_unlock(myst_account1, myst_account1pw, myst_unlockTime)
if myst_u_a_1 == False:
if myst_account1pw == '':
myst_account1pw_r = 'UnAssigned (Blank String (\'\')'
print('Unlock Failure With Account '+myst_account1_n+' Passphrase Denied: '+myst_account1pw_r)
        elif myst_account1pw != '':
print('Unlock Failure With Account '+myst_account1_n+' Passphrase Denied: '+myst_account1pw)
if myst_u_a_1 == True:
print(myst_account1_n+' Unlocked')
def myst_unlock_account_2():
global myst_account2pw
global myst_account2
global myst_account2_n
myst_update_accounts()
myst_u_a_2 = myst_unlock(myst_account2, myst_account2pw, myst_unlockTime)
if myst_u_a_2 == False:
if myst_account2pw == '':
myst_account2pw_r = 'UnAssigned (Blank String (\'\')'
print('Unlock Failure With Account '+myst_account2_n+' Passphrase Denied: '+myst_account2pw_r)
elif myst_account2pw != '':
print('Unlock Failure With Account '+myst_account2_n+' Passphrase Denied: '+myst_account2pw)
if myst_u_a_2 == True:
print(myst_account2_n+' Unlocked')
def myst_unlock_account_3():
global myst_account3pw
global myst_account3
global myst_account3_n
myst_update_accounts()
myst_u_a_3 = myst_unlock(myst_account3, myst_account3pw, myst_unlockTime)
if myst_u_a_3 == False:
if myst_account3pw == '':
myst_account3pw_r = 'UnAssigned (Blank String (\'\')'
print('Unlock Failure With Account '+myst_account3_n+' Passphrase Denied: '+myst_account3pw_r)
elif myst_account3pw != '':
print('Unlock Failure With Account '+myst_account3_n+' Passphrase Denied: '+myst_account3pw)
if myst_u_a_3 == True:
print(myst_account3_n+' Unlocked')
def myst_unlock_account_4():
global myst_account4pw
global myst_account4
global myst_account4_n
myst_update_accounts()
myst_u_a_4 = myst_unlock(myst_account4, myst_account4pw, myst_unlockTime)
if myst_u_a_4 == False:
if myst_account4pw == '':
myst_account4pw_r = 'UnAssigned (Blank String (\'\')'
print('Unlock Failure With Account '+myst_account4_n+' Passphrase Denied: '+myst_account4pw_r)
elif myst_account4pw != '':
print('Unlock Failure With Account '+myst_account4_n+' Passphrase Denied: '+myst_account4pw)
if myst_u_a_4 == True:
print(myst_account4_n+' Unlocked')
def myst_unlock_account_5():
global myst_account5pw
global myst_account5
global myst_account5_n
myst_update_accounts()
myst_u_a_5 = myst_unlock(myst_account5, myst_account5pw, myst_unlockTime)
if myst_u_a_5 == False:
if myst_account5pw == '':
myst_account5pw_r = 'UnAssigned (Blank String (\'\')'
print('Unlock Failure With Account '+myst_account5_n+' Passphrase Denied: '+myst_account5pw_r)
elif myst_account5pw != '':
print('Unlock Failure With Account '+myst_account5_n+' Passphrase Denied: '+myst_account5pw)
if myst_u_a_5 == True:
print(myst_account5_n+' Unlocked')
def myst_unlock_account_6():
global myst_account6pw
global myst_account6
global myst_account6_n
myst_update_accounts()
myst_u_a_6 = myst_unlock(myst_account6, myst_account6pw, myst_unlockTime)
if myst_u_a_6 == False:
if myst_account6pw == '':
myst_account6pw_r = 'UnAssigned (Blank String (\'\')'
print('Unlock Failure With Account '+myst_account6_n+' Passphrase Denied: '+myst_account6pw_r)
elif myst_account6pw != '':
print('Unlock Failure With Account '+myst_account6_n+' Passphrase Denied: '+myst_account6pw)
if myst_u_a_6 == True:
print(myst_account6_n+' Unlocked')
def myst_unlock_account(myst_ua_accountNumber):
myst_update_accounts()
myst_ua_account_number = myst_ua_accountNumber
myst_ua_input = [0, 1, 2, 3, 4, 5, 6]
if myst_ua_account_number == myst_ua_input[0]:
myst_unlock_account_0()
if myst_ua_account_number == myst_ua_input[1]:
myst_unlock_account_1()
if myst_ua_account_number == myst_ua_input[2]:
myst_unlock_account_2()
if myst_ua_account_number == myst_ua_input[3]:
myst_unlock_account_3()
if myst_ua_account_number == myst_ua_input[4]:
myst_unlock_account_4()
if myst_ua_account_number == myst_ua_input[5]:
myst_unlock_account_5()
if myst_ua_account_number == myst_ua_input[6]:
myst_unlock_account_6()
if myst_ua_account_number not in myst_ua_input:
        print('Must Be An Integer Within Range '+myst_accounts_range+'.')
def myst_approve_between_accounts(fromAccount, toAccount, msgValue):
    myst_update_accounts()
    myst_unlock_account(fromAccount)
    myst_a_0 = myst.transact({'from': web3.personal.listAccounts[fromAccount]}).approve(web3.personal.listAccounts[toAccount], msgValue)
    print(myst_a_0)
def myst_approve(fromAccountNumber, toAddress, msgValue):
myst_update_accounts()
myst_unlock_account(fromAccountNumber)
    myst_a_1 = myst.transact({'from': web3.personal.listAccounts[fromAccountNumber]}).approve(toAddress, msgValue)
print(myst_a_1)
def myst_transfer_between_accounts(fromAccount, toAccount, msgValue):
myst_update_accounts()
myst_unlock_account(fromAccount)
myst_t_0 = myst.transact({'from': web3.personal.listAccounts[fromAccount]}).transfer(web3.personal.listAccounts[toAccount], msgValue)
print(myst_t_0)
def myst_transfer(fromAccountNumber, toAddress, msgValue):
myst_update_accounts()
myst_unlock_account(fromAccountNumber)
    myst_t_1 = myst.transact({'from': web3.personal.listAccounts[fromAccountNumber]}).transfer(toAddress, msgValue)
print(myst_t_1)
def myst_transferFrom_between_accounts(callAccount, fromAccount, toAccount, msgValue):
myst_update_accounts()
myst_unlock_account(callAccount)
myst_tf_0 = myst.transact({'from': web3.personal.listAccounts[callAccount]}).transferFrom(web3.personal.listAccounts[fromAccount], web3.personal.listAccounts[toAccount], msgValue)
print(myst_tf_0)
def myst_transferFrom(callAccount, fromAccount, toAddress, msgValue):
    myst_update_accounts()
    myst_unlock_account(callAccount)
    myst_tf_1 = myst.transact({'from': web3.personal.listAccounts[callAccount]}).transferFrom(web3.personal.listAccounts[fromAccount], toAddress, msgValue)
print(myst_tf_1)
def myst_help():
    print('The Available Functions For '+myst_tokenName+' Are As Follows:')
print('''
@ Tag Listing Below:
~ (Function Name)
-- (Next line same subject)
/ * (variable assigned main function)
** (variable assigned contract call/transaction)
// (if condition is met)
=> (function calls in order)
/ * myst_unlock => web3.personal.unlockAccount
/ * myst_accounts => web3.personal.listAccounts
/ * myst_balance => web3.eth.getBalance
** myst => web3.eth.contract(abi=myst_abi, address=myst_address)
** / * myst_balanceOf => myst.call().balanceOf
~ Function Listing Below:
~ myst_update_accounts()
~ myst_update_balances() \n\r -- => myst_update_accounts()
~ myst_list_all_accounts() \n\r -- => myst_update_accounts()
~ myst_account_balance(accountNumber) \n\r -- => myst_update_balances()
~ myst_list_all_account_balances() \n\r -- => myst_update_balances()
~ myst_unlock_all_accounts() \n\r -- => myst_unlock_account_0() \n\r -- => myst_unlock_account_1() \n\r -- => myst_unlock_account_2() \n\r -- => myst_unlock_account_3() \n\r -- => myst_unlock_account_4() \n\r -- => myst_unlock_account_5() \n\r -- => myst_unlock_account_6()
~ myst_unlock_account_0() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account0, myst_account0pw, myst_unlockTime)
~ myst_unlock_account_1() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account1, myst_account1pw, myst_unlockTime)
~ myst_unlock_account_2() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account2, myst_account2pw, myst_unlockTime)
~ myst_unlock_account_3() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account3, myst_account3pw, myst_unlockTime)
~ myst_unlock_account_4() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account4, myst_account4pw, myst_unlockTime)
~ myst_unlock_account_5() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account5, myst_account5pw, myst_unlockTime)
~ myst_unlock_account_6() \n\r -- => myst_update_accounts() \n\r -- / * myst_unlock(myst_account6, myst_account6pw, myst_unlockTime)
~ myst_unlock_account(myst_ua_accountNumber) \n\r -- => myst_update_accounts() \n\r -- // myst_unlock_account_0() \n\r -- // myst_unlock_account_1() \n\r -- // myst_unlock_account_2() \n\r -- // myst_unlock_account_3() \n\r -- // myst_unlock_account_4() \n\r -- // myst_unlock_account_5() \n\r -- // myst_unlock_account_6()
~ myst_approve_between_accounts(fromAccount, toAccount, msgValue) \n\r -- => myst_update_accounts() \n\r -- => myst_unlock_account(fromAccount) \n\r -- / ** myst.transact({'from': web3.personal.listAccounts[fromAccount]}).approve(web3.personal.listAccounts[toAccount], msgValue)
~ myst_approve(fromAccountNumber, toAddress, msgValue) \n\r -- => myst_update_accounts() \n\r -- => myst_unlock_account(fromAccountNumber) \n\r -- / ** myst.transact({'from': web3.personal.listAccounts[fromAccountNumber]}).approve(toAddress, msgValue)
~ myst_transfer_between_accounts(fromAccount, toAccount, msgValue) \n\r -- => myst_update_accounts() \n\r -- => myst_unlock_account(fromAccount) \n\r -- / ** myst.transact({'from': web3.personal.listAccounts[fromAccount]}).transfer(web3.personal.listAccounts[toAccount], msgValue)
~ myst_transfer(fromAccountNumber, toAddress, msgValue) \n\r -- => myst_update_accounts() \n\r -- => myst_unlock_account(fromAccountNumber) \n\r -- / ** myst.transact({'from': web3.personal.listAccounts[fromAccountNumber]}).transfer(toAddress, msgValue)
~ myst_transferFrom_between_accounts(callAccount, fromAccount, toAccount, msgValue) \n\r -- => myst_update_accounts() \n\r -- => myst_unlock_account(callAccount) \n\r -- / ** myst.transact({'from': web3.personal.listAccounts[callAccount]}).transferFrom(web3.personal.listAccounts[fromAccount], \n\r -- web3.personal.listAccounts[toAccount], msgValue)
~ myst_transferFrom(callAccount, fromAccount, toAddress, msgValue) \n\r -- => myst_update_accounts() \n\r -- => myst_unlock_account(callAccount) \n\r -- / ** myst.transact({'from': web3.personal.listAccounts[callAccount]}).transferFrom(web3.personal.listAccounts[fromAccount], toAddress, msgValue)
~ myst_help() <-- You Are Here. ''')
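# A minimal usage sketch, assuming a running local node with web3 configured
# and the account/contract variables defined earlier in this script; the
# account numbers, address, and value below are hypothetical:
#
#   myst_update_accounts()
#   myst_list_all_account_balances()
#   myst_unlock_account(0)
#   myst_transfer_between_accounts(0, 1, 10)
#   myst_transfer(0, '0x0000000000000000000000000000000000000000', 10)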
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring
"""Tests for comparing the outputs of circuit drawer with expected ones."""
import os
import unittest
from qiskit.tools.visualization import HAS_MATPLOTLIB, pulse_drawer
from qiskit.pulse.channels import (DriveChannel, MeasureChannel, ControlChannel, AcquireChannel,
MemorySlot, RegisterSlot)
from qiskit.pulse.commands import FrameChange, Acquire, PersistentValue, Snapshot, Delay, Gaussian
from qiskit.pulse.schedule import Schedule
from qiskit.pulse import pulse_lib
from .visualization import QiskitVisualizationTestCase, path_to_diagram_reference
class TestPulseVisualizationImplementation(QiskitVisualizationTestCase):
"""Visual accuracy of visualization tools outputs tests."""
pulse_matplotlib_reference = path_to_diagram_reference('pulse_matplotlib_ref.png')
instr_matplotlib_reference = path_to_diagram_reference('instruction_matplotlib_ref.png')
schedule_matplotlib_reference = path_to_diagram_reference('schedule_matplotlib_ref.png')
schedule_show_framechange_ref = path_to_diagram_reference('schedule_show_framechange_ref.png')
parametric_matplotlib_reference = path_to_diagram_reference('parametric_matplotlib_ref.png')
def setUp(self):
self.schedule = Schedule()
def sample_pulse(self):
"""Generate a sample pulse."""
return pulse_lib.gaussian(20, 0.8, 1.0, name='test')
def sample_instruction(self):
"""Generate a sample instruction."""
return self.sample_pulse()(DriveChannel(0))
def sample_schedule(self):
"""Generate a sample schedule that includes the most common elements of
pulse schedules."""
gp0 = pulse_lib.gaussian(duration=20, amp=1.0, sigma=1.0)
gp1 = pulse_lib.gaussian(duration=20, amp=-1.0, sigma=2.0)
gs0 = pulse_lib.gaussian_square(duration=20, amp=-1.0, sigma=2.0, risefall=3)
fc_pi_2 = FrameChange(phase=1.57)
acquire = Acquire(10)
delay = Delay(100)
sched = Schedule()
sched = sched.append(gp0(DriveChannel(0)))
sched = sched.insert(0, PersistentValue(value=0.2 + 0.4j)(ControlChannel(0)))
sched = sched.insert(60, FrameChange(phase=-1.57)(DriveChannel(0)))
sched = sched.insert(30, gp1(DriveChannel(1)))
sched = sched.insert(60, gp0(ControlChannel(0)))
sched = sched.insert(60, gs0(MeasureChannel(0)))
sched = sched.insert(90, fc_pi_2(DriveChannel(0)))
sched = sched.insert(90, acquire(AcquireChannel(1),
MemorySlot(1),
RegisterSlot(1)))
sched = sched.append(delay(DriveChannel(0)))
sched = sched + sched
sched |= Snapshot("snapshot_1", "snap_type") << 60
sched |= Snapshot("snapshot_2", "snap_type") << 120
return sched
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
@unittest.skip('Useful for refactoring purposes, skipping by default.')
def test_parametric_pulse_schedule(self):
"""Test that parametric instructions/schedules can be drawn."""
filename = self._get_resource_path('current_schedule_matplotlib_ref.png')
schedule = Schedule(name='test_parametric')
schedule += Gaussian(duration=25, sigma=4, amp=0.5j)(DriveChannel(0))
pulse_drawer(schedule, filename=filename)
self.assertImagesAreEqual(filename, self.parametric_matplotlib_reference)
os.remove(filename)
# TODO: Enable for refactoring purposes and enable by default when we can
# decide if the backend is available or not.
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
@unittest.skip('Useful for refactoring purposes, skipping by default.')
def test_pulse_matplotlib_drawer(self):
filename = self._get_resource_path('current_pulse_matplotlib_ref.png')
pulse = self.sample_pulse()
pulse_drawer(pulse, filename=filename)
self.assertImagesAreEqual(filename, self.pulse_matplotlib_reference)
os.remove(filename)
# TODO: Enable for refactoring purposes and enable by default when we can
# decide if the backend is available or not.
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
@unittest.skip('Useful for refactoring purposes, skipping by default.')
def test_instruction_matplotlib_drawer(self):
filename = self._get_resource_path('current_instruction_matplotlib_ref.png')
pulse_instruction = self.sample_instruction()
pulse_drawer(pulse_instruction, filename=filename)
self.assertImagesAreEqual(filename, self.instr_matplotlib_reference)
os.remove(filename)
# TODO: Enable for refactoring purposes and enable by default when we can
# decide if the backend is available or not.
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
@unittest.skip('Useful for refactoring purposes, skipping by default.')
def test_schedule_matplotlib_drawer(self):
filename = self._get_resource_path('current_schedule_matplotlib_ref.png')
schedule = self.sample_schedule()
pulse_drawer(schedule, filename=filename)
self.assertImagesAreEqual(filename, self.schedule_matplotlib_reference)
os.remove(filename)
# TODO: Enable for refactoring purposes and enable by default when we can
# decide if the backend is available or not.
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
@unittest.skip('Useful for refactoring purposes, skipping by default.')
def test_truncate_acquisition(self):
sched = Schedule()
acquire = Acquire(30)
sched = sched.insert(0, acquire(AcquireChannel(1),
MemorySlot(1),
RegisterSlot(1)))
# Check ValueError is not thrown
sched.draw(plot_range=(0, 15))
# TODO: Enable for refactoring purposes and enable by default when we can
# decide if the backend is available or not.
@unittest.skipIf(not HAS_MATPLOTLIB, 'matplotlib not available.')
@unittest.skip('Useful for refactoring purposes, skipping by default.')
def test_schedule_drawer_show_framechange(self):
filename = self._get_resource_path('current_show_framechange_ref.png')
gp0 = pulse_lib.gaussian(duration=20, amp=1.0, sigma=1.0)
sched = Schedule()
sched = sched.append(gp0(DriveChannel(0)))
sched = sched.insert(60, FrameChange(phase=-1.57)(DriveChannel(0)))
sched = sched.insert(30, FrameChange(phase=-1.50)(DriveChannel(1)))
sched = sched.insert(70, FrameChange(phase=1.50)(DriveChannel(1)))
pulse_drawer(sched, filename=filename, show_framechange_channels=False)
self.assertImagesAreEqual(filename, self.schedule_show_framechange_ref)
os.remove(filename)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
# -*- coding: utf-8 -*-
# Copyright © 2018 Damir Jelić <poljar@termina.org.uk>
#
# Permission to use, copy, modify, and/or distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import unicode_literals
from builtins import str
from datetime import datetime
from functools import wraps
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import attr
from jsonschema.exceptions import SchemaError, ValidationError
from logbook import Logger
from .events import (AccountDataEvent, BadEventType, Event, InviteEvent,
ToDeviceEvent, EphemeralEvent)
from .log import logger_group
from .schemas import Schemas, validate_json
logger = Logger("nio.responses")
logger_group.add_logger(logger)
__all__ = [
"FileResponse",
"DeleteDevicesAuthResponse",
"DeleteDevicesResponse",
"DeleteDevicesError",
"Device",
"DeviceList",
"DevicesResponse",
"DevicesError",
"DeviceOneTimeKeyCount",
"ErrorResponse",
"InviteInfo",
"JoinResponse",
"JoinError",
"JoinedMembersResponse",
"JoinedMembersError",
"KeysClaimResponse",
"KeysClaimError",
"KeysQueryResponse",
"KeysQueryError",
"KeysUploadResponse",
"KeysUploadError",
"LoginResponse",
"LoginError",
"LogoutResponse",
"LogoutError",
"Response",
"RoomInfo",
"RoomInviteResponse",
"RoomInviteError",
"RoomKickResponse",
"RoomKickResponse",
"RoomLeaveResponse",
"RoomLeaveError",
"RoomForgetResponse",
"RoomForgetError",
"RoomMember",
"RoomMessagesResponse",
"RoomMessagesError",
"RoomPutStateResponse",
"RoomPutStateError",
"RoomRedactResponse",
"RoomRedactError",
"RoomSendResponse",
"RoomSendError",
"RoomSummary",
"Rooms",
"ShareGroupSessionResponse",
"ShareGroupSessionError",
"SyncResponse",
"PartialSyncResponse",
"SyncError",
"Timeline",
"UpdateDeviceResponse",
"UpdateDeviceError",
"RoomTypingResponse",
"RoomTypingError",
"RoomReadMarkersResponse",
"RoomReadMarkersError",
"UploadResponse",
"UploadError",
"ProfileGetResponse",
"ProfileGetError",
"ProfileGetDisplayNameResponse",
"ProfileGetDisplayNameError",
"ProfileSetDisplayNameResponse",
"ProfileSetDisplayNameError",
"ProfileGetAvatarResponse",
"ProfileGetAvatarError",
"ProfileSetAvatarResponse",
"ProfileSetAvatarError",
"RoomKeyRequestResponse",
"RoomKeyRequestError",
"ThumbnailResponse",
"ThumbnailError",
"ToDeviceResponse",
"ToDeviceError",
"RoomContextResponse",
"RoomContextError"
]
def verify(schema, error_class, pass_arguments=True):
def decorator(f):
@wraps(f)
def wrapper(cls, parsed_dict, *args, **kwargs):
try:
logger.info("Validating response schema")
validate_json(parsed_dict, schema)
except (SchemaError, ValidationError) as e:
logger.error("Error validating response: " + str(e.message))
if pass_arguments:
return error_class.from_dict(parsed_dict, *args, **kwargs)
else:
return error_class.from_dict(parsed_dict)
return f(cls, parsed_dict, *args, **kwargs)
return wrapper
return decorator
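# A usage note for `verify` (sketch only; `Schemas.login` and `LoginError` are
# this module's own names, used further below): the wrapper validates
# `parsed_dict` against the schema before the decorated classmethod runs, and
# short-circuits to `error_class.from_dict(...)` on validation failure, so
# `from_dict` bodies can assume well-formed input.
#
#   @classmethod
#   @verify(Schemas.login, LoginError)
#   def from_dict(cls, parsed_dict):
#       return cls(parsed_dict["user_id"], ...)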
@attr.s
class Rooms(object):
invite = attr.ib(type=Dict)
join = attr.ib(type=Dict)
leave = attr.ib(type=Dict)
@attr.s
class DeviceOneTimeKeyCount(object):
curve25519 = attr.ib(type=int)
signed_curve25519 = attr.ib(type=int)
@attr.s
class DeviceList(object):
changed = attr.ib(type=List[str])
left = attr.ib(type=List[str])
@attr.s
class Timeline(object):
events = attr.ib(type=List)
limited = attr.ib(type=bool)
prev_batch = attr.ib(type=str)
@attr.s
class InviteInfo(object):
invite_state = attr.ib(type=List)
@attr.s
class RoomSummary(object):
invited_member_count = attr.ib(default=None, type=Optional[int])
joined_member_count = attr.ib(default=None, type=Optional[int])
    heroes = attr.ib(factory=list, type=List[str])
@attr.s
class RoomInfo(object):
timeline = attr.ib(type=Timeline)
state = attr.ib(type=List)
ephemeral = attr.ib(type=List)
account_data = attr.ib(type=List)
summary = attr.ib(default=None, type=Optional[RoomSummary])
@staticmethod
def parse_account_data(event_dict):
"""Parse the account data dictionary and produce a list of events."""
events = []
for event in event_dict:
events.append(AccountDataEvent.parse_event(event))
return events
@attr.s
class RoomMember(object):
user_id = attr.ib(type=str)
display_name = attr.ib(type=str)
avatar_url = attr.ib(type=str)
@attr.s
class Device(object):
id = attr.ib(type=str)
display_name = attr.ib(type=str)
last_seen_ip = attr.ib(type=str)
last_seen_date = attr.ib(type=datetime)
@classmethod
def from_dict(cls, parsed_dict):
date = None
if parsed_dict["last_seen_ts"] is not None:
date = datetime.fromtimestamp(parsed_dict["last_seen_ts"] / 1000)
return cls(
parsed_dict["device_id"],
parsed_dict["display_name"],
parsed_dict["last_seen_ip"],
date
)
@attr.s
class Response(object):
uuid = "" # type : str
start_time = None # type : Optional[float]
end_time = None # type : Optional[float]
timeout = 0 # type : int
transport_response = attr.ib(init=False, default=None)
@property
def elapsed(self):
if not self.start_time or not self.end_time:
return 0
elapsed = self.end_time - self.start_time
return max(0, elapsed - (self.timeout / 1000))
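        # Example with hypothetical numbers: start_time=10.0, end_time=12.5
        # and timeout=1000 (ms) give elapsed == max(0, 2.5 - 1.0) == 1.5 s.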
@attr.s
class FileResponse(Response):
"""A response representing a successful file content request.
Attributes:
body (bytes): The file's content in bytes.
content_type (str): The content MIME type of the file,
e.g. "image/png".
"""
body = attr.ib(type=bytes)
content_type = attr.ib(type=str)
def __str__(self):
return "{} bytes, content type: {}".format(
len(self.body),
self.content_type
)
@classmethod
def from_data(cls, data, content_type):
"""Create a FileResponse from file content returned by the server.
Args:
data (bytes): The file's content in bytes.
content_type (str): The content MIME type of the file,
e.g. "image/png".
"""
raise NotImplementedError()
@attr.s
class ErrorResponse(Response):
message = attr.ib(type=str)
status_code = attr.ib(default=None, type=Optional[int])
retry_after_ms = attr.ib(default=None, type=Optional[int])
def __str__(self):
# type: () -> str
if self.status_code and self.message:
e = "{} {}".format(self.status_code, self.message)
elif self.message:
e = self.message
elif self.status_code:
e = "{} unknown error".format(self.status_code)
else:
e = "unknown error"
if self.retry_after_ms:
e = "{} - retry after {}ms".format(e, self.retry_after_ms)
return "{}: {}".format(self.__class__.__name__, e)
@classmethod
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> ErrorResponse
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error")
return cls(
parsed_dict["error"],
parsed_dict["errcode"],
parsed_dict.get("retry_after_ms"),
)
@attr.s
class _ErrorWithRoomId(ErrorResponse):
room_id = attr.ib(default="", type=str)
@classmethod
def from_dict(cls, parsed_dict, room_id):
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error")
return cls(
parsed_dict["error"],
parsed_dict["errcode"],
parsed_dict.get("retry_after_ms"),
room_id
)
class LoginError(ErrorResponse):
pass
class LogoutError(ErrorResponse):
pass
class SyncError(ErrorResponse):
pass
class RoomSendError(_ErrorWithRoomId):
pass
class RoomPutStateError(_ErrorWithRoomId):
pass
class RoomRedactError(_ErrorWithRoomId):
pass
class RoomTypingError(_ErrorWithRoomId):
"""A response representing a unsuccessful room typing request."""
pass
class RoomReadMarkersError(_ErrorWithRoomId):
"""A response representing a unsuccessful room read markers request."""
pass
class RoomKickError(ErrorResponse):
pass
class RoomInviteError(ErrorResponse):
pass
class JoinError(ErrorResponse):
pass
class RoomLeaveError(ErrorResponse):
pass
class RoomForgetError(_ErrorWithRoomId):
pass
class RoomMessagesError(_ErrorWithRoomId):
pass
class KeysUploadError(ErrorResponse):
pass
class KeysQueryError(ErrorResponse):
pass
class KeysClaimError(_ErrorWithRoomId):
pass
class UploadError(ErrorResponse):
"""A response representing a unsuccessful upload request."""
pass
class ThumbnailError(ErrorResponse):
"""A response representing a unsuccessful thumbnail request."""
pass
@attr.s
class ShareGroupSessionError(_ErrorWithRoomId):
"""Response representing unsuccessful group sessions sharing request."""
users_shared_with = attr.ib(type=set, default=None)
@classmethod
def from_dict(cls, parsed_dict, room_id, users_shared_with):
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error")
return cls(parsed_dict["error"], parsed_dict["errcode"], room_id,
users_shared_with)
class DevicesError(ErrorResponse):
pass
class DeleteDevicesError(ErrorResponse):
pass
class UpdateDeviceError(ErrorResponse):
pass
class JoinedMembersError(_ErrorWithRoomId):
pass
class ProfileGetError(ErrorResponse):
pass
class ProfileGetDisplayNameError(ErrorResponse):
pass
class ProfileSetDisplayNameError(ErrorResponse):
pass
class ProfileGetAvatarError(ErrorResponse):
pass
class ProfileSetAvatarError(ErrorResponse):
pass
@attr.s
class LoginResponse(Response):
user_id = attr.ib(type=str)
device_id = attr.ib(type=str)
access_token = attr.ib(type=str)
def __str__(self):
# type: () -> str
return "Logged in as {}, device id: {}.".format(
self.user_id, self.device_id
)
@classmethod
@verify(Schemas.login, LoginError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[LoginResponse, ErrorResponse]
return cls(
parsed_dict["user_id"],
parsed_dict["device_id"],
parsed_dict["access_token"],
)
@attr.s
class LogoutResponse(Response):
def __str__(self):
# type: () -> str
return "Logged out"
@classmethod
@verify(Schemas.empty, LogoutError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[LogoutResponse, ErrorResponse]
"""Create a response for logout response from server."""
return cls()
@attr.s
class JoinedMembersResponse(Response):
members = attr.ib(type=List[RoomMember])
room_id = attr.ib(type=str)
@classmethod
@verify(Schemas.joined_members, JoinedMembersError)
def from_dict(
cls,
parsed_dict, # type: Dict[Any, Any]
room_id # type: str
):
# type: (...) -> Union[JoinedMembersResponse, ErrorResponse]
members = []
for user_id, user_info in parsed_dict["joined"].items():
user = RoomMember(
user_id,
user_info.get("display_name", None),
user_info.get("avatar_url", None)
)
members.append(user)
return cls(members, room_id)
@attr.s
class UploadResponse(Response):
"""A response representing a successful upload request."""
content_uri = attr.ib(type=str)
@classmethod
@verify(Schemas.upload, UploadError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[UploadResponse, ErrorResponse]
return cls(
parsed_dict["content_uri"],
)
@attr.s
class ThumbnailResponse(FileResponse):
"""A response representing a successful thumbnail request."""
@classmethod
def from_data(cls, data, content_type):
# type: (bytes, str) -> Union[ThumbnailResponse, ThumbnailError]
if isinstance(data, bytes):
return cls(body=data, content_type=content_type)
if isinstance(data, dict):
return ThumbnailError.from_dict(data)
return ThumbnailError("invalid data")
@attr.s
class RoomEventIdResponse(Response):
event_id = attr.ib(type=str)
room_id = attr.ib(type=str)
@staticmethod
def create_error(parsed_dict, _room_id):
return ErrorResponse.from_dict(parsed_dict)
@classmethod
def from_dict(
cls,
parsed_dict, # type: Dict[Any, Any]
room_id # type: str
):
# type: (...) -> Union[RoomEventIdResponse, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.room_event_id)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict, room_id)
return cls(parsed_dict["event_id"], room_id)
class RoomSendResponse(RoomEventIdResponse):
@staticmethod
def create_error(parsed_dict, room_id):
return RoomSendError.from_dict(parsed_dict, room_id)
class RoomPutStateResponse(RoomEventIdResponse):
@staticmethod
def create_error(parsed_dict, room_id):
return RoomPutStateError.from_dict(parsed_dict, room_id)
class RoomRedactResponse(RoomEventIdResponse):
@staticmethod
def create_error(parsed_dict, room_id):
return RoomRedactError.from_dict(parsed_dict, room_id)
class EmptyResponse(Response):
@staticmethod
def create_error(parsed_dict):
return ErrorResponse.from_dict(parsed_dict)
@classmethod
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[Any, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.empty)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict)
return cls()
@attr.s
class _EmptyResponseWithRoomId(Response):
room_id = attr.ib(type=str)
@staticmethod
def create_error(parsed_dict, room_id):
return _ErrorWithRoomId.from_dict(parsed_dict, room_id)
@classmethod
def from_dict(cls, parsed_dict, room_id):
# type: (Dict[Any, Any], str) -> Union[Any, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.empty)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict, room_id)
return cls(room_id)
class RoomKickResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomKickError.from_dict(parsed_dict)
class RoomInviteResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomInviteError.from_dict(parsed_dict)
@attr.s
class ShareGroupSessionResponse(Response):
"""Response representing a successful group sessions sharing request.
Attributes:
room_id (str): The room id of the group session.
        users_shared_with (Set[Tuple[str, str]]): A set of (user id, device
            id) tuples identifying the devices with which we shared the group
            session in this request.
"""
room_id = attr.ib(type=str)
users_shared_with = attr.ib(type=set)
@classmethod
@verify(Schemas.empty, ShareGroupSessionError)
def from_dict(
cls,
_, # type: Dict[Any, Any]
room_id, # type: str
users_shared_with # type: Set[Tuple[str, str]]
):
# type: (...) -> Union[ShareGroupSessionResponse, ErrorResponse]
"""Create a response from the json dict the server returns.
Args:
parsed_dict (Dict): The dict containing the raw json response.
            room_id (str): The room id of the room to which the group session
                belongs.
            users_shared_with (Set[Tuple[str, str]]): A set of (user id,
                device id) tuples identifying the devices with which we
                shared the group session in this request.
"""
return cls(room_id, users_shared_with)
class RoomTypingResponse(_EmptyResponseWithRoomId):
"""A response representing a successful room typing request."""
@staticmethod
def create_error(parsed_dict, room_id):
return RoomTypingError.from_dict(parsed_dict, room_id)
class RoomReadMarkersResponse(_EmptyResponseWithRoomId):
"""A response representing a successful room read markers request."""
@staticmethod
def create_error(parsed_dict, room_id):
        return RoomReadMarkersError.from_dict(parsed_dict, room_id)
@attr.s
class DeleteDevicesAuthResponse(Response):
session = attr.ib(type=str)
flows = attr.ib(type=Dict)
params = attr.ib(type=Dict)
@classmethod
@verify(Schemas.delete_devices, DeleteDevicesError)
def from_dict(
cls,
parsed_dict # type: Dict[Any, Any]
):
# type: (...) -> Union[DeleteDevicesAuthResponse, ErrorResponse]
return cls(
parsed_dict["session"],
parsed_dict["flows"],
parsed_dict["params"]
)
class DeleteDevicesResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return DeleteDevicesError.from_dict(parsed_dict)
@attr.s
class RoomMessagesResponse(Response):
room_id = attr.ib(type=str)
chunk = attr.ib(type=List[Union[Event, BadEventType]])
start = attr.ib(type=str)
end = attr.ib(type=str)
@classmethod
@verify(Schemas.room_messages, RoomMessagesError)
def from_dict(
cls,
parsed_dict, # type: Dict[Any, Any]
room_id # type: str
):
# type: (...) -> Union[RoomMessagesResponse, ErrorResponse]
chunk = [] # type: List[Union[Event, BadEventType]]
_, chunk = SyncResponse._get_room_events(parsed_dict["chunk"])
return cls(room_id, chunk, parsed_dict["start"], parsed_dict["end"])
@attr.s
class RoomIdResponse(Response):
room_id = attr.ib(type=str)
@staticmethod
def create_error(parsed_dict):
return ErrorResponse.from_dict(parsed_dict)
@classmethod
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[RoomIdResponse, ErrorResponse]
try:
validate_json(parsed_dict, Schemas.room_id)
except (SchemaError, ValidationError):
return cls.create_error(parsed_dict)
return cls(parsed_dict["room_id"])
class JoinResponse(RoomIdResponse):
@staticmethod
def create_error(parsed_dict):
return JoinError.from_dict(parsed_dict)
class RoomLeaveResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return RoomLeaveError.from_dict(parsed_dict)
class RoomForgetResponse(_EmptyResponseWithRoomId):
"""Response representing a successful forget room request."""
@staticmethod
def create_error(parsed_dict, room_id):
return RoomForgetError.from_dict(parsed_dict, room_id)
@attr.s
class KeysUploadResponse(Response):
curve25519_count = attr.ib(type=int)
signed_curve25519_count = attr.ib(type=int)
@classmethod
@verify(Schemas.keys_upload, KeysUploadError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[KeysUploadResponse, ErrorResponse]
counts = parsed_dict["one_time_key_counts"]
return cls(counts["curve25519"], counts["signed_curve25519"])
@attr.s
class KeysQueryResponse(Response):
device_keys = attr.ib(type=Dict)
failures = attr.ib(type=Dict)
changed = attr.ib(
type=Dict[str, Dict[str, Any]],
init=False,
factory=dict
)
@classmethod
@verify(Schemas.keys_query, KeysQueryError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[KeysQueryResponse, ErrorResponse]
device_keys = parsed_dict["device_keys"]
failures = parsed_dict["failures"]
return cls(device_keys, failures)
@attr.s
class KeysClaimResponse(Response):
one_time_keys = attr.ib(type=Dict[Any, Any])
failures = attr.ib(type=Dict[Any, Any])
room_id = attr.ib(type=str, default="")
@classmethod
@verify(Schemas.keys_claim, KeysClaimError)
def from_dict(
cls,
parsed_dict, # type: Dict[Any, Any]
room_id="" # type: str
):
# type: (...) -> Union[KeysClaimResponse, ErrorResponse]
one_time_keys = parsed_dict["one_time_keys"]
failures = parsed_dict["failures"]
return cls(one_time_keys, failures, room_id)
@attr.s
class DevicesResponse(Response):
devices = attr.ib(type=List[Device])
@classmethod
@verify(Schemas.devices, DevicesError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[DevicesResponse, ErrorResponse]
devices = []
for device_dict in parsed_dict["devices"]:
try:
device = Device.from_dict(device_dict)
except ValueError:
continue
devices.append(device)
return cls(devices)
@attr.s
class RoomKeyRequestError(ErrorResponse):
"""Response representing a failed room key request."""
pass
@attr.s
class RoomKeyRequestResponse(Response):
"""Response representing a successful room key request.
Attributes:
        request_id (str): The unique id of this key request; if we receive a
            to_device event for it, the event will contain the same request
            id.
session_id (str): The id of the session that we requested.
room_id (str): The id of the room that the session belongs to.
algorithm (str): The encryption algorithm of the session.
"""
request_id = attr.ib(type=str)
session_id = attr.ib(type=str)
room_id = attr.ib(type=str)
algorithm = attr.ib(type=str)
@classmethod
@verify(Schemas.empty, RoomKeyRequestError, False)
def from_dict(cls, _, request_id, session_id, room_id, algorithm):
"""Create a RoomKeyRequestResponse from a json response.
Args:
parsed_dict (Dict): The dictionary containing the json response.
            request_id (str): The unique id of this key request; if we
                receive a to_device event for it, the event will contain the
                same request id.
session_id (str): The id of the session that we requested.
room_id (str): The id of the room that the session belongs to.
algorithm (str): The encryption algorithm of the session.
"""
return cls(request_id, session_id, room_id, algorithm)
class UpdateDeviceResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return UpdateDeviceError.from_dict(parsed_dict)
@attr.s
class ProfileGetResponse(Response):
"""Response representing a successful get profile request.
Attributes:
displayname (str, optional): The display name of the user.
None if the user doesn't have a display name.
avatar_url (str, optional): The matrix content URI for the user's
avatar. None if the user doesn't have an avatar.
other_info (dict): Contains any other information returned for the
user's profile.
"""
displayname = attr.ib(type=Optional[str], default=None)
avatar_url = attr.ib(type=Optional[str], default=None)
other_info = attr.ib(type=Dict[Any, Any], factory=dict)
def __str__(self):
# type: () -> str
return "Display name: {}, avatar URL: {}, other info: {}".format(
self.displayname,
self.avatar_url,
self.other_info,
)
@classmethod
@verify(Schemas.get_profile, ProfileGetError)
def from_dict(cls, parsed_dict):
# type: (Dict[Any, Any]) -> Union[ProfileGetResponse, ErrorResponse]
return cls(
parsed_dict.get("displayname"),
parsed_dict.get("avatar_url"),
{k: v for k, v in parsed_dict.items()
if k not in ("displayname", "avatar_url")},
)
@attr.s
class ProfileGetDisplayNameResponse(Response):
"""Response representing a successful get display name request.
Attributes:
displayname (str, optional): The display name of the user.
None if the user doesn't have a display name.
"""
displayname = attr.ib(type=Optional[str], default=None)
def __str__(self):
# type: () -> str
return "Display name: {}".format(self.displayname)
@classmethod
@verify(Schemas.get_displayname, ProfileGetDisplayNameError)
def from_dict(
cls,
parsed_dict # type: (Dict[Any, Any])
):
# type: (...) -> Union[ProfileGetDisplayNameResponse, ErrorResponse]
return cls(parsed_dict.get("displayname"))
class ProfileSetDisplayNameResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return ProfileSetDisplayNameError.from_dict(parsed_dict)
@attr.s
class ProfileGetAvatarResponse(Response):
"""Response representing a successful get avatar request.
Attributes:
avatar_url (str, optional): The matrix content URI for the user's
avatar. None if the user doesn't have an avatar.
"""
avatar_url = attr.ib(type=Optional[str], default=None)
def __str__(self):
# type: () -> str
return "Avatar URL: {}".format(self.avatar_url)
@classmethod
@verify(Schemas.get_avatar, ProfileGetAvatarError)
def from_dict(
cls,
parsed_dict # type: (Dict[Any, Any])
):
# type: (...) -> Union[ProfileGetAvatarResponse, ErrorResponse]
return cls(parsed_dict.get("avatar_url"))
class ProfileSetAvatarResponse(EmptyResponse):
@staticmethod
def create_error(parsed_dict):
return ProfileSetAvatarError.from_dict(parsed_dict)
@attr.s
class ToDeviceError(ErrorResponse):
"""Response representing a unsuccessful room key request."""
to_device_message = attr.ib(default=None)
@classmethod
def from_dict(cls, parsed_dict, message):
try:
validate_json(parsed_dict, Schemas.error)
except (SchemaError, ValidationError):
return cls("unknown error", None, message)
return cls(parsed_dict["error"], parsed_dict["errcode"], message)
@attr.s
class ToDeviceResponse(Response):
"""Response representing a successful room key request."""
to_device_message = attr.ib()
@classmethod
@verify(Schemas.empty, ToDeviceError)
def from_dict(cls, parsed_dict, message):
"""Create a ToDeviceResponse from a json response."""
return cls(message)
@attr.s
class RoomContextError(_ErrorWithRoomId):
"""Response representing a unsuccessful room context request."""
@attr.s
class RoomContextResponse(Response):
"""Room event context response.
This Response holds a number of events that happened just before and after
a specified event.
Attributes:
room_id(str): The room id of the room which the events belong to.
start(str): A token that can be used to paginate backwards with.
end(str): A token that can be used to paginate forwards with.
events_before(List[Event]): A list of room events that happened just
before the requested event, in reverse-chronological order.
event(Event): Details of the requested event.
events_after(List[Event]): A list of room events that happened just
after the requested event, in chronological order.
state(List[Event]): The state of the room at the last event returned.
"""
room_id = attr.ib(type=str)
start = attr.ib(type=str)
end = attr.ib(type=str)
event = attr.ib()
events_before = attr.ib(type=List[Union[Event, BadEventType]])
events_after = attr.ib(type=List[Union[Event, BadEventType]])
state = attr.ib(type=List[Union[Event, BadEventType]])
@classmethod
@verify(Schemas.room_context, RoomContextError)
def from_dict(
cls,
        parsed_dict,  # type: Dict[Any, Any]
        room_id  # type: str
):
# type: (...) -> Union[RoomContextResponse, ErrorResponse]
_, events_before = SyncResponse._get_room_events(
parsed_dict["events_before"]
)
_, events_after = SyncResponse._get_room_events(
parsed_dict["events_after"]
)
event = Event.parse_event(parsed_dict["event"])
_, state = SyncResponse._get_room_events(
parsed_dict["state"]
)
return cls(room_id, parsed_dict["start"], parsed_dict["end"],
event, events_before, events_after, state)
@attr.s
class _SyncResponse(Response):
next_batch = attr.ib(type=str)
rooms = attr.ib(type=Rooms)
device_key_count = attr.ib(type=DeviceOneTimeKeyCount)
device_list = attr.ib(type=DeviceList)
to_device_events = attr.ib(type=List[ToDeviceEvent])
def __str__(self):
# type: () -> str
room_messages = []
for room_id, room_info in self.rooms.join.items():
room_header = " Messages for room {}:\n ".format(room_id)
messages = []
for event in room_info.timeline.events:
messages.append(str(event))
room_message = room_header + "\n ".join(messages)
room_messages.append(room_message)
body = "\n".join(room_messages)
string = ("Sync response until batch: {}:\n{}").format(
self.next_batch, body
)
return string
@staticmethod
def _get_room_events(
parsed_dict, # type: List[Dict[Any, Any]]
max_events=0 # type: int
):
# type: (...) -> Tuple[int, List[Union[Event, BadEventType]]]
events = [] # type: List[Union[Event, BadEventType]]
counter = 0
for counter, event_dict in enumerate(parsed_dict, 1):
event = Event.parse_event(event_dict)
if event:
events.append(event)
if max_events > 0 and counter >= max_events:
break
return counter, events
@staticmethod
def _get_to_device(parsed_dict):
# type: (Dict[Any, Any]) -> List[ToDeviceEvent]
events = [] # type: List[ToDeviceEvent]
for event_dict in parsed_dict["events"]:
event = ToDeviceEvent.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_timeline(parsed_dict, max_events=0):
# type: (Dict[Any, Any], int) -> Tuple[int, Timeline]
validate_json(parsed_dict, Schemas.room_timeline)
counter, events = _SyncResponse._get_room_events(
parsed_dict["events"],
max_events
)
return counter, Timeline(
events, parsed_dict["limited"], parsed_dict["prev_batch"]
)
@staticmethod
def _get_state(parsed_dict, max_events=0):
validate_json(parsed_dict, Schemas.room_state)
counter, events = _SyncResponse._get_room_events(
parsed_dict["events"],
max_events
)
return counter, events
@staticmethod
def _get_invite_state(parsed_dict):
validate_json(parsed_dict, Schemas.room_state)
events = []
for event_dict in parsed_dict["events"]:
event = InviteEvent.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_ephemeral_events(parsed_dict):
events = []
for event_dict in parsed_dict:
event = EphemeralEvent.parse_event(event_dict)
if event:
events.append(event)
return events
@staticmethod
def _get_join_info(
state_events, # type: List[Any]
timeline_events, # type: List[Any]
prev_batch, # type: str
limited, # type: bool
ephemeral_events, # type: List[Any]
summary_events, # type: Dict[str, Any]
account_data_events, # type: List[Any]
max_events=0 # type: int
):
# type: (...) -> Tuple[RoomInfo, Optional[RoomInfo]]
counter, state = _SyncResponse._get_room_events(
state_events,
max_events
)
unhandled_state = state_events[counter:]
timeline_max = max_events - counter
if timeline_max <= 0 and max_events > 0:
timeline = Timeline(
[],
limited,
prev_batch,
)
counter = 0
else:
counter, events = _SyncResponse._get_room_events(
timeline_events, timeline_max
)
timeline = Timeline(events, limited, prev_batch)
unhandled_timeline = Timeline(
timeline_events[counter:],
limited,
prev_batch
)
ephemeral_event_list = _SyncResponse._get_ephemeral_events(
ephemeral_events
)
unhandled_info = None
if unhandled_timeline.events or unhandled_state:
unhandled_info = RoomInfo(
unhandled_timeline,
unhandled_state,
[],
[]
)
summary = RoomSummary(
summary_events.get("m.invited_member_count", None),
summary_events.get("m.joined_member_count", None),
summary_events.get("m.heroes", [])
)
account_data = RoomInfo.parse_account_data(account_data_events)
join_info = RoomInfo(
timeline,
state,
ephemeral_event_list,
account_data,
summary,
)
return join_info, unhandled_info
@staticmethod
def _get_room_info(parsed_dict, max_events=0):
# type: (Dict[Any, Any], int) -> Tuple[Rooms, Dict[str, RoomInfo]]
joined_rooms = {
key: None for key in parsed_dict["join"].keys()
} # type: Dict[str, Optional[RoomInfo]]
invited_rooms = {} # type: Dict[str, InviteInfo]
left_rooms = {} # type: Dict[str, RoomInfo]
unhandled_rooms = {}
for room_id, room_dict in parsed_dict["invite"].items():
state = _SyncResponse._get_invite_state(room_dict["invite_state"])
invite_info = InviteInfo(state)
invited_rooms[room_id] = invite_info
for room_id, room_dict in parsed_dict["leave"].items():
_, state = _SyncResponse._get_state(room_dict["state"])
_, timeline = _SyncResponse._get_timeline(room_dict["timeline"])
leave_info = RoomInfo(timeline, state, [], [])
left_rooms[room_id] = leave_info
for room_id, room_dict in parsed_dict["join"].items():
join_info, unhandled_info = _SyncResponse._get_join_info(
room_dict["state"]["events"],
room_dict["timeline"]["events"],
room_dict["timeline"]["prev_batch"],
room_dict["timeline"]["limited"],
room_dict["ephemeral"]["events"],
room_dict.get("summary", {}),
room_dict["account_data"]["events"],
max_events
)
if unhandled_info:
unhandled_rooms[room_id] = unhandled_info
joined_rooms[room_id] = join_info
return Rooms(invited_rooms, joined_rooms, left_rooms), unhandled_rooms
@classmethod
@verify(Schemas.sync, SyncError, False)
def from_dict(
cls,
parsed_dict, # type: Dict[Any, Any]
max_events=0, # type: int
):
# type: (...) -> Union[SyncType, ErrorResponse]
to_device = cls._get_to_device(parsed_dict["to_device"])
key_count_dict = parsed_dict["device_one_time_keys_count"]
key_count = DeviceOneTimeKeyCount(
key_count_dict["curve25519"],
key_count_dict["signed_curve25519"]
)
devices = DeviceList(
parsed_dict["device_lists"]["changed"],
parsed_dict["device_lists"]["left"],
)
rooms, unhandled_rooms = _SyncResponse._get_room_info(
parsed_dict["rooms"], max_events)
if unhandled_rooms:
return PartialSyncResponse(
parsed_dict["next_batch"],
rooms,
key_count,
devices,
to_device,
unhandled_rooms,
)
return SyncResponse(
parsed_dict["next_batch"],
rooms,
key_count,
devices,
to_device,
)
class SyncResponse(_SyncResponse):
pass
@attr.s
class PartialSyncResponse(_SyncResponse):
unhandled_rooms = attr.ib(type=Dict[str, RoomInfo])
def next_part(self, max_events=0):
# type: (int) -> SyncType
unhandled_rooms = {}
joined_rooms = {}
for room_id, room_info in self.unhandled_rooms.items():
join_info, unhandled_info = _SyncResponse._get_join_info(
room_info.state,
room_info.timeline.events,
room_info.timeline.prev_batch,
room_info.timeline.limited,
[],
{},
[],
max_events
)
if unhandled_info:
unhandled_rooms[room_id] = unhandled_info
joined_rooms[room_id] = join_info
new_rooms = Rooms({}, joined_rooms, {})
if unhandled_rooms:
next_response = PartialSyncResponse(
self.next_batch,
new_rooms,
self.device_key_count,
DeviceList([], []),
[],
unhandled_rooms,
) # type: SyncType
else:
next_response = SyncResponse(
self.next_batch,
new_rooms,
self.device_key_count,
DeviceList([], []),
[],
)
if self.uuid:
next_response.uuid = self.uuid
if self.start_time and self.end_time:
next_response.start_time = self.start_time
next_response.end_time = self.end_time
return next_response
SyncType = Union[SyncResponse, PartialSyncResponse]
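# A minimal consumption sketch for chunked sync handling (`parsed_dict` and
# `handle` are hypothetical names; `max_events` is this module's own
# parameter):
#
#   response = SyncResponse.from_dict(parsed_dict, max_events=100)
#   while isinstance(response, PartialSyncResponse):
#       handle(response)  # rooms handled so far
#       response = response.next_part(max_events=100)
#   handle(response)      # the final SyncResponse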
|
frase = 'Curso em Vídeo Python'
print(frase.replace('Python','Android'))
|
import statistics
from db_model import *
def get_user_by_id(user_id):
return User.get(User.id == user_id)
def get_movie_by_id(movie_id):
return Movie.get(Movie.id == movie_id)
def get_movie_rating_by_user(user_id):
return list(Rating.select().where(Rating.user_id == user_id))
def get_user_who_rate_movie(movie_id):
return list(Rating.select().where(Rating.movie_id == movie_id))
def get_user_movie_rating(user_id, movie_id):
try:
return Rating.get(Rating.user_id == user_id, Rating.movie_id == movie_id).rating
except DoesNotExist:
return 'Unknown'
def get_all_movie():
return list(Movie.select())
def get_movie_average_rating(movie_id):
try:
return AverageRating.get(AverageRating.movie_id == movie_id).rating
except DoesNotExist:
return 'Unknown'
def is_user_watched(user, movies):
"""check whether user has wathed movies
Args:
user: int user id
movies: list list of movie ids.
"""
try:
Rating.get(Rating.user_id == user, Rating.movie_id << movies)
return True
except DoesNotExist:
return False
def get_user_watched_two_movies(movie1_id, movie2_id):
"""return the user who have watched both movies"""
a = list(Rating.select(Rating.user_id, fn.COUNT(Rating.movie_id).alias('watched')).where(
Rating.movie_id << [movie1_id, movie2_id]).group_by(Rating.user_id))
return [i.user_id for i in a if i.watched == 2]
def get_two_movies_average_rating(movie1_id, movie2_id, threshold=50):
"""return the average rating for two movies, based on the users who have watched both of the movies"""
users = get_user_watched_two_movies(movie1_id, movie2_id)
if users and len(users) > threshold:
ratings1 = Rating.select(Rating.rating).where(Rating.movie_id == movie1_id, Rating.user_id << users)
ratings2 = Rating.select(Rating.rating).where(Rating.movie_id == movie2_id, Rating.user_id << users)
return statistics.mean([i.rating for i in ratings1]), statistics.mean([i.rating for i in ratings2]), len(users)
else:
return 0, 0, 0
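# A minimal usage sketch (the movie ids and threshold are hypothetical; assumes
# the peewee models imported from db_model are bound to a populated database):
#
#   avg1, avg2, n = get_two_movies_average_rating(1, 2, threshold=50)
#   if n:
#       print('co-watchers:', n, 'averages:', avg1, avg2)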
|
import pygame, sys, math, random
class Ball():
def __init__(self, image, speed=[5,5], startPos=[0,0]):
self.images= [
pygame.image.load("Ball/ball.png"),
pygame.image.load("Ball/ballA.png"),
pygame.image.load("Ball/ballB.png"),
pygame.image.load("Ball/ballAni1.png"),
pygame.image.load("Ball/ballAni3.png"),
pygame.image.load("Ball/ballAni4.png"),
pygame.image.load("Ball/ballAni5.png"),
pygame.image.load("Ball/ballAni6.png"),
pygame.image.load("Ball/ballAni7.png"),
pygame.image.load("Ball/ballAni8.png"),
pygame.image.load("Ball/ballAni9.png"),
pygame.image.load("Ball/ballAni10.png"),
pygame.image.load("Ball/ballAni11.png"),
]
self.image= self.images[0]
self.rect = self.image.get_rect(center = startPos)
self.speedx = speed[0]
self.speedy = speed[1]
self.speed = [self.speedx, self.speedy]
#self.rect = self.rect.move(startPos)
self.radius = (self.rect.width/2 + self.rect.height/2)/2
self.didBounceX = False
self.didBounceY = False
self.speedChange = .2
self.living = True
self.dying = False
self.amount = 2
self.frame = 0
self.frameMax = len(self.images) -1
self.frameTimer = 0
self.frameTimerMax = 60/6/len(self.images)
self.startPos = startPos
self.startSpeed = speed
self.owner = 0
def getDist(self, pt):
x1 = self.rect.centerx
y1 = self.rect.centery
x2 = pt[0]
y2 = pt[1]
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
def move(self):
self.speed = [self.speedx, self.speedy]
self.rect = self.rect.move(self.speed)
def bounceWall(self, size):
width = size[0]
height = size[1]
if self.rect.left < 0 or self.rect.right > width:
if not self.didBounceX:
self.speedx = -self.speedx
self.didBounceX = True
if self.rect.top < 0 or self.rect.bottom > height:
if not self.didBounceY:
self.speedy = -self.speedy
self.didBounceY = True
self.dying = True
self.speedx = 0
self.speedy = 0
self.owner = 0
def update(self, size):
self.didBounceX = False
self.didBounceY = False
self.move()
self.bounceWall(size)
if self.owner == 0:
self.image = self.images[0]
if self.owner == 1:
self.image = self.images[1]
if self.owner == 2:
self.image = self.images[2]
if self.dying == True:
if self.frameTimer < self.frameTimerMax :
self.frameTimer += 1
else:
self.frameTimer = 0
self.frame += 1
if self.frame > self.frameMax:
self.living = False
self.dying = False
self.frame = 0
spawnList = [300,650]
if self.owner == 0:
self.rect.center = [random.randint(200,1000),random.choice(spawnList)]
if self.owner == 1:
self.rect.center = [random.randint(200,1000),300]
if self.owner == 2:
self.rect.center = [random.randint(200,1000),650]
self.speedx = self.startSpeed[0]
self.speedy = self.startSpeed[1]
self.image = self.images[self.frame]
def rktcollide(self, other):
if self.rect.right > other.rect.left:
if self.rect.left < other.rect.right:
if self.rect.top < other.rect.bottom:
if self.rect.bottom > other.rect.top:
if not self.didBounceX:
if self.speedx > 1: #right
if self.rect.centerx < other.rect.centerx:
self.speedx = -self.speedx
self.didBounceX = True
if self.speedx < 1: #left
if self.rect.centerx > other.rect.centerx:
self.speedx = -self.speedx
self.didBounceX = True
if not self.didBounceY:
if self.speedy > 1: #down
if self.rect.centery < other.rect.centery:
self.speedy = -self.speedy
self.didBounceY = True
if self.speedy < 1: #up
if self.rect.centery > other.rect.centery:
self.speedy = -self.speedy
self.didBounceY = True
self.speedx += random.randint(-2, 2)/10.0
self.speedy += random.randint(-2, 2)/10.0
return True
return False
def blockcollide(self, other):
if not other.dying:
if self.rect.right > other.rect.left:
if self.rect.left < other.rect.right:
if self.rect.top < other.rect.bottom:
if self.rect.bottom > other.rect.top:
if self.radius+other.radius > self.getDist(other.rect.center):
if not self.didBounceX:
if self.speedx > 1: #right
if self.rect.centerx < other.rect.centerx:
self.speedx = -self.speedx
self.didBounceX = True
if self.speedx < 1: #left
if self.rect.centerx > other.rect.centerx:
self.speedx = -self.speedx
self.didBounceX = True
if not self.didBounceY:
if self.speedy > 1: #down
if self.rect.centery < other.rect.centery:
self.speedy = -self.speedy
self.didBounceY = True
if self.speedy < 1: #up
if self.rect.centery > other.rect.centery:
self.speedy = -self.speedy
self.didBounceY = True
self.speedx += random.randint(-3, 3)/10.0
self.speedy += random.randint(-3, 3)/10.0
return True
return False
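# A minimal usage sketch (assumes the Ball/*.png assets exist relative to the
# working directory; the screen size and start position are illustrative, and
# the unused `image` argument is passed as None):
#
#   pygame.init()
#   screen = pygame.display.set_mode((1200, 900))
#   ball = Ball(None, speed=[5, 5], startPos=[600, 450])
#   ball.update(screen.get_size())
#   screen.blit(ball.image, ball.rect)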
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TunnelConnectionHealth(Model):
"""VirtualNetworkGatewayConnection properties.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar tunnel: Tunnel name.
:vartype tunnel: str
:ivar connection_status: Virtual network Gateway connection status.
Possible values include: 'Unknown', 'Connecting', 'Connected',
'NotConnected'
:vartype connection_status: str or
~azure.mgmt.network.v2017_06_01.models.VirtualNetworkGatewayConnectionStatus
:ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this
connection
:vartype ingress_bytes_transferred: long
:ivar egress_bytes_transferred: The Egress Bytes Transferred in this
connection
:vartype egress_bytes_transferred: long
:ivar last_connection_established_utc_time: The time at which connection
was established in Utc format.
:vartype last_connection_established_utc_time: str
"""
_validation = {
'tunnel': {'readonly': True},
'connection_status': {'readonly': True},
'ingress_bytes_transferred': {'readonly': True},
'egress_bytes_transferred': {'readonly': True},
'last_connection_established_utc_time': {'readonly': True},
}
_attribute_map = {
'tunnel': {'key': 'tunnel', 'type': 'str'},
'connection_status': {'key': 'connectionStatus', 'type': 'str'},
'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'},
}
def __init__(self, **kwargs):
super(TunnelConnectionHealth, self).__init__(**kwargs)
self.tunnel = None
self.connection_status = None
self.ingress_bytes_transferred = None
self.egress_bytes_transferred = None
self.last_connection_established_utc_time = None
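# A minimal deserialization sketch (the payload is hypothetical;
# Model.deserialize comes from msrest.serialization and is shared by all
# generated models):
#
#   health = TunnelConnectionHealth.deserialize(
#       {'tunnel': 'tunnel1', 'connectionStatus': 'Connected'})
#   print(health.connection_status)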
|
# pylint: disable-msg=E1101,W0612
import operator
from datetime import datetime
import nose
from numpy import nan
import numpy as np
import pandas as pd
dec = np.testing.dec
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal, assertRaisesRegexp)
from numpy.testing import assert_equal
from pandas import Series, DataFrame, bdate_range, Panel
from pandas.core.datetools import BDay
from pandas.core.index import Index
from pandas.tseries.index import DatetimeIndex
import pandas.core.datetools as datetools
from pandas.core.common import isnull
import pandas.util.testing as tm
from pandas.compat import range, lrange, cPickle as pickle, StringIO
from pandas import compat
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
from pandas.sparse.api import (SparseSeries, SparseTimeSeries,
SparseDataFrame, SparsePanel,
SparseArray)
import pandas.tests.test_frame as test_frame
import pandas.tests.test_panel as test_panel
import pandas.tests.test_series as test_series
from .test_array import assert_sp_array_equal
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
index = np.arange(20)
arr[:2] = nan
arr[5:10] = nan
arr[-3:] = nan
return arr, index
def _test_data2():
# nan-based
arr = np.arange(15, dtype=float)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
return arr, index
def _test_data1_zero():
# zero-based
arr, index = _test_data1()
arr[np.isnan(arr)] = 0
return arr, index
def _test_data2_zero():
# zero-based
arr, index = _test_data2()
arr[np.isnan(arr)] = 0
return arr, index
def assert_sp_series_equal(a, b, exact_indices=True):
assert(a.index.equals(b.index))
assert_sp_array_equal(a, b)
def assert_sp_frame_equal(left, right, exact_indices=True):
"""
exact: Series SparseIndex objects must be exactly the same, otherwise just
compare dense representations
"""
for col, series in compat.iteritems(left):
assert(col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col])
else:
assert_series_equal(series.to_dense(), right[col].to_dense())
assert_almost_equal(left.default_fill_value,
right.default_fill_value)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert(col in left)
def assert_sp_panel_equal(left, right, exact_indices=True):
for item, frame in compat.iteritems(left):
assert(item in right)
# trade-off?
assert_sp_frame_equal(frame, right[item], exact_indices=exact_indices)
assert_almost_equal(left.default_fill_value,
right.default_fill_value)
assert(left.default_kind == right.default_kind)
for item in right:
assert(item in left)
class TestSparseSeries(tm.TestCase,
test_series.CheckNameIntegration):
_multiprocess_can_split_ = True
def setUp(self):
arr, index = _test_data1()
date_index = bdate_range('1/1/2011', periods=len(index))
self.bseries = SparseSeries(arr, index=index, kind='block')
self.bseries.name = 'bseries'
self.ts = self.bseries
self.btseries = SparseSeries(arr, index=date_index, kind='block')
self.iseries = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data2()
self.bseries2 = SparseSeries(arr, index=index, kind='block')
self.iseries2 = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
arr, index = _test_data2_zero()
self.zbseries2 = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries2 = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
# printing & access
df.iloc[:1]
df['col']
df.dtypes
str(df)
assert_sp_series_equal(df['col'], self.bseries)
# blocking
expected = Series({'col': 'float64:sparse'})
result = df.ftypes
assert_series_equal(expected, result)
def test_series_density(self):
# GH2803
ts = Series(np.random.randn(10))
ts[2:-2] = nan
sts = ts.to_sparse()
density = sts.density # don't die
self.assertEqual(density, 4 / 10.0)
def test_sparse_to_dense(self):
arr, index = _test_data1()
series = self.bseries.to_dense()
assert_equal(series, arr)
series = self.bseries.to_dense(sparse_only=True)
assert_equal(series, arr[np.isfinite(arr)])
series = self.iseries.to_dense()
assert_equal(series, arr)
arr, index = _test_data1_zero()
series = self.zbseries.to_dense()
assert_equal(series, arr)
series = self.ziseries.to_dense()
assert_equal(series, arr)
def test_dense_to_sparse(self):
series = self.bseries.to_dense()
bseries = series.to_sparse(kind='block')
iseries = series.to_sparse(kind='integer')
assert_sp_series_equal(bseries, self.bseries)
assert_sp_series_equal(iseries, self.iseries)
# non-NaN fill value
series = self.zbseries.to_dense()
zbseries = series.to_sparse(kind='block', fill_value=0)
ziseries = series.to_sparse(kind='integer', fill_value=0)
assert_sp_series_equal(zbseries, self.zbseries)
assert_sp_series_equal(ziseries, self.ziseries)
def test_to_dense_preserve_name(self):
assert(self.bseries.name is not None)
result = self.bseries.to_dense()
self.assertEquals(result.name, self.bseries.name)
def test_constructor(self):
# test setup guys
self.assert_(np.isnan(self.bseries.fill_value))
tm.assert_isinstance(self.bseries.sp_index, BlockIndex)
self.assert_(np.isnan(self.iseries.fill_value))
tm.assert_isinstance(self.iseries.sp_index, IntIndex)
self.assertEquals(self.zbseries.fill_value, 0)
assert_equal(self.zbseries.values.values,
self.bseries.to_dense().fillna(0).values)
# pass SparseSeries
s2 = SparseSeries(self.bseries)
s3 = SparseSeries(self.iseries)
s4 = SparseSeries(self.zbseries)
assert_sp_series_equal(s2, self.bseries)
assert_sp_series_equal(s3, self.iseries)
assert_sp_series_equal(s4, self.zbseries)
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
tm.assert_isinstance(s5, SparseTimeSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
assert_equal(self.bseries.sp_values, bseries2.sp_values)
# pass dict?
# don't copy the data by default
values = np.ones(self.bseries.npoints)
sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
sp.sp_values[:5] = 97
self.assertEqual(values[0], 97)
# but can make it copy!
sp = SparseSeries(values, sparse_index=self.bseries.sp_index,
copy=True)
sp.sp_values[:5] = 100
self.assertEqual(values[0], 97)
def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
self.assert_((sp.ix[:99] == data).all())
self.assert_(isnull(sp.ix[100:]).all())
data = np.nan
sp = SparseSeries(data, np.arange(100))
def test_constructor_ndarray(self):
pass
def test_constructor_nonnan(self):
arr = [0, 0, 0, nan, nan]
sp_series = SparseSeries(arr, fill_value=0)
assert_equal(sp_series.values.values, arr)
def test_copy_astype(self):
cop = self.bseries.astype(np.float64)
self.assertIsNot(cop, self.bseries)
self.assertIs(cop.sp_index, self.bseries.sp_index)
self.assertEqual(cop.dtype, np.float64)
cop2 = self.iseries.copy()
assert_sp_series_equal(cop, self.bseries)
assert_sp_series_equal(cop2, self.iseries)
# test that data is copied
cop[:5] = 97
self.assertEqual(cop.sp_values[0], 97)
self.assertNotEqual(self.bseries.sp_values[0], 97)
# correct fill value
zbcop = self.zbseries.copy()
zicop = self.ziseries.copy()
assert_sp_series_equal(zbcop, self.zbseries)
assert_sp_series_equal(zicop, self.ziseries)
# no deep copy
view = self.bseries.copy(deep=False)
view.sp_values[:5] = 5
self.assert_((self.bseries.sp_values[:5] == 5).all())
def test_astype(self):
self.assertRaises(Exception, self.bseries.astype, np.int64)
def test_kind(self):
self.assertEquals(self.bseries.kind, 'block')
self.assertEquals(self.iseries.kind, 'integer')
def test_pickle(self):
def _test_roundtrip(series):
pickled = pickle.dumps(series, protocol=pickle.HIGHEST_PROTOCOL)
unpickled = pickle.loads(pickled)
assert_sp_series_equal(series, unpickled)
assert_series_equal(series.to_dense(), unpickled.to_dense())
self._check_all(_test_roundtrip)
def _check_all(self, check_func):
check_func(self.bseries)
check_func(self.iseries)
check_func(self.zbseries)
check_func(self.ziseries)
def test_getitem(self):
def _check_getitem(sp, dense):
for idx, val in compat.iteritems(dense):
assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
assert_almost_equal(sp[i], dense[i])
# j = np.float64(i)
# assert_almost_equal(sp[j], dense[j])
# API change 1/6/2012
# negative getitem works
# for i in xrange(len(dense)):
# assert_almost_equal(sp[-i], dense[-i])
_check_getitem(self.bseries, self.bseries.to_dense())
_check_getitem(self.btseries, self.btseries.to_dense())
_check_getitem(self.zbseries, self.zbseries.to_dense())
_check_getitem(self.iseries, self.iseries.to_dense())
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
self.assertRaises(Exception, self.bseries.__getitem__,
len(self.bseries) + 1)
# index not contained
self.assertRaises(Exception, self.btseries.__getitem__,
self.btseries.index[-1] + BDay())
def test_get_get_value(self):
assert_almost_equal(self.bseries.get(10), self.bseries[10])
self.assertIsNone(self.bseries.get(len(self.bseries) + 1))
dt = self.btseries.index[10]
result = self.btseries.get(dt)
expected = self.btseries.to_dense()[dt]
assert_almost_equal(result, expected)
assert_almost_equal(self.bseries.get_value(10), self.bseries[10])
def test_set_value(self):
idx = self.btseries.index[7]
self.btseries.set_value(idx, 0)
self.assertEqual(self.btseries[idx], 0)
self.iseries.set_value('foobar', 0)
self.assertEqual(self.iseries.index[-1], 'foobar')
self.assertEqual(self.iseries['foobar'], 0)
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
tm.assert_isinstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
assert_sp_series_equal(res, expected)
res = self.bseries[:5]
tm.assert_isinstance(res, SparseSeries)
assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
# negative indices
res = self.bseries[:-3]
assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
def test_take(self):
def _compare_with_dense(sp):
dense = sp.to_dense()
def _compare(idx):
dense_result = dense.take(idx).values
sparse_result = sp.take(idx)
self.assertIsInstance(sparse_result, SparseSeries)
assert_almost_equal(dense_result, sparse_result.values.values)
_compare([1., 2., 3., 4., 5., 0.])
_compare([7, 2, 9, 0, 4])
_compare([3, 6, 3, 4, 7])
self._check_all(_compare_with_dense)
self.assertRaises(Exception, self.bseries.take,
[0, len(self.bseries) + 1])
# Corner case
        sp = SparseSeries(np.ones(10) * nan)
assert_almost_equal(sp.take([0, 1, 2, 3, 4]), np.repeat(nan, 5))
def test_setitem(self):
self.bseries[5] = 7.
self.assertEqual(self.bseries[5], 7.)
def test_setslice(self):
self.bseries[5:10] = 7.
assert_series_equal(self.bseries[5:10].to_dense(), Series(
7., index=range(5, 10), name=self.bseries.name))
def test_operators(self):
def _check_op(a, b, op):
sp_result = op(a, b)
adense = a.to_dense() if isinstance(a, SparseSeries) else a
bdense = b.to_dense() if isinstance(b, SparseSeries) else b
dense_result = op(adense, bdense)
assert_almost_equal(sp_result.to_dense(), dense_result)
def check(a, b):
_check_op(a, b, operator.add)
_check_op(a, b, operator.sub)
_check_op(a, b, operator.truediv)
_check_op(a, b, operator.floordiv)
_check_op(a, b, operator.mul)
_check_op(a, b, lambda x, y: operator.add(y, x))
_check_op(a, b, lambda x, y: operator.sub(y, x))
_check_op(a, b, lambda x, y: operator.truediv(y, x))
_check_op(a, b, lambda x, y: operator.floordiv(y, x))
_check_op(a, b, lambda x, y: operator.mul(y, x))
# NaN ** 0 = 1 in C?
# _check_op(a, b, operator.pow)
# _check_op(a, b, lambda x, y: operator.pow(y, x))
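            # Editor's note: in both CPython and NumPy, nan ** 0 evaluates to
            # 1.0 (float('nan') ** 0 == 1.0), which is presumably why the pow
            # checks above are disabled -- a NaN fill value would flip to 1.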
check(self.bseries, self.bseries)
check(self.iseries, self.iseries)
check(self.bseries, self.iseries)
check(self.bseries, self.bseries2)
check(self.bseries, self.iseries2)
check(self.iseries, self.iseries2)
# scalar value
check(self.bseries, 5)
# zero-based
check(self.zbseries, self.zbseries * 2)
check(self.zbseries, self.zbseries2)
check(self.ziseries, self.ziseries2)
# with dense
result = self.bseries + self.bseries.to_dense()
assert_sp_series_equal(result, self.bseries + self.bseries)
    # @dec.knownfailureif(True, 'Known NumPy failure as of 1.5.1')
def test_operators_corner2(self):
        raise nose.SkipTest('known failure on numpy 1.5.1')
# NumPy circumvents __r*__ operations
val = np.float64(3.0)
result = val - self.zbseries
assert_sp_series_equal(result, 3 - self.zbseries)
def test_binary_operators(self):
# skipping for now #####
raise nose.SkipTest("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
expected = op(tmp, self.bseries)
iop(tmp, self.bseries)
assert_sp_series_equal(tmp, expected)
inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
for op in inplace_ops:
_check_inplace_op(
getattr(operator, "i%s" % op), getattr(operator, op))
def test_reindex(self):
def _compare_with_series(sps, new_index):
spsre = sps.reindex(new_index)
series = sps.to_dense()
seriesre = series.reindex(new_index)
seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
assert_sp_series_equal(spsre, seriesre)
assert_series_equal(spsre.to_dense(), seriesre.to_dense())
_compare_with_series(self.bseries, self.bseries.index[::2])
_compare_with_series(self.bseries, list(self.bseries.index[::2]))
_compare_with_series(self.bseries, self.bseries.index[:10])
_compare_with_series(self.bseries, self.bseries.index[5:])
_compare_with_series(self.zbseries, self.zbseries.index[::2])
_compare_with_series(self.zbseries, self.zbseries.index[:10])
_compare_with_series(self.zbseries, self.zbseries.index[5:])
# special cases
same_index = self.bseries.reindex(self.bseries.index)
assert_sp_series_equal(self.bseries, same_index)
self.assertIsNot(same_index, self.bseries)
# corner cases
sp = SparseSeries([], index=[])
sp_zero = SparseSeries([], index=[], fill_value=0)
        _compare_with_series(sp, np.arange(10))
        _compare_with_series(sp_zero, np.arange(10))
# with copy=False
reindexed = self.bseries.reindex(self.bseries.index, copy=True)
reindexed.sp_values[:] = 1.
self.assert_((self.bseries.sp_values != 1.).all())
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
np.testing.assert_array_equal(self.bseries.sp_values, 1.)
def test_sparse_reindex(self):
length = 10
def _check(values, index1, index2, fill_value):
first_series = SparseSeries(values, sparse_index=index1,
fill_value=fill_value)
reindexed = first_series.sparse_reindex(index2)
self.assertIs(reindexed.sp_index, index2)
int_indices1 = index1.to_int_index().indices
int_indices2 = index2.to_int_index().indices
expected = Series(values, index=int_indices1)
expected = expected.reindex(int_indices2).fillna(fill_value)
assert_almost_equal(expected.values, reindexed.sp_values)
def _check_with_fill_value(values, first, second, fill_value=nan):
i_index1 = IntIndex(length, first)
i_index2 = IntIndex(length, second)
b_index1 = i_index1.to_block_index()
b_index2 = i_index2.to_block_index()
_check(values, i_index1, i_index2, fill_value)
_check(values, b_index1, b_index2, fill_value)
def _check_all(values, first, second):
_check_with_fill_value(values, first, second, fill_value=nan)
_check_with_fill_value(values, first, second, fill_value=0)
index1 = [2, 4, 5, 6, 8, 9]
values1 = np.arange(6.)
_check_all(values1, index1, [2, 4, 5])
_check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
_check_all(values1, index1, [0, 1])
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
first_series = SparseSeries(values1, sparse_index=IntIndex(length,
index1),
fill_value=nan)
with tm.assertRaisesRegexp(TypeError,
'new index must be a SparseIndex'):
reindexed = first_series.sparse_reindex(0)
def test_repr(self):
bsrepr = repr(self.bseries)
isrepr = repr(self.iseries)
def test_iter(self):
pass
def test_truncate(self):
pass
def test_fillna(self):
pass
def test_groupby(self):
pass
def test_reductions(self):
def _compare_with_dense(obj, op):
sparse_result = getattr(obj, op)()
series = obj.to_dense()
dense_result = getattr(series, op)()
self.assertEquals(sparse_result, dense_result)
to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']
def _compare_all(obj):
for op in to_compare:
_compare_with_dense(obj, op)
_compare_all(self.bseries)
self.bseries.sp_values[5:10] = np.NaN
_compare_all(self.bseries)
_compare_all(self.zbseries)
self.zbseries.sp_values[5:10] = np.NaN
_compare_all(self.zbseries)
series = self.zbseries.copy()
series.fill_value = 2
_compare_all(series)
nonna = Series(np.random.randn(20)).to_sparse()
_compare_all(nonna)
nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
_compare_all(nonna2)
def test_dropna(self):
sp = SparseSeries([0, 0, 0, nan, nan, 5, 6],
fill_value=0)
sp_valid = sp.valid()
expected = sp.to_dense().valid()
expected = expected[expected != 0]
assert_almost_equal(sp_valid.values, expected.values)
self.assert_(sp_valid.index.equals(expected.index))
self.assertEquals(len(sp_valid.sp_values), 2)
result = self.bseries.dropna()
expected = self.bseries.to_dense().dropna()
self.assertNotIsInstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_homogenize(self):
def _check_matches(indices, expected):
data = {}
for i, idx in enumerate(indices):
data[i] = SparseSeries(idx.to_int_index().indices,
sparse_index=idx)
homogenized = spf.homogenize(data)
for k, v in compat.iteritems(homogenized):
assert(v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]),
BlockIndex(10, [1, 6], [3, 4]),
BlockIndex(10, [0], [10])]
expected1 = BlockIndex(10, [2, 6], [2, 3])
_check_matches(indices1, expected1)
indices2 = [BlockIndex(10, [2], [7]),
BlockIndex(10, [2], [7])]
expected2 = indices2[0]
_check_matches(indices2, expected2)
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
assertRaisesRegexp(TypeError, "NaN fill value", spf.homogenize, data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
cop.fill_value = 0
result = self.bseries / cop
self.assert_(np.isnan(result.fill_value))
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
self.assert_(np.isnan(result.fill_value))
def test_shift(self):
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=np.arange(6))
shifted = series.shift(0)
self.assertIsNot(shifted, series)
assert_sp_series_equal(shifted, series)
f = lambda s: s.shift(1)
_dense_series_compare(series, f)
f = lambda s: s.shift(-2)
_dense_series_compare(series, f)
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=bdate_range('1/1/2000', periods=6))
f = lambda s: s.shift(2, freq='B')
_dense_series_compare(series, f)
f = lambda s: s.shift(2, freq=datetools.bday)
_dense_series_compare(series, f)
def test_cumsum(self):
result = self.bseries.cumsum()
expected = self.bseries.to_dense().cumsum()
tm.assert_isinstance(result, SparseSeries)
self.assertEquals(result.name, self.bseries.name)
assert_series_equal(result.to_dense(), expected)
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
tm.assert_isinstance(result, Series)
assert_series_equal(result, expected)
def test_combine_first(self):
s = self.bseries
result = s[::2].combine_first(s)
result2 = s[::2].combine_first(s.to_dense())
expected = s[::2].to_dense().combine_first(s.to_dense())
expected = expected.to_sparse(fill_value=s.fill_value)
assert_sp_series_equal(result, result2)
assert_sp_series_equal(result, expected)
class TestSparseTimeSeries(tm.TestCase):
pass
class TestSparseDataFrame(tm.TestCase, test_frame.SafeForSparse):
klass = SparseDataFrame
_multiprocess_can_split_ = True
def setUp(self):
self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
self.dates = bdate_range('1/1/2011', periods=10)
self.frame = SparseDataFrame(self.data, index=self.dates)
self.iframe = SparseDataFrame(self.data, index=self.dates,
default_kind='integer')
values = self.frame.values.copy()
values[np.isnan(values)] = 0
self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=0,
index=self.dates)
values = self.frame.values.copy()
values[np.isnan(values)] = 2
self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
default_fill_value=2,
index=self.dates)
self.empty = SparseDataFrame()
def test_as_matrix(self):
empty = self.empty.as_matrix()
self.assertEqual(empty.shape, (0, 0))
no_cols = SparseDataFrame(index=np.arange(10))
mat = no_cols.as_matrix()
self.assertEqual(mat.shape, (10, 0))
no_index = SparseDataFrame(columns=np.arange(10))
mat = no_index.as_matrix()
self.assertEqual(mat.shape, (0, 10))
def test_copy(self):
cp = self.frame.copy()
tm.assert_isinstance(cp, SparseDataFrame)
assert_sp_frame_equal(cp, self.frame)
self.assert_(cp.index.is_(self.frame.index))
def test_constructor(self):
for col, series in compat.iteritems(self.frame):
tm.assert_isinstance(series, SparseSeries)
tm.assert_isinstance(self.iframe['A'].sp_index, IntIndex)
# constructed zframe from matrix above
self.assertEquals(self.zframe['A'].fill_value, 0)
assert_almost_equal([0, 0, 0, 0, 1, 2, 3, 4, 5, 6],
self.zframe['A'].values)
# construct no data
sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
for col, series in compat.iteritems(sdf):
tm.assert_isinstance(series, SparseSeries)
# construct from nested dict
data = {}
for c, s in compat.iteritems(self.frame):
data[c] = s.to_dict()
sdf = SparseDataFrame(data)
assert_sp_frame_equal(sdf, self.frame)
# TODO: test data is copied from inputs
# init dict with different index
idx = self.frame.index[:5]
cons = SparseDataFrame(self.frame, index=idx,
columns=self.frame.columns,
default_fill_value=self.frame.default_fill_value,
default_kind=self.frame.default_kind,
copy=True)
reindexed = self.frame.reindex(idx)
assert_sp_frame_equal(cons, reindexed, exact_indices=False)
# assert level parameter breaks reindex
self.assertRaises(TypeError, self.frame.reindex, idx, level=0)
repr(self.frame)
def test_constructor_ndarray(self):
# no index or columns
sp = SparseDataFrame(self.frame.values)
# 1d
sp = SparseDataFrame(self.data['A'], index=self.dates,
columns=['A'])
assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
# raise on level argument
self.assertRaises(TypeError, self.frame.reindex, columns=['A'],
level=1)
# wrong length index / columns
assertRaisesRegexp(
ValueError, "^Index length", SparseDataFrame, self.frame.values,
index=self.frame.index[:-1])
assertRaisesRegexp(
ValueError, "^Column length", SparseDataFrame, self.frame.values,
columns=self.frame.columns[:-1])
def test_constructor_empty(self):
sp = SparseDataFrame()
self.assertEqual(len(sp.index), 0)
self.assertEqual(len(sp.columns), 0)
def test_constructor_dataframe(self):
dense = self.frame.to_dense()
sp = SparseDataFrame(dense)
assert_sp_frame_equal(sp, self.frame)
def test_constructor_convert_index_once(self):
arr = np.array([1.5, 2.5, 3.5])
sdf = SparseDataFrame(columns=lrange(4), index=arr)
self.assertTrue(sdf[0].index is sdf[1].index)
def test_constructor_from_series(self):
# GH 2873
x = Series(np.random.randn(10000), name='a')
x = x.to_sparse(fill_value=0)
tm.assert_isinstance(x,SparseSeries)
df = SparseDataFrame(x)
tm.assert_isinstance(df,SparseDataFrame)
x = Series(np.random.randn(10000), name='a')
y = Series(np.random.randn(10000), name='b')
x2 = x.astype(float)
x2.ix[:9998] = np.NaN
x_sparse = x2.to_sparse(fill_value=np.NaN)
# Currently fails too with weird ufunc error
# df1 = SparseDataFrame([x_sparse, y])
y.ix[:9998] = 0
y_sparse = y.to_sparse(fill_value=0)
# without sparse value raises error
# df2 = SparseDataFrame([x2_sparse, y])
def test_dtypes(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
result = sdf.get_dtype_counts()
expected = Series({'float64': 4})
assert_series_equal(result, expected)
def test_str(self):
df = DataFrame(np.random.randn(10000, 4))
df.ix[:9998] = np.nan
sdf = df.to_sparse()
str(sdf)
def test_array_interface(self):
res = np.sqrt(self.frame)
dres = np.sqrt(self.frame.to_dense())
assert_frame_equal(res.to_dense(), dres)
def test_pickle(self):
def _test_roundtrip(frame):
pickled = pickle.dumps(frame, protocol=pickle.HIGHEST_PROTOCOL)
unpickled = pickle.loads(pickled)
assert_sp_frame_equal(frame, unpickled)
_test_roundtrip(SparseDataFrame())
self._check_all(_test_roundtrip)
def test_dense_to_sparse(self):
df = DataFrame({'A': [nan, nan, nan, 1, 2],
'B': [1, 2, nan, nan, nan]})
sdf = df.to_sparse()
tm.assert_isinstance(sdf, SparseDataFrame)
self.assert_(np.isnan(sdf.default_fill_value))
tm.assert_isinstance(sdf['A'].sp_index, BlockIndex)
tm.assert_frame_equal(sdf.to_dense(), df)
sdf = df.to_sparse(kind='integer')
tm.assert_isinstance(sdf['A'].sp_index, IntIndex)
df = DataFrame({'A': [0, 0, 0, 1, 2],
'B': [1, 2, 0, 0, 0]}, dtype=float)
sdf = df.to_sparse(fill_value=0)
self.assertEquals(sdf.default_fill_value, 0)
tm.assert_frame_equal(sdf.to_dense(), df)
    def test_density_series(self):
        # renamed: a second test_density is defined further down in this
        # class and would otherwise shadow this one
        ss = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
        self.assertEquals(ss.density, 0.7)
def test_sparse_to_dense(self):
pass
def test_sparse_series_ops(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.frame)
finally:
sys.stderr = tmp
def test_sparse_series_ops_i(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.iframe)
finally:
sys.stderr = tmp
def test_sparse_series_ops_z(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.zframe)
finally:
sys.stderr = tmp
def test_sparse_series_ops_fill(self):
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
self._check_frame_ops(self.fill_frame)
finally:
sys.stderr = tmp
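    # Editor's note: the stderr redirection in the four tests above
    # presumably silences NumPy warnings emitted during sparse arithmetic.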
def _check_frame_ops(self, frame):
fill = frame.default_fill_value
def _compare_to_dense(a, b, da, db, op):
sparse_result = op(a, b)
dense_result = op(da, db)
dense_result = dense_result.to_sparse(fill_value=fill)
assert_sp_frame_equal(sparse_result, dense_result,
exact_indices=False)
if isinstance(a, DataFrame) and isinstance(db, DataFrame):
mixed_result = op(a, db)
tm.assert_isinstance(mixed_result, SparseDataFrame)
assert_sp_frame_equal(mixed_result, sparse_result,
exact_indices=False)
opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
ops = [getattr(operator, name) for name in opnames]
fidx = frame.index
# time series operations
series = [frame['A'], frame['B'],
frame['C'], frame['D'],
frame['A'].reindex(fidx[:7]),
frame['A'].reindex(fidx[::2]),
SparseSeries([], index=[])]
for op in ops:
_compare_to_dense(frame, frame[::2], frame.to_dense(),
frame[::2].to_dense(), op)
for s in series:
_compare_to_dense(frame, s, frame.to_dense(),
s.to_dense(), op)
_compare_to_dense(s, frame, s.to_dense(),
frame.to_dense(), op)
# cross-sectional operations
series = [frame.xs(fidx[0]),
frame.xs(fidx[3]),
frame.xs(fidx[5]),
frame.xs(fidx[7]),
frame.xs(fidx[5])[:2]]
for op in ops:
for s in series:
_compare_to_dense(frame, s, frame.to_dense(),
s, op)
_compare_to_dense(s, frame, s,
frame.to_dense(), op)
# it works!
result = self.frame + self.frame.ix[:, ['A', 'B']]
def test_op_corners(self):
empty = self.empty + self.empty
self.assert_(empty.empty)
foo = self.frame + self.empty
tm.assert_isinstance(foo.index, DatetimeIndex)
assert_frame_equal(foo, self.frame * np.nan)
foo = self.empty + self.frame
assert_frame_equal(foo, self.frame * np.nan)
def test_scalar_ops(self):
pass
def test_getitem(self):
# 1585 select multiple columns
sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
result = sdf[['a', 'b']]
exp = sdf.reindex(columns=['a', 'b'])
assert_sp_frame_equal(result, exp)
self.assertRaises(Exception, sdf.__getitem__, ['a', 'd'])
def test_icol(self):
# 2227
result = self.frame.icol(0)
self.assertTrue(isinstance(result, SparseSeries))
assert_sp_series_equal(result, self.frame['A'])
# preserve sparse index type. #2251
data = {'A': [0, 1]}
iframe = SparseDataFrame(data, default_kind='integer')
self.assertEquals(type(iframe['A'].sp_index),
type(iframe.icol(0).sp_index))
def test_set_value(self):
        # OK, as the index gets converted to object
frame = self.frame.copy()
res = frame.set_value('foobar', 'B', 1.5)
self.assertEqual(res.index.dtype, 'object')
res = self.frame
res.index = res.index.astype(object)
res = self.frame.set_value('foobar', 'B', 1.5)
self.assertIsNot(res, self.frame)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res.get_value('foobar', 'B'), 1.5)
res2 = res.set_value('foobar', 'qux', 1.5)
self.assertIsNot(res2, res)
self.assert_numpy_array_equal(res2.columns,
list(self.frame.columns) + ['qux'])
self.assertEqual(res2.get_value('foobar', 'qux'), 1.5)
def test_fancy_index_misc(self):
# axis = 0
sliced = self.frame.ix[-2:, :]
expected = self.frame.reindex(index=self.frame.index[-2:])
assert_sp_frame_equal(sliced, expected)
# axis = 1
sliced = self.frame.ix[:, -2:]
expected = self.frame.reindex(columns=self.frame.columns[-2:])
assert_sp_frame_equal(sliced, expected)
def test_getitem_overload(self):
# slicing
sl = self.frame[:20]
assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20]))
# boolean indexing
d = self.frame.index[5]
indexer = self.frame.index > d
subindex = self.frame.index[indexer]
subframe = self.frame[indexer]
self.assert_numpy_array_equal(subindex, subframe.index)
self.assertRaises(Exception, self.frame.__getitem__, indexer[:-1])
def test_setitem(self):
def _check_frame(frame):
N = len(frame)
# insert SparseSeries
frame['E'] = frame['A']
tm.assert_isinstance(frame['E'], SparseSeries)
assert_sp_series_equal(frame['E'], frame['A'])
# insert SparseSeries differently-indexed
to_insert = frame['A'][::2]
frame['E'] = to_insert
expected = to_insert.to_dense().reindex(
frame.index).fillna(to_insert.fill_value)
assert_series_equal(frame['E'].to_dense(), expected)
# insert Series
frame['F'] = frame['A'].to_dense()
tm.assert_isinstance(frame['F'], SparseSeries)
assert_sp_series_equal(frame['F'], frame['A'])
# insert Series differently-indexed
to_insert = frame['A'].to_dense()[::2]
frame['G'] = to_insert
expected = to_insert.reindex(
frame.index).fillna(frame.default_fill_value)
assert_series_equal(frame['G'].to_dense(), expected)
# insert ndarray
frame['H'] = np.random.randn(N)
tm.assert_isinstance(frame['H'], SparseSeries)
to_sparsify = np.random.randn(N)
to_sparsify[N // 2:] = frame.default_fill_value
frame['I'] = to_sparsify
self.assertEquals(len(frame['I'].sp_values), N // 2)
# insert ndarray wrong size
self.assertRaises(Exception, frame.__setitem__, 'foo',
np.random.randn(N - 1))
# scalar value
frame['J'] = 5
self.assertEquals(len(frame['J'].sp_values), N)
self.assert_((frame['J'].sp_values == 5).all())
frame['K'] = frame.default_fill_value
self.assertEquals(len(frame['K'].sp_values), 0)
self._check_all(_check_frame)
def test_setitem_corner(self):
self.frame['a'] = self.frame['B']
assert_sp_series_equal(self.frame['a'], self.frame['B'])
def test_setitem_array(self):
arr = self.frame['B']
self.frame['E'] = arr
assert_sp_series_equal(self.frame['E'], self.frame['B'])
self.frame['F'] = arr[:-1]
index = self.frame.index[:-1]
assert_sp_series_equal(
self.frame['E'].reindex(index), self.frame['F'].reindex(index))
def test_delitem(self):
A = self.frame['A']
C = self.frame['C']
del self.frame['B']
self.assertNotIn('B', self.frame)
assert_sp_series_equal(self.frame['A'], A)
assert_sp_series_equal(self.frame['C'], C)
del self.frame['D']
self.assertNotIn('D', self.frame)
del self.frame['A']
self.assertNotIn('A', self.frame)
def test_set_columns(self):
self.frame.columns = self.frame.columns
self.assertRaises(Exception, setattr, self.frame, 'columns',
self.frame.columns[:-1])
def test_set_index(self):
self.frame.index = self.frame.index
self.assertRaises(Exception, setattr, self.frame, 'index',
self.frame.index[:-1])
def test_append(self):
a = self.frame[:5]
b = self.frame[5:]
appended = a.append(b)
assert_sp_frame_equal(appended, self.frame, exact_indices=False)
a = self.frame.ix[:5, :3]
b = self.frame.ix[5:]
appended = a.append(b)
assert_sp_frame_equal(
appended.ix[:, :3], self.frame.ix[:, :3], exact_indices=False)
def test_apply(self):
applied = self.frame.apply(np.sqrt)
tm.assert_isinstance(applied, SparseDataFrame)
assert_almost_equal(applied.values, np.sqrt(self.frame.values))
applied = self.fill_frame.apply(np.sqrt)
self.assertEqual(applied['A'].fill_value, np.sqrt(2))
# agg / broadcast
broadcasted = self.frame.apply(np.sum, broadcast=True)
tm.assert_isinstance(broadcasted, SparseDataFrame)
assert_frame_equal(broadcasted.to_dense(),
self.frame.to_dense().apply(np.sum, broadcast=True))
self.assertIs(self.empty.apply(np.sqrt), self.empty)
from pandas.core import nanops
applied = self.frame.apply(np.sum)
assert_series_equal(applied,
self.frame.to_dense().apply(nanops.nansum))
def test_apply_nonuq(self):
df_orig = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
df = df_orig.to_sparse()
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1., 4., 7.], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
# df.T breaks
df = df_orig.T.to_sparse()
rs = df.apply(lambda s: s[0], axis=0)
# no non-unique columns supported in sparse yet
# assert_series_equal(rs, xp)
def test_applymap(self):
# just test that it works
result = self.frame.applymap(lambda x: x * 2)
tm.assert_isinstance(result, SparseDataFrame)
def test_astype(self):
self.assertRaises(Exception, self.frame.astype, np.int64)
def test_fillna(self):
df = self.zframe.reindex(lrange(5))
result = df.fillna(0)
expected = df.to_dense().fillna(0).to_sparse(fill_value=0)
assert_sp_frame_equal(result, expected, exact_indices=False)
result = df.copy()
result.fillna(0, inplace=True)
expected = df.to_dense().fillna(0).to_sparse(fill_value=0)
assert_sp_frame_equal(result, expected, exact_indices=False)
result = df.copy()
result = df['A']
result.fillna(0, inplace=True)
assert_series_equal(result, df['A'].fillna(0))
def test_rename(self):
# just check this works
renamed = self.frame.rename(index=str)
renamed = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x)))
def test_corr(self):
res = self.frame.corr()
assert_frame_equal(res, self.frame.to_dense().corr())
def test_describe(self):
self.frame['foo'] = np.nan
self.frame.get_dtype_counts()
str(self.frame)
desc = self.frame.describe()
def test_join(self):
left = self.frame.ix[:, ['A', 'B']]
right = self.frame.ix[:, ['C', 'D']]
joined = left.join(right)
assert_sp_frame_equal(joined, self.frame, exact_indices=False)
right = self.frame.ix[:, ['B', 'D']]
self.assertRaises(Exception, left.join, right)
with tm.assertRaisesRegexp(ValueError, 'Other Series must have a name'):
self.frame.join(Series(np.random.randn(len(self.frame)),
index=self.frame.index))
def test_reindex(self):
def _check_frame(frame):
index = frame.index
sidx = index[::2]
sidx2 = index[:5]
sparse_result = frame.reindex(sidx)
dense_result = frame.to_dense().reindex(sidx)
assert_frame_equal(sparse_result.to_dense(), dense_result)
assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
dense_result)
sparse_result2 = sparse_result.reindex(index)
dense_result2 = dense_result.reindex(
index).fillna(frame.default_fill_value)
assert_frame_equal(sparse_result2.to_dense(), dense_result2)
# propagate CORRECT fill value
assert_almost_equal(sparse_result.default_fill_value,
frame.default_fill_value)
assert_almost_equal(sparse_result['A'].fill_value,
frame['A'].fill_value)
# length zero
length_zero = frame.reindex([])
self.assertEquals(len(length_zero), 0)
self.assertEquals(len(length_zero.columns), len(frame.columns))
self.assertEquals(len(length_zero['A']), 0)
# frame being reindexed has length zero
length_n = length_zero.reindex(index)
self.assertEquals(len(length_n), len(frame))
self.assertEquals(len(length_n.columns), len(frame.columns))
self.assertEquals(len(length_n['A']), len(frame))
# reindex columns
reindexed = frame.reindex(columns=['A', 'B', 'Z'])
self.assertEquals(len(reindexed.columns), 3)
assert_almost_equal(reindexed['Z'].fill_value,
frame.default_fill_value)
self.assert_(np.isnan(reindexed['Z'].sp_values).all())
_check_frame(self.frame)
_check_frame(self.iframe)
_check_frame(self.zframe)
_check_frame(self.fill_frame)
# with copy=False
reindexed = self.frame.reindex(self.frame.index, copy=False)
reindexed['F'] = reindexed['A']
self.assertIn('F', self.frame)
reindexed = self.frame.reindex(self.frame.index)
reindexed['G'] = reindexed['A']
self.assertNotIn('G', self.frame)
def test_reindex_fill_value(self):
rng = bdate_range('20110110', periods=20)
result = self.zframe.reindex(rng, fill_value=0)
expected = self.zframe.reindex(rng).fillna(0)
assert_sp_frame_equal(result, expected)
def test_take(self):
result = self.frame.take([1, 0, 2], axis=1)
expected = self.frame.reindex(columns=['B', 'A', 'C'])
assert_sp_frame_equal(result, expected)
def test_density(self):
df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
self.assertEquals(df.density, 0.75)
def test_to_dense(self):
def _check(frame):
dense_dm = frame.to_dense()
assert_frame_equal(frame, dense_dm)
self._check_all(_check)
def test_stack_sparse_frame(self):
def _check(frame):
dense_frame = frame.to_dense()
wp = Panel.from_dict({'foo': frame})
from_dense_lp = wp.to_frame()
from_sparse_lp = spf.stack_sparse_frame(frame)
self.assert_numpy_array_equal(from_dense_lp.values,
from_sparse_lp.values)
_check(self.frame)
_check(self.iframe)
# for now
self.assertRaises(Exception, _check, self.zframe)
self.assertRaises(Exception, _check, self.fill_frame)
def test_transpose(self):
def _check(frame):
transposed = frame.T
untransposed = transposed.T
assert_sp_frame_equal(frame, untransposed)
self._check_all(_check)
def test_shift(self):
def _check(frame):
shifted = frame.shift(0)
assert_sp_frame_equal(shifted, frame)
f = lambda s: s.shift(1)
_dense_frame_compare(frame, f)
f = lambda s: s.shift(-2)
_dense_frame_compare(frame, f)
f = lambda s: s.shift(2, freq='B')
_dense_frame_compare(frame, f)
f = lambda s: s.shift(2, freq=datetools.bday)
_dense_frame_compare(frame, f)
self._check_all(_check)
def test_count(self):
result = self.frame.count()
dense_result = self.frame.to_dense().count()
assert_series_equal(result, dense_result)
result = self.frame.count(1)
dense_result = self.frame.to_dense().count(1)
        # on win32, don't check dtype
assert_series_equal(result, dense_result, check_dtype=False)
def test_cumsum(self):
result = self.frame.cumsum()
expected = self.frame.to_dense().cumsum()
tm.assert_isinstance(result, SparseDataFrame)
assert_frame_equal(result.to_dense(), expected)
def _check_all(self, check_func):
check_func(self.frame)
check_func(self.iframe)
check_func(self.zframe)
check_func(self.fill_frame)
def test_combine_first(self):
df = self.frame
result = df[::2].combine_first(df)
result2 = df[::2].combine_first(df.to_dense())
expected = df[::2].to_dense().combine_first(df.to_dense())
expected = expected.to_sparse(fill_value=df.default_fill_value)
assert_sp_frame_equal(result, result2)
assert_sp_frame_equal(result, expected)
def test_combine_add(self):
df = self.frame.to_dense()
df2 = df.copy()
df2['C'][:3] = np.nan
df['A'][:3] = 5.7
result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
expected = df.add(df2, fill_value=0).to_sparse()
assert_sp_frame_equal(result, expected)
def test_isin(self):
sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
xp = sparse_df[sparse_df.flag == 1.]
rs = sparse_df[sparse_df.flag.isin([1.])]
assert_frame_equal(xp, rs)
def test_sparse_pow_issue(self):
# 2220
df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})
# note : no error without nan
df = SparseDataFrame({'A': [nan, 0, 1]})
# note that 2 ** df works fine, also df ** 1
result = 1 ** df
r1 = result.take([0], 1)['A']
r2 = result['A']
self.assertEqual(len(r2.sp_values), len(r1.sp_values))
def _dense_series_compare(s, f):
result = f(s)
assert(isinstance(result, SparseSeries))
dense_result = f(s.to_dense())
assert_series_equal(result.to_dense(), dense_result)
def _dense_frame_compare(frame, f):
result = f(frame)
    assert(isinstance(result, SparseDataFrame))
dense_result = f(frame.to_dense()).fillna(frame.default_fill_value)
assert_frame_equal(result.to_dense(), dense_result)
def panel_data1():
index = bdate_range('1/1/2011', periods=8)
return DataFrame({
'A': [nan, nan, nan, 0, 1, 2, 3, 4],
'B': [0, 1, 2, 3, 4, nan, nan, nan],
'C': [0, 1, 2, nan, nan, nan, 3, 4],
'D': [nan, 0, 1, nan, 2, 3, 4, nan]
}, index=index)
def panel_data2():
index = bdate_range('1/1/2011', periods=9)
return DataFrame({
'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5],
'B': [0, 1, 2, 3, 4, 5, nan, nan, nan],
'C': [0, 1, 2, nan, nan, nan, 3, 4, 5],
'D': [nan, 0, 1, nan, 2, 3, 4, 5, nan]
}, index=index)
def panel_data3():
index = bdate_range('1/1/2011', periods=10).shift(-2)
return DataFrame({
'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, 3, 4, 5, 6, nan, nan, nan],
'C': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'D': [nan, 0, 1, nan, 2, 3, 4, 5, 6, nan]
}, index=index)
class TestSparsePanel(tm.TestCase,
test_panel.SafeForLongAndSparse,
test_panel.SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_sp_panel_equal(x, y)
def setUp(self):
self.data_dict = {
'ItemA': panel_data1(),
'ItemB': panel_data2(),
'ItemC': panel_data3(),
'ItemD': panel_data1(),
}
self.panel = SparsePanel(self.data_dict)
@staticmethod
def _test_op(panel, op):
# arithmetic tests
result = op(panel, 1)
assert_sp_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_constructor(self):
self.assertRaises(ValueError, SparsePanel, self.data_dict,
items=['Item0', 'ItemA', 'ItemB'])
with tm.assertRaisesRegexp(TypeError,
"input must be a dict, a 'list' was passed"):
SparsePanel(['a', 'b', 'c'])
def test_from_dict(self):
fd = SparsePanel.from_dict(self.data_dict)
assert_sp_panel_equal(fd, self.panel)
def test_pickle(self):
def _test_roundtrip(panel):
pickled = pickle.dumps(panel, protocol=pickle.HIGHEST_PROTOCOL)
unpickled = pickle.loads(pickled)
tm.assert_isinstance(unpickled.items, Index)
tm.assert_isinstance(unpickled.major_axis, Index)
tm.assert_isinstance(unpickled.minor_axis, Index)
assert_sp_panel_equal(panel, unpickled)
_test_roundtrip(self.panel)
def test_dense_to_sparse(self):
wp = Panel.from_dict(self.data_dict)
dwp = wp.to_sparse()
tm.assert_isinstance(dwp['ItemA']['A'], SparseSeries)
def test_to_dense(self):
dwp = self.panel.to_dense()
dwp2 = Panel.from_dict(self.data_dict)
assert_panel_equal(dwp, dwp2)
def test_to_frame(self):
def _compare_with_dense(panel):
slp = panel.to_frame()
dlp = panel.to_dense().to_frame()
self.assert_numpy_array_equal(slp.values, dlp.values)
self.assert_(slp.index.equals(dlp.index))
_compare_with_dense(self.panel)
_compare_with_dense(self.panel.reindex(items=['ItemA']))
zero_panel = SparsePanel(self.data_dict, default_fill_value=0)
self.assertRaises(Exception, zero_panel.to_frame)
self.assertRaises(Exception, self.panel.to_frame,
filter_observations=False)
def test_long_to_wide_sparse(self):
pass
def test_values(self):
pass
def test_setitem(self):
self.panel['ItemE'] = self.panel['ItemC']
self.panel['ItemF'] = self.panel['ItemC'].to_dense()
assert_sp_frame_equal(self.panel['ItemE'], self.panel['ItemC'])
assert_sp_frame_equal(self.panel['ItemF'], self.panel['ItemC'])
assert_almost_equal(self.panel.items, ['ItemA', 'ItemB', 'ItemC',
'ItemD', 'ItemE', 'ItemF'])
self.assertRaises(Exception, self.panel.__setitem__, 'item6', 1)
def test_set_value(self):
def _check_loc(item, major, minor, val=1.5):
res = self.panel.set_value(item, major, minor, val)
self.assertIsNot(res, self.panel)
self.assertEquals(res.get_value(item, major, minor), val)
_check_loc('ItemA', self.panel.major_axis[4], self.panel.minor_axis[3])
_check_loc('ItemF', self.panel.major_axis[4], self.panel.minor_axis[3])
_check_loc('ItemF', 'foo', self.panel.minor_axis[3])
_check_loc('ItemE', 'foo', 'bar')
def test_delitem_pop(self):
del self.panel['ItemB']
assert_almost_equal(self.panel.items, ['ItemA', 'ItemC', 'ItemD'])
crackle = self.panel['ItemC']
pop = self.panel.pop('ItemC')
self.assertIs(pop, crackle)
assert_almost_equal(self.panel.items, ['ItemA', 'ItemD'])
self.assertRaises(KeyError, self.panel.__delitem__, 'ItemC')
def test_copy(self):
cop = self.panel.copy()
assert_sp_panel_equal(cop, self.panel)
def test_reindex(self):
def _compare_with_dense(swp, items, major, minor):
swp_re = swp.reindex(items=items, major=major,
minor=minor)
dwp_re = swp.to_dense().reindex(items=items, major=major,
minor=minor)
assert_panel_equal(swp_re.to_dense(), dwp_re)
_compare_with_dense(self.panel, self.panel.items[:2],
self.panel.major_axis[::2],
self.panel.minor_axis[::2])
_compare_with_dense(self.panel, None,
self.panel.major_axis[::2],
self.panel.minor_axis[::2])
self.assertRaises(ValueError, self.panel.reindex)
# TODO: do something about this later...
self.assertRaises(Exception, self.panel.reindex,
items=['item0', 'ItemA', 'ItemB'])
# test copying
cp = self.panel.reindex(self.panel.major_axis, copy=True)
cp['ItemA']['E'] = cp['ItemA']['A']
self.assertNotIn('E', self.panel['ItemA'])
def test_operators(self):
def _check_ops(panel):
def _dense_comp(op):
dense = panel.to_dense()
sparse_result = op(panel)
dense_result = op(dense)
assert_panel_equal(sparse_result.to_dense(), dense_result)
def _mixed_comp(op):
result = op(panel, panel.to_dense())
expected = op(panel.to_dense(), panel.to_dense())
assert_panel_equal(result, expected)
op1 = lambda x: x + 2
_dense_comp(op1)
op2 = lambda x: x.add(x.reindex(major=x.major_axis[::2]))
_dense_comp(op2)
op3 = lambda x: x.subtract(x.mean(0), axis=0)
_dense_comp(op3)
op4 = lambda x: x.subtract(x.mean(1), axis=1)
_dense_comp(op4)
op5 = lambda x: x.subtract(x.mean(2), axis=2)
_dense_comp(op5)
_mixed_comp(Panel.multiply)
_mixed_comp(Panel.subtract)
# TODO: this case not yet supported!
# op6 = lambda x: x.add(x.to_frame())
# _dense_comp(op6)
_check_ops(self.panel)
def test_major_xs(self):
def _dense_comp(sparse):
dense = sparse.to_dense()
for idx in sparse.major_axis:
dslice = dense.major_xs(idx)
sslice = sparse.major_xs(idx)
assert_frame_equal(dslice, sslice)
_dense_comp(self.panel)
def test_minor_xs(self):
def _dense_comp(sparse):
dense = sparse.to_dense()
for idx in sparse.minor_axis:
dslice = dense.minor_xs(idx)
sslice = sparse.minor_xs(idx).to_dense()
assert_frame_equal(dslice, sslice)
_dense_comp(self.panel)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
# nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure',
# '--with-profile'],
# exit=False)
|
import pytest
import networkx as nx
from networkx.testing import assert_edges_equal
def test_union_attributes():
g = nx.Graph()
g.add_node(0, x=4)
g.add_node(1, x=5)
g.add_edge(0, 1, size=5)
g.graph['name'] = 'g'
h = g.copy()
h.graph['name'] = 'h'
h.graph['attr'] = 'attr'
h.nodes[0]['x'] = 7
gh = nx.union(g, h, rename=('g', 'h'))
assert set(gh.nodes()) == set(['h0', 'h1', 'g0', 'g1'])
for n in gh:
graph, node = n
assert gh.nodes[n] == eval(graph).nodes[int(node)]
assert gh.graph['attr'] == 'attr'
    assert gh.graph['name'] == 'h'  # h graph attributes take precedence
def test_intersection():
G = nx.Graph()
H = nx.Graph()
G.add_nodes_from([1, 2, 3, 4])
G.add_edge(1, 2)
G.add_edge(2, 3)
H.add_nodes_from([1, 2, 3, 4])
H.add_edge(2, 3)
H.add_edge(3, 4)
I = nx.intersection(G, H)
assert set(I.nodes()) == set([1, 2, 3, 4])
assert sorted(I.edges()) == [(2, 3)]
def test_intersection_attributes():
g = nx.Graph()
g.add_node(0, x=4)
g.add_node(1, x=5)
g.add_edge(0, 1, size=5)
g.graph['name'] = 'g'
h = g.copy()
h.graph['name'] = 'h'
h.graph['attr'] = 'attr'
h.nodes[0]['x'] = 7
gh = nx.intersection(g, h)
assert set(gh.nodes()) == set(g.nodes())
assert set(gh.nodes()) == set(h.nodes())
assert sorted(gh.edges()) == sorted(g.edges())
h.remove_node(0)
pytest.raises(nx.NetworkXError, nx.intersection, g, h)
def test_intersection_multigraph_attributes():
g = nx.MultiGraph()
g.add_edge(0, 1, key=0)
g.add_edge(0, 1, key=1)
g.add_edge(0, 1, key=2)
h = nx.MultiGraph()
h.add_edge(0, 1, key=0)
h.add_edge(0, 1, key=3)
gh = nx.intersection(g, h)
assert set(gh.nodes()) == set(g.nodes())
assert set(gh.nodes()) == set(h.nodes())
assert sorted(gh.edges()) == [(0, 1)]
assert sorted(gh.edges(keys=True)) == [(0, 1, 0)]
def test_difference():
G = nx.Graph()
H = nx.Graph()
G.add_nodes_from([1, 2, 3, 4])
G.add_edge(1, 2)
G.add_edge(2, 3)
H.add_nodes_from([1, 2, 3, 4])
H.add_edge(2, 3)
H.add_edge(3, 4)
D = nx.difference(G, H)
assert set(D.nodes()) == set([1, 2, 3, 4])
assert sorted(D.edges()) == [(1, 2)]
D = nx.difference(H, G)
assert set(D.nodes()) == set([1, 2, 3, 4])
assert sorted(D.edges()) == [(3, 4)]
D = nx.symmetric_difference(G, H)
assert set(D.nodes()) == set([1, 2, 3, 4])
assert sorted(D.edges()) == [(1, 2), (3, 4)]
def test_difference2():
G = nx.Graph()
H = nx.Graph()
G.add_nodes_from([1, 2, 3, 4])
H.add_nodes_from([1, 2, 3, 4])
G.add_edge(1, 2)
H.add_edge(1, 2)
G.add_edge(2, 3)
D = nx.difference(G, H)
assert set(D.nodes()) == set([1, 2, 3, 4])
assert sorted(D.edges()) == [(2, 3)]
D = nx.difference(H, G)
assert set(D.nodes()) == set([1, 2, 3, 4])
assert sorted(D.edges()) == []
H.add_edge(3, 4)
D = nx.difference(H, G)
assert set(D.nodes()) == set([1, 2, 3, 4])
assert sorted(D.edges()) == [(3, 4)]
def test_difference_attributes():
g = nx.Graph()
g.add_node(0, x=4)
g.add_node(1, x=5)
g.add_edge(0, 1, size=5)
g.graph['name'] = 'g'
h = g.copy()
h.graph['name'] = 'h'
h.graph['attr'] = 'attr'
h.nodes[0]['x'] = 7
gh = nx.difference(g, h)
assert set(gh.nodes()) == set(g.nodes())
assert set(gh.nodes()) == set(h.nodes())
assert sorted(gh.edges()) == []
h.remove_node(0)
pytest.raises(nx.NetworkXError, nx.intersection, g, h)
def test_difference_multigraph_attributes():
g = nx.MultiGraph()
g.add_edge(0, 1, key=0)
g.add_edge(0, 1, key=1)
g.add_edge(0, 1, key=2)
h = nx.MultiGraph()
h.add_edge(0, 1, key=0)
h.add_edge(0, 1, key=3)
gh = nx.difference(g, h)
assert set(gh.nodes()) == set(g.nodes())
assert set(gh.nodes()) == set(h.nodes())
assert sorted(gh.edges()) == [(0, 1), (0, 1)]
assert sorted(gh.edges(keys=True)) == [(0, 1, 1), (0, 1, 2)]
def test_difference_raise():
G = nx.path_graph(4)
H = nx.path_graph(3)
pytest.raises(nx.NetworkXError, nx.difference, G, H)
pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H)
def test_symmetric_difference_multigraph():
g = nx.MultiGraph()
g.add_edge(0, 1, key=0)
g.add_edge(0, 1, key=1)
g.add_edge(0, 1, key=2)
h = nx.MultiGraph()
h.add_edge(0, 1, key=0)
h.add_edge(0, 1, key=3)
gh = nx.symmetric_difference(g, h)
assert set(gh.nodes()) == set(g.nodes())
assert set(gh.nodes()) == set(h.nodes())
assert sorted(gh.edges()) == 3 * [(0, 1)]
assert (sorted(sorted(e) for e in gh.edges(keys=True)) ==
[[0, 1, 1], [0, 1, 2], [0, 1, 3]])
def test_union_and_compose():
K3 = nx.complete_graph(3)
P3 = nx.path_graph(3)
G1 = nx.DiGraph()
G1.add_edge('A', 'B')
G1.add_edge('A', 'C')
G1.add_edge('A', 'D')
G2 = nx.DiGraph()
G2.add_edge('1', '2')
G2.add_edge('1', '3')
G2.add_edge('1', '4')
G = nx.union(G1, G2)
H = nx.compose(G1, G2)
assert_edges_equal(G.edges(), H.edges())
assert not G.has_edge('A', 1)
pytest.raises(nx.NetworkXError, nx.union, K3, P3)
H1 = nx.union(H, G1, rename=('H', 'G1'))
assert (sorted(H1.nodes()) ==
['G1A', 'G1B', 'G1C', 'G1D',
'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
H2 = nx.union(H, G2, rename=("H", ""))
assert (sorted(H2.nodes()) ==
['1', '2', '3', '4',
'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
assert not H1.has_edge('NB', 'NA')
G = nx.compose(G, G)
assert_edges_equal(G.edges(), H.edges())
G2 = nx.union(G2, G2, rename=('', 'copy'))
assert (sorted(G2.nodes()) ==
['1', '2', '3', '4', 'copy1', 'copy2', 'copy3', 'copy4'])
assert sorted(G2.neighbors('copy4')) == []
assert sorted(G2.neighbors('copy1')) == ['copy2', 'copy3', 'copy4']
assert len(G) == 8
assert nx.number_of_edges(G) == 6
E = nx.disjoint_union(G, G)
assert len(E) == 16
assert nx.number_of_edges(E) == 12
E = nx.disjoint_union(G1, G2)
assert sorted(E.nodes()) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
G = nx.Graph()
H = nx.Graph()
G.add_nodes_from([(1, {'a1': 1})])
H.add_nodes_from([(1, {'b1': 1})])
R = nx.compose(G, H)
assert R.nodes == {1: {'a1': 1, 'b1': 1}}
def test_union_multigraph():
G = nx.MultiGraph()
G.add_edge(1, 2, key=0)
G.add_edge(1, 2, key=1)
H = nx.MultiGraph()
H.add_edge(3, 4, key=0)
H.add_edge(3, 4, key=1)
GH = nx.union(G, H)
assert set(GH) == set(G) | set(H)
assert (set(GH.edges(keys=True)) ==
set(G.edges(keys=True)) | set(H.edges(keys=True)))
def test_disjoint_union_multigraph():
G = nx.MultiGraph()
G.add_edge(0, 1, key=0)
G.add_edge(0, 1, key=1)
H = nx.MultiGraph()
H.add_edge(2, 3, key=0)
H.add_edge(2, 3, key=1)
GH = nx.disjoint_union(G, H)
assert set(GH) == set(G) | set(H)
assert (set(GH.edges(keys=True)) ==
set(G.edges(keys=True)) | set(H.edges(keys=True)))
def test_compose_multigraph():
G = nx.MultiGraph()
G.add_edge(1, 2, key=0)
G.add_edge(1, 2, key=1)
H = nx.MultiGraph()
H.add_edge(3, 4, key=0)
H.add_edge(3, 4, key=1)
GH = nx.compose(G, H)
assert set(GH) == set(G) | set(H)
assert (set(GH.edges(keys=True)) ==
set(G.edges(keys=True)) | set(H.edges(keys=True)))
H.add_edge(1, 2, key=2)
GH = nx.compose(G, H)
assert set(GH) == set(G) | set(H)
assert (set(GH.edges(keys=True)) ==
set(G.edges(keys=True)) | set(H.edges(keys=True)))
def test_full_join_graph():
# Simple Graphs
G = nx.Graph()
G.add_node(0)
G.add_edge(1, 2)
H = nx.Graph()
H.add_edge(3, 4)
U = nx.full_join(G, H)
assert set(U) == set(G) | set(H)
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H))
# Rename
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == set(['g0', 'g1', 'g2', 'h3', 'h4'])
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H))
# Rename graphs with string-like nodes
G = nx.Graph()
G.add_node("a")
G.add_edge("b", "c")
H = nx.Graph()
H.add_edge("d", "e")
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == set(['ga', 'gb', 'gc', 'hd', 'he'])
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H))
# DiGraphs
G = nx.DiGraph()
G.add_node(0)
G.add_edge(1, 2)
H = nx.DiGraph()
H.add_edge(3, 4)
U = nx.full_join(G, H)
assert set(U) == set(G) | set(H)
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G)*len(H) * 2)
# DiGraphs Rename
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == set(['g0', 'g1', 'g2', 'h3', 'h4'])
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2)
def test_full_join_multigraph():
# MultiGraphs
G = nx.MultiGraph()
G.add_node(0)
G.add_edge(1, 2)
H = nx.MultiGraph()
H.add_edge(3, 4)
U = nx.full_join(G, H)
assert set(U) == set(G) | set(H)
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H))
# MultiGraphs rename
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == set(['g0', 'g1', 'g2', 'h3', 'h4'])
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H))
# MultiDiGraphs
G = nx.MultiDiGraph()
G.add_node(0)
G.add_edge(1, 2)
H = nx.MultiDiGraph()
H.add_edge(3, 4)
U = nx.full_join(G, H)
assert set(U) == set(G) | set(H)
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2)
# MultiDiGraphs rename
U = nx.full_join(G, H, rename=('g', 'h'))
assert set(U) == set(['g0', 'g1', 'g2', 'h3', 'h4'])
assert len(U) == len(G) + len(H)
assert (len(U.edges()) ==
len(G.edges()) + len(H.edges()) + len(G) * len(H) * 2)
def test_mixed_type_union():
G = nx.Graph()
H = nx.MultiGraph()
pytest.raises(nx.NetworkXError, nx.union, G, H)
pytest.raises(nx.NetworkXError, nx.disjoint_union, G, H)
pytest.raises(nx.NetworkXError, nx.intersection, G, H)
pytest.raises(nx.NetworkXError, nx.difference, G, H)
pytest.raises(nx.NetworkXError, nx.symmetric_difference, G, H)
pytest.raises(nx.NetworkXError, nx.compose, G, H)
|
import string
import itertools
def chunker(seq, size):
it = iter(seq)
while True:
chunk = tuple(itertools.islice(it, size))
if not chunk:
return
yield chunk
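# Editor's note: chunker splits any iterable into fixed-size tuples; the last
# chunk may be short, e.g.
#   list(chunker("ABCDE", 2)) == [('A', 'B'), ('C', 'D'), ('E',)]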
def prepare_input(dirty):
"""
Prepare the plaintext by up-casing it
and separating repeated letters with X's
"""
dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
clean = ""
if len(dirty) < 2:
return dirty
for i in range(len(dirty) - 1):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(clean) & 1:
clean += "X"
return clean
def generate_table(key):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
table = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(char)
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(char)
return table
def encode(plaintext, key):
table = generate_table(key)
plaintext = prepare_input(plaintext)
ciphertext = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for char1, char2 in chunker(plaintext, 2):
row1, col1 = divmod(table.index(char1), 5)
row2, col2 = divmod(table.index(char2), 5)
if row1 == row2:
ciphertext += table[row1 * 5 + (col1 + 1) % 5]
ciphertext += table[row2 * 5 + (col2 + 1) % 5]
elif col1 == col2:
ciphertext += table[((row1 + 1) % 5) * 5 + col1]
ciphertext += table[((row2 + 1) % 5) * 5 + col2]
else: # rectangle
ciphertext += table[row1 * 5 + col2]
ciphertext += table[row2 * 5 + col1]
return ciphertext
def decode(ciphertext, key):
table = generate_table(key)
plaintext = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for char1, char2 in chunker(ciphertext, 2):
row1, col1 = divmod(table.index(char1), 5)
row2, col2 = divmod(table.index(char2), 5)
if row1 == row2:
plaintext += table[row1 * 5 + (col1 - 1) % 5]
plaintext += table[row2 * 5 + (col2 - 1) % 5]
elif col1 == col2:
plaintext += table[((row1 - 1) % 5) * 5 + col1]
plaintext += table[((row2 - 1) % 5) * 5 + col2]
else: # rectangle
plaintext += table[row1 * 5 + col2]
plaintext += table[row2 * 5 + col1]
return plaintext
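# Editor's demo (a minimal sketch; key and message follow the classic
# Wikipedia example for this cipher):
if __name__ == "__main__":
    key = "playfair example"
    secret = encode("Hide the gold in the tree stump", key)
    print(secret)
    print(decode(secret, key))  # round-trips to 'HIDETHEGOLDINTHETREXESTUMP'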
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^login/$', views.Login),
url(r'^logout/$', views.Logout),
url(r'^home/$', views.Home),
url(r'^blog/$', views.Blog),
]
|
"""
File to run AISTATS experiments from shell
"""
# Author: Alicia Curth
import argparse
import sys
from typing import Any
import catenets.logger as log
from experiments.experiments_AISTATS21.ihdp_experiments import do_ihdp_experiments
from experiments.experiments_AISTATS21.simulations_AISTATS import main_AISTATS
log.add(sink=sys.stderr, level="DEBUG")
def init_arg() -> Any:
# arg parser if script is run from shell
parser = argparse.ArgumentParser()
parser.add_argument("--experiment", default="simulation", type=str)
parser.add_argument("--setting", default=1, type=int)
parser.add_argument("--models", default=None, type=str)
parser.add_argument("--file_name", default="results", type=str)
parser.add_argument("--n_repeats", default=10, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = init_arg()
if args.experiment == "simulation":
main_AISTATS(
setting=args.setting,
models=args.models,
file_name=args.file_name,
n_repeats=args.n_repeats,
)
elif args.experiment == "ihdp":
do_ihdp_experiments(
models=args.models, file_name=args.file_name, n_exp=args.n_repeats
)
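# Editor's note: example invocations (the script filename is a placeholder,
# not taken from the original):
#   python run_experiments.py --experiment simulation --setting 2 --n_repeats 10
#   python run_experiments.py --experiment ihdp --file_name ihdp_results --n_repeats 5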
|
import pytest
from fingan.wgan import WGAN
from fingan.TestData import SineData
import matplotlib.pyplot as plt
def testWGAN():
data = SineData(100, 400, [3, 10], [1, 5])
model = WGAN()
model.train(data, 20)
x = model.generate(1)
print(x.detach().numpy())
plt.plot(x.detach().numpy()[0])
plt.plot(data.dataset[0])
plt.show()
# plt.plot(model.losses['g'], label='g')
# plt.plot(model.losses['c'], label='c')
# plt.legend()
    # plt.show()  # commented to match the loss-plot lines above; the data
    # figure was already shown by the earlier plt.show()
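# Editor's note: a direct-run hook for quick smoke testing outside pytest
# (an addition, not part of the original test module):
if __name__ == '__main__':
    testWGAN()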
|
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name, unused-argument
"""Convert an NNVM graph to Relay."""
import json
import numpy
from tvm import relay, nd
from tvm.relay import op, expr, var
from tvm.relay.frontend.common import StrAttrsDict
from tvm.relay.frontend.nnvm_common import _rename
from .symbol import Symbol
from .compiler import graph_attr
from .graph import create as graph_create
def _nn_batch_flatten(children, attrs, odtype='float32'):
assert len(children) == 1
return op.nn.batch_flatten(children[0])
def _dense(children, attrs, odtype='float32'):
use_bias = attrs.get_bool('use_bias', True)
units = attrs.get_int('units')
dense = op.nn.dense(children[0], children[1], units=units)
if use_bias:
return op.nn.bias_add(dense, children[2])
else:
return dense
def _nn_softmax(children, attrs, odtype='float32'):
assert len(children) == 1
axis = attrs.get_int('axis', 1)
return op.nn.softmax(children[0], axis)
def _conv2d(children, attrs, odtype='float32'):
use_bias = attrs.get_bool('use_bias', True)
if use_bias:
data, weight, bias = children
else:
data, weight = children
kernel_size = attrs.get_int_tuple('kernel_size')
channels = attrs.get_int('channels')
strides = attrs.get_int_tuple('strides', (1, 1))
padding = attrs.get_int_tuple('padding', (0, 0))
dilation = attrs.get_int_tuple('dilation', (1, 1))
groups = attrs.get_int('groups', 1)
data_layout = attrs.get_str('layout', 'NCHW')
kernel_layout = attrs.get_str('kernel_layout', 'OIHW')
out_layout = ''
out_dtype = attrs.get_str('out_dtype', '')
conv_out = op.nn.conv2d(
data,
weight,
kernel_size=kernel_size,
channels=channels,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
data_layout=data_layout,
kernel_layout=kernel_layout,
out_layout=out_layout,
out_dtype=out_dtype)
if use_bias:
return op.nn.bias_add(conv_out, bias)
else:
return conv_out
def _conv2d_transpose(children, attrs, odtype='float32'):
use_bias = attrs.get_bool('use_bias', False)
if use_bias:
data, weight, bias = children
else:
data, weight = children
strides = attrs.get_int_tuple('strides', (1, 1))
padding = attrs.get_int_tuple('padding', (0, 0))
dilation = attrs.get_int_tuple('dilation', (1, 1))
groups = attrs.get_int('groups', 1)
data_layout = attrs.get_str('layout', 'NCHW')
kernel_layout = attrs.get_str('kernel_layout', 'OIHW')
out_dtype = attrs.get_str('out_dtype', '')
out_conv2d = op.nn.conv2d_transpose(
data,
weight,
strides=strides,
padding=padding,
dilation=dilation,
groups=groups,
data_layout=data_layout,
kernel_layout=kernel_layout,
out_dtype=out_dtype)
if use_bias:
return op.nn.bias_add(out_conv2d, bias)
else:
return out_conv2d
def _batch_norm(children, attrs, odtype='float32'):
    data, gamma, beta, moving_mean, moving_var = children
axis = attrs.get_int('axis', 1)
epsilon = attrs.get_float('epsilon', 1e-05)
center = attrs.get_bool('center', True)
scale = attrs.get_bool('scale', True)
return op.nn.batch_norm(
data,
gamma,
beta,
moving_mean,
        moving_var,
axis=axis,
epsilon=epsilon,
center=center,
scale=scale)[0]
def _max_pool2d(children, attrs, odtype='float32'):
assert len(children) == 1
data = children[0]
pool_size = attrs.get_int_tuple('pool_size', (1, 1))
strides = attrs.get_int_tuple('strides', (1, 1))
padding = attrs.get_int_tuple('padding', (0, 0))
layout = attrs.get_str('layout', 'NCHW')
ceil_mode = attrs.get_bool('ceil_mode', False)
return op.nn.max_pool2d(
data,
pool_size=pool_size,
strides=strides,
padding=padding,
layout=layout,
ceil_mode=ceil_mode)
def _reshape(children, attrs, odtype='float32'):
data = children[0]
shape = attrs.get_int_list('shape')
return op.reshape(data, shape)
def _transpose(children, attrs, odtype='float32'):
axes = attrs.get_int_list('axes', None)
return op.transpose(children[0], axes=axes)
def _add(children, attrs, odtype='float32'):
if len(children) == 1:
left = children[0]
scalar = attrs.get_float('scalar')
right = relay.const(scalar, dtype=odtype)
else:
assert len(children) == 2
left = children[0]
right = children[1]
return op.add(left, right)
def _subtract(children, attrs, odtype='float32'):
if len(children) == 1:
left = children[0]
scalar = attrs.get_float('scalar')
right = relay.const(scalar, dtype=odtype)
else:
assert len(children) == 2
left = children[0]
right = children[1]
return op.subtract(left, right)
def _rsubtract(children, attrs, odtype='float32'):
if len(children) == 1:
left = children[0]
scalar = attrs.get_float('scalar')
right = relay.const(scalar, dtype=odtype)
else:
assert len(children) == 2
left = children[0]
right = children[1]
return op.subtract(right, left)
def _multiply(children, attrs, odtype='float32'):
if len(children) == 1:
left = children[0]
scalar = attrs.get_float('scalar')
right = relay.const(scalar, dtype=odtype)
else:
assert len(children) == 2
left = children[0]
right = children[1]
return op.multiply(left, right)
def _divide(children, attrs, odtype='float32'):
if len(children) == 1:
left = children[0]
scalar = attrs.get_float('scalar')
right = relay.const(scalar, dtype=odtype)
else:
assert len(children) == 2
left = children[0]
right = children[1]
return op.divide(left, right)
def _rshift(children, attrs, odtype='float32'):
if len(children) == 1:
left = children[0]
scalar = attrs.get_float('scalar')
right = relay.const(scalar, dtype='int32')
else:
assert len(children) == 2
left = children[0]
right = children[1]
return op.right_shift(left, right)
def _clip(children, attrs, odtype='float32'):
a_min = attrs.get_float('a_min')
a_max = attrs.get_float('a_max')
return op.clip(children[0], a_min, a_max)
def _cast(children, attrs, odtype='float32'):
data = children[0]
dtype = attrs.get_str('dtype')
return data.astype(dtype)
def _expand_dims(children, attrs, odtype='float32'):
data = children[0]
axis = attrs.get_int('axis')
num_newaxis = attrs.get_int('num_newaxis', 1)
return op.transform.expand_dims(data, axis, num_newaxis=num_newaxis)
def broadcast_to(children, attrs, odtype='float32'):
# TODO(@jroesch) export broadcast to?
data = children[0]
shape = attrs.get_int_tuple('shape')
array = numpy.zeros(shape).astype(odtype)
rconst = relay.Constant(nd.array(array))
return op.broadcast_to_like(data, rconst)
def _copy(children, attrs, odtype='float32'):
return op.copy(children[0])
def _global_avg_pool2d(children, attrs, odtype='float32'):
data = children[0]
layout = attrs.get_str('layout', "NCHW")
return op.nn.global_avg_pool2d(data, layout)
def _avg_pool2d(children, attrs, odtype='float32'):
data = children[0]
pool_size = attrs.get_int_tuple('pool_size', (1, 1))
strides = attrs.get_int_tuple('strides', (1, 1))
padding = attrs.get_int_tuple('padding', (0, 0))
layout = attrs.get_str('layout', "NCHW")
ceil_mode = attrs.get_bool('ceil_mode', False)
    count_include_pad = attrs.get_bool('count_include_pad', False)
return op.nn.avg_pool2d(
data,
pool_size=pool_size,
strides=strides,
padding=padding,
layout=layout,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad)
def _upsampling(children, attrs, odtype='float32'):
scale = attrs.get_int('scale')
layout = attrs.get_str('layout', 'NCHW')
method = attrs.get_str('method', 'NEAREST_NEIGHBOR')
return op.nn.upsampling(
children[0],
scale=scale,
layout=layout,
method=method)
def _pad(children, attrs, odtype='float32'):
pad_value = attrs.get_float('pad_value', 0.0)
pad_width = attrs.get_tuple_tuple_int('pad_width')
return op.nn.pad(children[0], pad_width, pad_value=pad_value)
def _leaky_relu(children, attrs, odtype='float32'):
alpha = attrs.get_float('alpha')
return op.nn.leaky_relu(children[0], alpha)
def _full_like(children, attrs, odtype='float32'):
fill_value = relay.const(attrs.get_float('fill_value'), dtype='float32')
return op.full_like(children[0], fill_value)
def _greater(children, attrs, odtype='float32'):
out_type = attrs.get_str('out_type')
if out_type:
return op.greater(children[0], children[1]).astype(out_type)
else:
return op.greater(children[0], children[1])
def _greater_equal(children, attrs, odtype='float32'):
out_type = attrs.get_str('out_type', None)
if out_type:
return op.greater_equal(children[0], children[1]).astype(out_type)
else:
return op.greater_equal(children[0], children[1])
def _less(children, attrs, odtype='float32'):
out_type = attrs.get_str('out_type', None)
if out_type:
return op.less(children[0], children[1]).astype(out_type)
else:
return op.less(children[0], children[1])
def _less_equal(children, attrs, odtype='float32'):
out_type = attrs.get_str('out_type', None)
if out_type:
return op.less_equal(children[0], children[1]).astype(out_type)
else:
return op.less_equal(children[0], children[1])
def _strided_slice(children, attrs, odtype='float32'):
begin = attrs.get_int_list('begin')
end = attrs.get_int_list('end')
strides = attrs.get_int_list('strides', None)
return op.strided_slice(children[0], begin, end, strides=strides)
def _split(children, attrs, odtype='float32'):
indices_or_sections = None
try:
indices_or_sections = attrs.get_int('indices_or_sections', None)
except ValueError:
indices_or_sections = indices_or_sections or attrs.get_int_tuple(
'indices_or_sections')
axis = attrs.get_int('axis', 0)
return op.split(children[0], indices_or_sections, axis)
def _squeeze(children, attrs, odtype='float32'):
axis = None
try:
axis = [attrs.get_int('axis', None)]
except ValueError:
axis = axis or attrs.get_int_tuple('axis', None)
return op.squeeze(children[0], axis)
def _concatenate(children, attrs, odtype='float32'):
axis = attrs.get_int('axis', 1)
return op.concatenate(children, axis)
def _dropout(children, attrs, odtype='float32'):
rate = attrs.get_float('rate', 0.5)
return op.nn.dropout(children[0], rate)
NNVM_OP_2_RELAY_OP = {
'flatten': _nn_batch_flatten,
'dense': _dense,
'softmax': _nn_softmax,
'conv2d': _conv2d,
'batch_norm': _batch_norm,
'max_pool2d': _max_pool2d,
'reshape': _reshape,
'transpose': _transpose,
'dropout': _dropout,
# Addition
'__add_scalar__': _add,
'broadcast_add': _add,
'elemwise_add': _add,
# Subtraction
'__sub_scalar__': _subtract,
'__rsub_scalar__': _rsubtract,
'broadcast_sub': _subtract,
'elemwise_sub': _subtract,
# Multiply
'__mul_scalar__': _multiply,
'broadcast_mul': _multiply,
'elemwise_mul': _multiply,
# Division
'__div_scalar__': _divide,
'broadcast_div': _divide,
'elemwise_div': _divide,
# Negative
'negative': _rename("negative"),
# Comparsion
'greater': _greater,
'greater_equal': _greater_equal,
'less': _less,
'less_equal': _less_equal,
# Activations
'sigmoid': _rename('sigmoid'),
'relu': _rename('nn.relu'),
'exp': _rename('exp'),
'log': _rename('log'),
'tanh': _rename('tanh'),
'leaky_relu': _leaky_relu,
'clip': _clip,
'round': _rename('round'),
'cast': _cast,
'expand_dims': _expand_dims,
'broadcast_to': broadcast_to,
'__rshift_scalar__': _rshift,
'copy': _copy,
'global_avg_pool2d': _global_avg_pool2d,
'avg_pool2d': _avg_pool2d,
'conv2d_transpose': _conv2d_transpose,
'upsampling': _upsampling,
'pad': _pad,
'full_like': _full_like,
'strided_slice': _strided_slice,
'split': _split,
'squeeze': _squeeze,
'concatenate': _concatenate,
}
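# Dispatch sketch (kept as a comment so importing this module stays
# side-effect free): each table entry maps already-converted child
# expressions plus string attributes to a Relay call, e.g.
#   x = var('x', shape=(1, 10), dtype='float32')
#   out = NNVM_OP_2_RELAY_OP['softmax']([x], StrAttrsDict({'axis': '1'}), 'float32')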
def to_relay(graph, shape_dict, dtype_dict, params):
"""Convert an NNVM graph into the corresponding Relay expression.
Parameters
----------
graph : Graph
The input graph.
shape_dict : dict of str to shape
The input shape.
dtype_dict : dict of str to str/dtype
        The input dtypes.
params : dict of str to array
The parameters.
Returns
-------
(expr, params) : Tuple[relay.Expr, dict of str to array]
The corresponding Relay expression and parameters.
"""
if isinstance(graph, Symbol):
graph = graph_create(graph)
param_shapes = dict((k, params[k].shape) for k in params)
shape_dict = shape_dict.copy()
shape_dict.update(param_shapes)
graph = graph_attr.set_shape_inputs(graph, shape_dict)
graph = graph_attr.set_dtype_inputs(graph, dtype_dict)
graph = graph.apply(["InferShape", "InferType"])
shape = graph.json_attr("shape")
dtype = [graph_attr.TCODE_TO_DTYPE[di] for di in graph.json_attr("dtype")]
heads = [x[0] for x in json.loads(graph.json())['heads']]
gidx = graph.index
relay_map = {}
fn_params = []
output_ids = []
for nid, node in enumerate(gidx.nodes):
children = []
for i in node['inputs']:
child = relay_map[i[0]]
if isinstance(child, expr.TupleWrapper):
children.append(child[i[1]])
else:
children.append(child)
oshape = shape[gidx.entry_id(nid, 0)]
odtype = dtype[gidx.entry_id(nid, 0)]
attrs = node.get("attrs", {})
node_name = node["name"]
op_name = node["op"]
if op_name == "null":
v = var(node_name, shape=oshape, dtype=odtype)
fn_params.append(v)
relay_map[nid] = v
else:
if nid in heads:
output_ids.append(nid)
if op_name in NNVM_OP_2_RELAY_OP:
str_attrs = StrAttrsDict(attrs)
call = NNVM_OP_2_RELAY_OP[op_name](children, str_attrs, odtype)
relay_map[nid] = call
else:
raise Exception(
"nnvm.to_relay: unsupported operator: {0}".format(op_name))
outputs = [relay_map[nid] for nid in output_ids]
if len(outputs) == 1:
body = outputs[0]
else:
body = expr.Tuple(outputs)
func = relay.Function(fn_params, body)
return func, params
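# Usage sketch (assumes an NNVM symbol `sym` and a params dict of NDArray
# weights built elsewhere; the shape below is illustrative):
#   func, params = to_relay(sym, {'data': (1, 3, 224, 224)}, {'data': 'float32'}, params)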
|
import pytest
from bitcaster.tasks.event import trigger_event
# @pytest.mark.django_db
# def test_process_event_no_messages(subscription1, occurence1):
# channel = subscription1.channel
# channel.messages.all().delete()
# event = subscription1.event
# with pytest.raises(ObjectDoesNotExist):
# assert process_channel(channel.pk, event.pk, occurence1.pk, {})
# @pytest.mark.django_db
# def test_process_event(subscription1, occurence1):
# channel = subscription1.channel
# event = subscription1.event
# assert process_channel(channel.pk, event.pk, occurence1.pk, {})
#
# @pytest.mark.django_db
# def test_process_event_errors_threshold(subscription1):
# channel = subscription1.channel
# channel.errors_threshold = 0
# event = subscription1.event
# with pytest.raises(MaxChannelError):
# with mock.patch('bitcaster.models.Notification.log'):
# H = fqn(channel.handler)
# H.emit = MagicMock(side_effect=Mock(side_effect=Exception))
# # with mock.patch(,
# # emit=):
# assert process_channel(channel.pk, event.pk, {})
# assert not channel.enabled
@pytest.mark.django_db
def test_emit_event(occurence1, patch_metadata):
from bitcaster.models import DispatcherMetaData
DispatcherMetaData.objects.inspect()
assert trigger_event(occurence1.pk, {})
@pytest.mark.django_db
def test_acknowledge():
pass
|
def day8(fileName):
codeChars = 0
memoryChars = 0
encodedChars = 0
with open(fileName) as infile:
for line in infile:
encodedString = line.strip().replace('\\', '\\\\').replace('"', '\\"')
encodedChars += len(encodedString) + 2
lineCodeChars = len(line.strip())
codeChars += lineCodeChars
            lineString = eval(line.strip())
lineMemoryChars = len(lineString)
memoryChars += lineMemoryChars
return codeChars, memoryChars, encodedChars
codeChars, memoryChars, encodedChars = day8("8.txt")
print(f"{codeChars} - {memoryChars} = {codeChars - memoryChars}")
print(f"{encodedChars} - {codeChars} = {encodedChars - codeChars}")
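# Worked example on one literal from the puzzle statement: the line "aaa\"aaa"
# has 10 code characters, 7 in-memory characters, and encodes to 16 characters.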
|
from setuptools import setup, find_packages
import os
import shop
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
setup(
author="Christopher Glass",
author_email="tribaal@gmail.com",
name='django-shop',
version=shop.__version__,
description='An Advanced Django Shop',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='http://www.django-shop.org/',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.2',
'django-classy-tags>=0.3.3',
'django-polymorphic>=0.2',
'south>=0.7.2'
],
packages=find_packages(exclude=["example", "example.*"]),
include_package_data=True,
    zip_safe=False,
)
|
import logging
from multiprocessing import Pool
from tqdm import tqdm
log = logging.getLogger(__name__)
class Arena():
"""
An Arena class where any 2 agents can be pit against each other.
"""
def __init__(self, player1, player2, game, display=None):
"""
Input:
            player1, player2: two functions that take a board as input and return an action
game: Game object
display: a function that takes board as input and prints it (e.g.
display in othello/OthelloGame). Is necessary for verbose
mode.
see othello/OthelloPlayers.py for an example. See pit.py for pitting
human players/other baselines with each other.
"""
self.player1 = player1
self.player2 = player2
self.game = game
self.display = display
def playGame(self, verbose=False):
"""
Executes one episode of a game.
Returns:
either
winner: player who won the game (1 if player1, -1 if player2)
or
draw result returned from the game that is neither 1, -1, nor 0.
"""
players = [self.player2, None, self.player1]
curPlayer = 1
board = self.game.getInitBoard()
it = 0
while self.game.getGameEnded(board, curPlayer) == 0:
it += 1
if verbose:
assert self.display
#print("Turn ", str(it), "Player ", str(curPlayer))
log.info(f'Turn {str(it)} Player {str(curPlayer)}')
self.display(board)
# print('\t getting action', flush=True)
action = players[curPlayer + 1](self.game.getCanonicalForm(board, curPlayer))
# print('\t calculation valids', flush=True)
valids = self.game.getValidMoves(self.game.getCanonicalForm(board, curPlayer), 1)
if valids[action] == 0:
log.error(f'Action {action} is not valid!')
log.debug(f'valids = {valids}')
assert valids[action] > 0
# print('\t playing action', flush=True)
board, curPlayer = self.game.getNextState(board, curPlayer, action)
if verbose:
assert self.display
#print("Game over: Turn ", str(it), "Result ", str(self.game.getGameEnded(board, 1)))
log.info(f'Game over: Turn {str(it)} Result {str(self.game.getGameEnded(board, 1))}')
self.display(board)
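        # getGameEnded reports the result from curPlayer's perspective;
        # multiplying by curPlayer converts it to player1's perspective.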
return curPlayer * self.game.getGameEnded(board, curPlayer)
def playGamesMultiProcess(self, num, verbose=False):
        raise NotImplementedError('Does not work: multiprocessing cannot pickle the locally defined worker function')
"""
Plays num games in which player1 starts num/2 games and player2 starts
num/2 games.
Returns:
oneWon: games won by player1
twoWon: games won by player2
draws: games won by nobody
"""
num = int(num / 2)
oneWon = 0
twoWon = 0
draws = 0
log.info('Starting arena compare..')
def f(_):
print('X', end='', flush=True)
return self.playGame(verbose=verbose)
with Pool() as p:
results = p.map(f, range(num))
for gameResult in results:
if gameResult == 1:
oneWon += 1
elif gameResult == -1:
twoWon += 1
else:
draws += 1
# for _ in tqdm(range(num), desc="Arena.playGames (1)"):
# gameResult = self.playGame(verbose=verbose)
# if gameResult == 1:
# oneWon += 1
# elif gameResult == -1:
# twoWon += 1
# else:
# draws += 1
#
self.player1, self.player2 = self.player2, self.player1
with Pool() as p:
results = p.map(f, range(num))
for gameResult in results:
if gameResult == -1:
oneWon += 1
elif gameResult == 1:
twoWon += 1
else:
draws += 1
return oneWon, twoWon, draws
def playGames(self, num, verbose=False):
"""
Plays num games in which player1 starts num/2 games and player2 starts
num/2 games.
Returns:
oneWon: games won by player1
twoWon: games won by player2
draws: games won by nobody
"""
num = int(num / 2)
oneWon = 0
twoWon = 0
draws = 0
for _ in tqdm(range(num), desc="Arena.playGames (1)"):
gameResult = self.playGame(verbose=verbose)
if gameResult == 1:
oneWon += 1
elif gameResult == -1:
twoWon += 1
else:
draws += 1
self.player1, self.player2 = self.player2, self.player1
for _ in tqdm(range(num), desc="Arena.playGames (2)"):
gameResult = self.playGame(verbose=verbose)
if gameResult == -1:
oneWon += 1
elif gameResult == 1:
twoWon += 1
else:
draws += 1
return oneWon, twoWon, draws
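# Usage sketch (assumes the othello game and players referenced in the
# __init__ docstring are available; names follow the pit.py conventions):
#   from othello.OthelloGame import OthelloGame
#   from othello.OthelloPlayers import RandomPlayer
#   game = OthelloGame(6)
#   arena = Arena(RandomPlayer(game).play, RandomPlayer(game).play, game)
#   oneWon, twoWon, draws = arena.playGames(10)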
|
#-------------------------------------#
#   Create the YOLO class
#-------------------------------------#
import colorsys
import os
import time
import numpy as np
import torch
import torch.nn as nn
from PIL import Image, ImageDraw, ImageFont
from nets.yolo3 import YoloBody
from utils.utils import (DecodeBox, letterbox_image, non_max_suppression,
yolo_correct_boxes)
#--------------------------------------------#
#   To predict with your own trained model you need to change 2 parameters:
#   both model_path and classes_path must be modified!
#   If a shape mismatch occurs, double-check the model_path and
#   classes_path values that were used during training.
#--------------------------------------------#
class YOLO(object):
_defaults = {
"model_path" : 'logs/Epoch100-Total_Loss19.3690-Val_Loss13.4382.pth',
"anchors_path" : 'model_data/yolo_anchors.txt',
"classes_path" : 'model_data/new_classes.txt',
"model_image_size" : (416, 416, 3),
"confidence" : 0.5,
"iou" : 0.3,
"cuda" : True,
#---------------------------------------------------------------------#
        #   This flag controls whether letterbox_image is used for a
        #   distortion-free resize of the input; repeated tests showed
        #   that a plain resize works better.
#---------------------------------------------------------------------#
"letterbox_image" : False,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
#---------------------------------------------------#
    #   Initialize YOLO
#---------------------------------------------------#
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.generate()
#---------------------------------------------------#
    #   Get all the class names
#---------------------------------------------------#
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
#---------------------------------------------------#
    #   Get all the anchor boxes
#---------------------------------------------------#
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape([-1, 3, 2])[::-1,:,:]
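    # e.g. 9 comma-separated width,height pairs become shape (3, 3, 2):
    # three detection scales x three anchors x (w, h), largest scale first.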
#---------------------------------------------------#
    #   Build the model
#---------------------------------------------------#
def generate(self):
self.num_classes = len(self.class_names)
#---------------------------------------------------#
        #   Build the yolov3 model
#---------------------------------------------------#
self.net = YoloBody(self.anchors, self.num_classes)
#---------------------------------------------------#
        #   Load the yolov3 model weights
#---------------------------------------------------#
print('Loading weights into state dict...')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state_dict = torch.load(self.model_path, map_location=device)
self.net.load_state_dict(state_dict)
self.net = self.net.eval()
if self.cuda:
self.net = nn.DataParallel(self.net)
self.net = self.net.cuda()
#---------------------------------------------------#
        #   Create the decoding tools for the three feature layers
#---------------------------------------------------#
self.yolo_decodes = []
for i in range(3):
self.yolo_decodes.append(DecodeBox(self.anchors[i], self.num_classes, (self.model_image_size[1], self.model_image_size[0])))
print('{} model, anchors, and classes loaded.'.format(self.model_path))
        # Use a different color for each class when drawing boxes
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
#---------------------------------------------------#
    #   Detect objects in an image
#---------------------------------------------------#
def detect_image(self, image):
#---------------------------------------------------------#
        #   Convert the image to RGB here so grayscale inputs do not error at prediction time.
#---------------------------------------------------------#
image = image.convert('RGB')
image_shape = np.array(np.shape(image)[0:2])
#---------------------------------------------------------#
        #   Pad the image with gray bars for a distortion-free resize;
        #   a plain resize can also be used for detection.
#---------------------------------------------------------#
if self.letterbox_image:
crop_img = np.array(letterbox_image(image, (self.model_image_size[1], self.model_image_size[0])))
else:
crop_img = image.resize((self.model_image_size[1], self.model_image_size[0]), Image.BICUBIC)
photo = np.array(crop_img,dtype = np.float32) / 255.0
photo = np.transpose(photo, (2, 0, 1))
#---------------------------------------------------------#
        #   Add the batch_size dimension
#---------------------------------------------------------#
images = [photo]
with torch.no_grad():
images = torch.from_numpy(np.asarray(images))
if self.cuda:
images = images.cuda()
#---------------------------------------------------------#
            #   Feed the image into the network for prediction!
#---------------------------------------------------------#
outputs = self.net(images)
output_list = []
for i in range(3):
output_list.append(self.yolo_decodes[i](outputs[i]))
#---------------------------------------------------------#
            #   Stack the predicted boxes, then run non-maximum suppression
#---------------------------------------------------------#
output = torch.cat(output_list, 1)
batch_detections = non_max_suppression(output, self.num_classes, conf_thres=self.confidence, nms_thres=self.iou)
#---------------------------------------------------------#
        #   If no objects are detected, return the original image
#---------------------------------------------------------#
        try:
            batch_detections = batch_detections[0].cpu().numpy()
        except Exception:
return image
#---------------------------------------------------------#
        #   Filter the predicted boxes by score
#---------------------------------------------------------#
top_index = batch_detections[:, 4] * batch_detections[:, 5] > self.confidence
top_conf = batch_detections[top_index, 4] * batch_detections[top_index, 5]
top_label = np.array(batch_detections[top_index, -1],np.int32)
top_bboxes = np.array(batch_detections[top_index, :4])
top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(top_bboxes[:,0],-1),np.expand_dims(top_bboxes[:,1],-1),np.expand_dims(top_bboxes[:,2],-1),np.expand_dims(top_bboxes[:,3],-1)
#-----------------------------------------------------------------#
        #   letterbox_image pads the image with gray bars before it enters the
        #   network, so the generated top_bboxes are relative to the padded
        #   image; adjust them here to strip the padded region.
#-----------------------------------------------------------------#
if self.letterbox_image:
boxes = yolo_correct_boxes(top_ymin, top_xmin, top_ymax, top_xmax, np.array([self.model_image_size[0],self.model_image_size[1]]), image_shape)
else:
top_xmin = top_xmin / self.model_image_size[1] * image_shape[1]
top_ymin = top_ymin / self.model_image_size[0] * image_shape[0]
top_xmax = top_xmax / self.model_image_size[1] * image_shape[1]
top_ymax = top_ymax / self.model_image_size[0] * image_shape[0]
boxes = np.concatenate([top_ymin,top_xmin,top_ymax,top_xmax], axis=-1)
font = ImageFont.truetype(font='model_data/simhei.ttf',size=np.floor(3e-2 * np.shape(image)[1] + 0.5).astype('int32'))
thickness = max((np.shape(image)[0] + np.shape(image)[1]) // self.model_image_size[0], 1)
for i, c in enumerate(top_label):
predicted_class = self.class_names[c]
score = top_conf[i]
top, left, bottom, right = boxes[i]
top = top - 5
left = left - 5
bottom = bottom + 5
right = right + 5
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(np.shape(image)[0], np.floor(bottom + 0.5).astype('int32'))
right = min(np.shape(image)[1], np.floor(right + 0.5).astype('int32'))
        # Draw the boxes
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
label = label.encode('utf-8')
print(label, top, left, bottom, right)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[self.class_names.index(predicted_class)])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[self.class_names.index(predicted_class)])
draw.text(text_origin, str(label,'UTF-8'), fill=(0, 0, 0), font=font)
del draw
return image
def get_FPS(self, image, test_interval):
image_shape = np.array(np.shape(image)[0:2])
#---------------------------------------------------------#
        #   Pad the image with gray bars for a distortion-free resize;
        #   a plain resize can also be used for detection.
#---------------------------------------------------------#
if self.letterbox_image:
crop_img = np.array(letterbox_image(image, (self.model_image_size[1],self.model_image_size[0])))
else:
crop_img = image.convert('RGB')
crop_img = crop_img.resize((self.model_image_size[1],self.model_image_size[0]), Image.BICUBIC)
photo = np.array(crop_img,dtype = np.float32) / 255.0
photo = np.transpose(photo, (2, 0, 1))
#---------------------------------------------------------#
        #   Add the batch_size dimension
#---------------------------------------------------------#
images = [photo]
with torch.no_grad():
images = torch.from_numpy(np.asarray(images))
if self.cuda:
images = images.cuda()
outputs = self.net(images)
output_list = []
for i in range(3):
output_list.append(self.yolo_decodes[i](outputs[i]))
output = torch.cat(output_list, 1)
batch_detections = non_max_suppression(output, len(self.class_names),
conf_thres=self.confidence,
nms_thres=self.iou)
try:
batch_detections = batch_detections[0].cpu().numpy()
top_index = batch_detections[:,4]*batch_detections[:,5] > self.confidence
top_conf = batch_detections[top_index,4]*batch_detections[top_index,5]
top_label = np.array(batch_detections[top_index,-1],np.int32)
top_bboxes = np.array(batch_detections[top_index,:4])
top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(top_bboxes[:,0],-1),np.expand_dims(top_bboxes[:,1],-1),np.expand_dims(top_bboxes[:,2],-1),np.expand_dims(top_bboxes[:,3],-1)
if self.letterbox_image:
boxes = yolo_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.model_image_size[0],self.model_image_size[1]]),image_shape)
else:
top_xmin = top_xmin / self.model_image_size[1] * image_shape[1]
top_ymin = top_ymin / self.model_image_size[0] * image_shape[0]
top_xmax = top_xmax / self.model_image_size[1] * image_shape[1]
top_ymax = top_ymax / self.model_image_size[0] * image_shape[0]
boxes = np.concatenate([top_ymin,top_xmin,top_ymax,top_xmax], axis=-1)
        except Exception:
pass
t1 = time.time()
for _ in range(test_interval):
with torch.no_grad():
outputs = self.net(images)
output_list = []
for i in range(3):
output_list.append(self.yolo_decodes[i](outputs[i]))
output = torch.cat(output_list, 1)
batch_detections = non_max_suppression(output, len(self.class_names),
conf_thres=self.confidence,
nms_thres=self.iou)
try:
batch_detections = batch_detections[0].cpu().numpy()
top_index = batch_detections[:,4]*batch_detections[:,5] > self.confidence
top_conf = batch_detections[top_index,4]*batch_detections[top_index,5]
top_label = np.array(batch_detections[top_index,-1],np.int32)
top_bboxes = np.array(batch_detections[top_index,:4])
top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(top_bboxes[:,0],-1),np.expand_dims(top_bboxes[:,1],-1),np.expand_dims(top_bboxes[:,2],-1),np.expand_dims(top_bboxes[:,3],-1)
if self.letterbox_image:
boxes = yolo_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.model_image_size[0],self.model_image_size[1]]),image_shape)
else:
top_xmin = top_xmin / self.model_image_size[1] * image_shape[1]
top_ymin = top_ymin / self.model_image_size[0] * image_shape[0]
top_xmax = top_xmax / self.model_image_size[1] * image_shape[1]
top_ymax = top_ymax / self.model_image_size[0] * image_shape[0]
boxes = np.concatenate([top_ymin,top_xmin,top_ymax,top_xmax], axis=-1)
            except Exception:
pass
t2 = time.time()
tact_time = (t2 - t1) / test_interval
return tact_time
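# Usage sketch (weights/classes paths are the defaults above; the image
# path is illustrative):
#   yolo = YOLO()
#   image = Image.open('img/street.jpg')
#   r_image = yolo.detect_image(image)
#   r_image.show()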
|
# PACKAGE IMPORTS FOR EXTERNAL USAGE
from tests.unit_tests.dataset_interface_test import TestDatasetInterface
from tests.unit_tests.factories_test import FactoriesTest
from tests.unit_tests.load_checkpoint_from_direct_path_test import LoadCheckpointFromDirectPathTest
from tests.unit_tests.strictload_enum_test import StrictLoadEnumTest
from tests.unit_tests.zero_weight_decay_on_bias_bn_test import ZeroWdForBnBiasTest
from tests.unit_tests.save_ckpt_test import SaveCkptListUnitTest
from tests.unit_tests.yolov5_unit_test import TestYoloV5
from tests.unit_tests.all_architectures_test import AllArchitecturesTest
from tests.unit_tests.average_meter_test import TestAverageMeter
from tests.unit_tests.module_utils_test import TestModuleUtils
from tests.unit_tests.repvgg_unit_test import TestRepVgg
from tests.unit_tests.test_without_train_test import TestWithoutTrainTest
from tests.unit_tests.train_with_intialized_param_args_test import TrainWithInitializedObjectsTest
from tests.unit_tests.test_auto_augment import TestAutoAugment
from tests.unit_tests.ohem_loss_test import OhemLossTest
from tests.unit_tests.early_stop_test import EarlyStopTest
from tests.unit_tests.segmentation_transforms_test import SegmentationTransformsTest
from tests.unit_tests.pretrained_models_unit_test import PretrainedModelsUnitTest
from tests.unit_tests.conv_bn_relu_test import TestConvBnRelu
from tests.unit_tests.initialize_with_dataloaders_test import InitializeWithDataloadersTest
__all__ = ['TestDatasetInterface', 'ZeroWdForBnBiasTest', 'SaveCkptListUnitTest',
'TestYoloV5', 'AllArchitecturesTest', 'TestAverageMeter', 'TestModuleUtils', 'TestRepVgg', 'TestWithoutTrainTest',
'LoadCheckpointFromDirectPathTest', 'StrictLoadEnumTest', 'TrainWithInitializedObjectsTest', 'TestAutoAugment',
'OhemLossTest', 'EarlyStopTest', 'SegmentationTransformsTest', 'PretrainedModelsUnitTest', 'TestConvBnRelu',
'FactoriesTest', 'InitializeWithDataloadersTest']
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension that adds an autosummary:: directive, which can be
used to generate function/method/attribute/etc. summary lists, similar
    to those output e.g. by Epydoc and other API doc generation tools.
An :autolink: role is also provided.
autosummary directive
---------------------
The autosummary directive has the form::
.. autosummary::
:nosignatures:
:toctree: generated/
module.function_1
module.function_2
...
and it generates an output table (containing signatures, optionally)
======================== =============================================
module.function_1(args) Summary line from the docstring of function_1
module.function_2(args) Summary line from the docstring
...
======================== =============================================
If the :toctree: option is specified, files matching the function names
are inserted to the toctree with the given prefix:
generated/module.function_1
generated/module.function_2
...
Note: The file names contain the module:: or currentmodule:: prefixes.
.. seealso:: autosummary_generate.py
autolink role
-------------
The autolink role functions as ``:obj:`` when the name referred can be
resolved to a Python object, and otherwise it becomes simple emphasis.
This can be used as the default role to make links 'smart'.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import inspect
import posixpath
from six import string_types
from types import ModuleType
from six import text_type
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from docutils import nodes
import sphinx
from sphinx import addnodes
from sphinx.util import import_object, rst
from sphinx.util.compat import Directive
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.ext.autodoc import Options
# -- autosummary_toc node ------------------------------------------------------
class autosummary_toc(nodes.comment):
pass
def process_autosummary_toc(app, doctree):
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
env = app.builder.env
crawled = {}
def crawl_toc(node, depth=1):
crawled[node] = True
for j, subnode in enumerate(node):
try:
if (isinstance(subnode, autosummary_toc) and
isinstance(subnode[0], addnodes.toctree)):
env.note_toctree(env.docname, subnode[0])
continue
except IndexError:
continue
if not isinstance(subnode, nodes.section):
continue
if subnode not in crawled:
crawl_toc(subnode, depth+1)
crawl_toc(doctree)
def autosummary_toc_visit_html(self, node):
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
pass
# -- autosummary_table node ----------------------------------------------------
class autosummary_table(nodes.comment):
pass
def autosummary_table_visit_html(self, node):
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
for row in tbody:
col1_entry = row[0]
par = col1_entry[0]
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
new_text = text_type(subnode.astext())
new_text = new_text.replace(u" ", u"\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
# -- autodoc integration -------------------------------------------------------
class FakeDirective(object):
env = {}
genopt = Options()
def get_documenter(obj, parent):
"""Get an autodoc.Documenter class suitable for documenting the given
object.
*obj* is the Python object to be documented, and *parent* is an
another Python object (e.g. a module or a class) to which *obj*
belongs to.
"""
from sphinx.ext.autodoc import AutoDirective, DataDocumenter, \
ModuleDocumenter
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
return ModuleDocumenter
# Construct a fake documenter for *parent*
if parent is not None:
parent_doc_cls = get_documenter(parent, None)
else:
parent_doc_cls = ModuleDocumenter
if hasattr(parent, '__name__'):
parent_doc = parent_doc_cls(FakeDirective(), parent.__name__)
else:
parent_doc = parent_doc_cls(FakeDirective(), "")
    # Get the correct documenter class for *obj*
classes = [cls for cls in AutoDirective._registry.values()
if cls.can_document_member(obj, '', False, parent_doc)]
if classes:
classes.sort(key=lambda cls: cls.priority)
return classes[-1]
else:
return DataDocumenter
# -- .. autosummary:: ----------------------------------------------------------
class Autosummary(Directive):
"""
Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec = {
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'template': directives.unchanged,
}
def warn(self, msg):
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
self.env = env = self.state.document.settings.env
self.genopt = Options()
self.warnings = []
self.result = ViewList()
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
items = self.get_items(names)
nodes = self.get_table(items)
if 'toctree' in self.options:
dirname = posixpath.dirname(env.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
for name, sig, summary, real_name in items:
docname = posixpath.join(tree_prefix, real_name)
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in env.found_docs:
self.warn('toctree references unknown document %r'
% docname)
docnames.append(docname)
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docn) for docn in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode = autosummary_toc('', '', tocnode)
nodes.append(tocnode)
return self.warnings + nodes
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = []
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent, modname = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
self.result = ViewList() # initialize for each documenter
full_name = real_name
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
# of inner classes can be documented
full_name = modname + '::' + full_name[len(modname)+1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, full_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if documenter.options.members and not documenter.check_module():
continue
# try to also get a source code analyzer for attribute docs
try:
documenter.analyzer = ModuleAnalyzer.for_module(
documenter.get_real_modname())
# parse right now, to get PycodeErrors on parsing (results will
# be cached anyway)
documenter.analyzer.find_attr_docs()
except PycodeError as err:
documenter.env.app.debug(
'[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
documenter.analyzer = None
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
sig = sig.replace('*', r'\*')
# -- Grab the summary
documenter.add_content(None)
doc = list(documenter.process_doc([self.result.data]))
while doc and not doc[0].strip():
doc.pop(0)
# If there's a blank line, then we can assume the first sentence /
# paragraph has ended, so anything after shouldn't be part of the
# summary
for i, piece in enumerate(doc):
if not piece.strip():
doc = doc[:i]
break
# Try to find the "first sentence", which may span multiple lines
m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
        table_spec['spec'] = r'p{0.5\linewidth}p{0.5\linewidth}'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def mangle_signature(sig, max_chars=30):
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
# Strip strings (which can contain things that confuse the code below)
s = re.sub(r"\\\\", "", s)
s = re.sub(r"\\'", "", s)
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
args = []
opts = []
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Produce a more compact signature
sig = limited_join(", ", args, max_chars=max_chars-2)
if opts:
if not sig:
sig = "[%s]" % limited_join(", ", opts, max_chars=max_chars-4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars-len(sig)-4-2)
return u"(%s)" % sig
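# Illustrative behaviour with the default max_chars: optional arguments are
# folded into a bracketed group, e.g.
#   mangle_signature("(a, b, c='x', d=1)")  ->  u"(a, b[, c, d])"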
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
Returns: joined_string
"""
full_str = sep.join(items)
if len(full_str) < max_chars:
return full_str
n_chars = 0
n_items = 0
for j, item in enumerate(items):
n_chars += len(item) + len(sep)
if n_chars < max_chars - len(overflow_marker):
n_items += 1
else:
break
return sep.join(list(items[:n_items]) + [overflow_marker])
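# For instance, limited_join(", ", ["aaa", "bbb", "ccc"], max_chars=10)
# returns "aaa, ..." since the full join would exceed the limit.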
# -- Importing items -----------------------------------------------------------
def get_import_prefixes_from_env(env):
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
prefixes = [None]
currmodule = env.ref_context.get('py:module')
if currmodule:
prefixes.insert(0, currmodule)
currclass = env.ref_context.get('py:class')
if currclass:
if currmodule:
prefixes.insert(0, currmodule + "." + currclass)
else:
prefixes.insert(0, currclass)
return prefixes
def import_by_name(name, prefixes=[None]):
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
tried = []
for prefix in prefixes:
try:
if prefix:
prefixed_name = '.'.join([prefix, name])
else:
prefixed_name = name
obj, parent, modname = _import_by_name(prefixed_name)
return prefixed_name, obj, parent, modname
except ImportError:
tried.append(prefixed_name)
raise ImportError('no module named %s' % ' or '.join(tried))
def _import_by_name(name):
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
# try first interpret `name` as MODNAME.OBJ
modname = '.'.join(name_parts[:-1])
if modname:
try:
__import__(modname)
mod = sys.modules[modname]
return getattr(mod, name_parts[-1]), mod, modname
except (ImportError, IndexError, AttributeError):
pass
# ... then as MODNAME, MODNAME.OBJ1, MODNAME.OBJ1.OBJ2, ...
last_j = 0
modname = None
for j in reversed(range(1, len(name_parts)+1)):
last_j = j
modname = '.'.join(name_parts[:j])
try:
__import__(modname)
except ImportError:
continue
if modname in sys.modules:
break
if last_j < len(name_parts):
parent = None
obj = sys.modules[modname]
for obj_name in name_parts[last_j:]:
parent = obj
obj = getattr(obj, obj_name)
return obj, parent, modname
else:
return sys.modules[modname], None, modname
except (ValueError, ImportError, AttributeError, KeyError) as e:
raise ImportError(*e.args)
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner,
options={}, content=[]):
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
prefixes = get_import_prefixes_from_env(env)
try:
name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
content = pnode[0]
r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
classes=content['classes'])
return r
def get_rst_suffix(app):
def get_supported_format(suffix):
parser_class = app.config.source_parsers.get(suffix)
if parser_class is None:
return ('restructuredtext',)
if isinstance(parser_class, string_types):
parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
return None
def process_generate_options(app):
genfiles = app.config.autosummary_generate
if genfiles and not hasattr(genfiles, '__len__'):
env = app.builder.env
genfiles = [env.doc2path(x, base=None) for x in env.found_docs
if os.path.isfile(env.doc2path(x))]
if not genfiles:
return
from sphinx.ext.autosummary.generate import generate_autosummary_docs
ext = app.config.source_suffix
genfiles = [genfile + (not genfile.endswith(tuple(ext)) and ext[0] or '')
for genfile in genfiles]
suffix = get_rst_suffix(app)
if suffix is None:
        app.warn('autosummary generates .rst files internally. '
                 'But your source_suffix does not contain .rst. Skipped.')
return
generate_autosummary_docs(genfiles, builder=app.builder,
warn=app.warn, info=app.info, suffix=suffix,
base_path=app.srcdir)
def setup(app):
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
html=(autosummary_toc_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_node(autosummary_table,
html=(autosummary_table_visit_html, autosummary_noop),
latex=(autosummary_noop, autosummary_noop),
text=(autosummary_noop, autosummary_noop),
man=(autosummary_noop, autosummary_noop),
texinfo=(autosummary_noop, autosummary_noop))
app.add_directive('autosummary', Autosummary)
app.add_role('autolink', autolink_role)
app.connect('doctree-read', process_autosummary_toc)
app.connect('builder-inited', process_generate_options)
app.add_config_value('autosummary_generate', [], True, [bool])
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
from __future__ import print_function
from __future__ import division
import pandas as pd
import numpy as np
from urbansim_defaults.randomfile import fixedrandomseed,seednum
class Developer(object):
"""
Pass the dataframe that is returned by feasibility here
Can also be a dictionary where keys are building forms and values are
the individual data frames returned by the proforma lookup routine.
"""
def __init__(self, feasibility):
if isinstance(feasibility, dict):
feasibility = pd.concat(feasibility.values(), keys=feasibility.keys(), axis=1)
self.feasibility = feasibility
@staticmethod
def _max_form(f, colname):
"""
Assumes dataframe with hierarchical columns with first index equal to the
use and second index equal to the attribute.
e.g. f.columns equal to::
mixedoffice building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
industrial building_cost
building_revenue
building_size
max_profit
max_profit_far
total_cost
"""
df = f.stack(level=0)[[colname]].stack().unstack(level=1).reset_index(level=1, drop=True)
return df.idxmax(axis=1)
def keep_form_with_max_profit(self, forms=None):
"""
This converts the dataframe, which shows all profitable forms,
to the form with the greatest profit, so that more profitable
forms outcompete less profitable forms.
Parameters
----------
forms: list of strings
List of forms which compete which other. Can leave some out.
Returns
-------
Nothing. Goes from a multi-index to a single index with only the
most profitable form.
"""
f = self.feasibility
if forms is not None:
f = f[forms]
if len(f) > 0:
mu = self._max_form(f, "max_profit")
indexes = [tuple(x) for x in mu.reset_index().values]
else:
indexes = []
df = f.stack(level=0).loc[indexes]
df.index.names = ["parcel_id", "form"]
df = df.reset_index(level=1)
return df
@staticmethod
def compute_units_to_build(num_agents, num_units, target_vacancy):
"""
Compute number of units to build to match target vacancy.
Parameters
----------
num_agents : int
number of agents that need units in the region
num_units : int
number of units in buildings
target_vacancy : float (0-1.0)
target vacancy rate
Returns
-------
number_of_units : int
the number of units that need to be built
"""
print("Number of agents: {:,}".format(num_agents))
print("Number of agent spaces: {:,}".format(int(num_units)))
assert target_vacancy < 1.0
target_units = int(max(num_agents / (1 - target_vacancy) - num_units, 0))
print("Current vacancy = {:.2f}"
.format(1 - num_agents / float(num_units)))
print("Target vacancy = {:.2f}, target of new units = {:,}"
.format(target_vacancy, target_units))
return target_units
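    # Worked example: 950 agents, 1,000 existing units and a 10% target
    # vacancy give int(950 / 0.9 - 1000) = 55 net new units to build.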
def pick(self, form, target_units, parcel_size, ave_unit_size,
current_units, max_parcel_size=200000, min_unit_size=400,
drop_after_build=True, residential=True, bldg_sqft_per_job=400.0,
profit_to_prob_func=None):
"""
Choose the buildings from the list that are feasible to build in
order to match the specified demand.
Parameters
----------
form : string or list
One or more of the building forms from the pro forma specification -
e.g. "residential" or "mixedresidential" - these are configuration
parameters passed previously to the pro forma. If more than one form
is passed the forms compete with each other (based on profitability)
for which one gets built in order to meet demand.
target_units : int
The number of units to build. For non-residential buildings this
should be passed as the number of job spaces that need to be created.
parcel_size : series
The size of the parcels. This was passed to feasibility as well,
but should be passed here as well. Index should be parcel_ids.
ave_unit_size : series
The average residential unit size around each parcel - this is
indexed by parcel, but is usually a disaggregated version of a
zonal or accessibility aggregation.
bldg_sqft_per_job : float (default 400.0)
The average square feet per job for this building form.
min_unit_size : float
Values less than this number in ave_unit_size will be set to this
number. Deals with cases where units are currently not built.
current_units : series
The current number of units on the parcel. Is used to compute the
net number of units produced by the developer model. Many times
the developer model is redeveloping units (demolishing them) and
is trying to meet a total number of net units produced.
max_parcel_size : float
Parcels larger than this size will not be considered for
development - usually large parcels should be specified manually
in a development projects table.
drop_after_build : bool
Whether or not to drop parcels from consideration after they
have been chosen for development. Usually this is true so as
to not develop the same parcel twice.
residential: bool
If creating non-residential buildings set this to false and
developer will fill in job_spaces rather than residential_units
profit_to_prob_func: function
As there are so many ways to turn the development feasibility
into a probability to select it for building, the user may pass
a function which takes the feasibility dataframe and returns
a series of probabilities. If no function is passed, the behavior
of this method will not change
Returns
-------
        None if there are no feasible buildings
new_buildings : dataframe
DataFrame of buildings to add. These buildings are rows from the
DataFrame that is returned from feasibility.
"""
if len(self.feasibility) == 0:
# no feasible buildings, might as well bail
return
if form is None:
df = self.feasibility
elif isinstance(form, list):
df = self.keep_form_with_max_profit(form)
else:
df = self.feasibility[form]
# feasible buildings only for this building type
df = df[df.max_profit > 0]
ave_unit_size[ave_unit_size < min_unit_size] = min_unit_size
df["ave_unit_size"] = ave_unit_size
df["parcel_size"] = parcel_size
df['current_units'] = current_units
df = df[df.parcel_size < max_parcel_size]
df['residential_units'] = (df.residential_sqft / df.ave_unit_size).round()
df['job_spaces'] = (df.non_residential_sqft / bldg_sqft_per_job).round()
if residential:
df['net_units'] = df.residential_units - df.current_units
else:
df['net_units'] = df.job_spaces - df.current_units
df = df[df.net_units > 0]
if len(df) == 0:
            print("WARNING THERE ARE NO FEASIBLE BUILDINGS TO CHOOSE FROM")
return
# print "Describe of net units\n", df.net_units.describe()
print("Sum of net units that are profitable: {:,}"
.format(int(df.net_units.sum())))
if profit_to_prob_func:
p = profit_to_prob_func(df)
else:
df['max_profit_per_size'] = df.max_profit / df.parcel_size
p = df.max_profit_per_size.values / df.max_profit_per_size.sum()
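            # e.g. two parcels of equal size with max_profit of 30 and 10
            # get selection probabilities 0.75 and 0.25 respectively.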
if df.net_units.sum() < target_units:
print("WARNING THERE WERE NOT ENOUGH PROFITABLE UNITS TO",
"MATCH DEMAND")
build_idx = df.index.values
elif target_units <= 0:
build_idx = []
else:
# we don't know how many developments we will need, as they differ in net_units.
            # If all developments have net_units of 1 then we need target_units of them.
# So we choose the smaller of available developments and target_units.
if fixedrandomseed==0: np.random.seed(seednum)
choices = np.random.choice(df.index.values, size=min(len(df.index), target_units),
replace=False, p=p)
tot_units = df.net_units.loc[choices].values.cumsum()
ind = int(np.searchsorted(tot_units, target_units, side="left")) + 1
build_idx = choices[:ind]
if drop_after_build:
self.feasibility = self.feasibility.drop(build_idx)
new_df = df.loc[build_idx]
new_df.index.name = "parcel_id"
return new_df.reset_index()
@staticmethod
def merge(old_df, new_df, return_index=False):
"""
Merge two dataframes of buildings. The old dataframe is
usually the buildings dataset and the new dataframe is a modified
(by the user) version of what is returned by the pick method.
Parameters
----------
old_df : dataframe
Current set of buildings
new_df : dataframe
New buildings to add, usually comes from this module
return_index : bool
If return_index is true, this method will return the new
index of new_df (which changes in order to create a unique
index after the merge)
Returns
-------
df : dataframe
Combined DataFrame of buildings, makes sure indexes don't overlap
index : pd.Index
If and only if return_index is True, return the new index for the
new_df dataframe (which changes in order to create a unique index
after the merge)
"""
maxind = np.max(old_df.index.values)
new_df = new_df.reset_index(drop=True)
new_df.index = new_df.index + maxind + 1
concat_df = pd.concat([old_df, new_df], verify_integrity=True)
concat_df.index.name = 'building_id'
if return_index:
return concat_df, new_df.index
return concat_df
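# Usage sketch (illustrative frames; merge offsets the new index so building
# ids stay unique):
#   combined = Developer.merge(buildings_df, new_buildings_df)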
|