text stringlengths 957 885k |
|---|
import argparse
import os
import sys
##############################################
# Command-line hyperparameters for the training run.
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--eps', type=float, default=1.)   # Adam epsilon
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
# Pin the process to a single GPU; must happen before TensorFlow is imported.
if args.gpu >= 0:
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
# Dataset location switch: exxact=1 selects the local machine's SSD copy.
exxact = 1
if exxact:
    val_path = '/home/bcrafton3/Data_SSD/datasets/imagenet64/tfrecord/val/'
    train_path = '/home/bcrafton3/Data_SSD/datasets/imagenet64/tfrecord/train/'
else:
    val_path = '/usr/scratch/datasets/imagenet64/tfrecord/val/'
    train_path = '/usr/scratch/datasets/imagenet64/tfrecord/train/'
##############################################
import keras
import tensorflow as tf
import numpy as np
import time
np.set_printoptions(threshold=1000)
from bc_utils.init_tensor import init_filters
from bc_utils.init_tensor import init_matrix
# Per-channel RGB means subtracted from images in extract_fn below.
MEAN = [122.77093945, 116.74601272, 104.09373519]
##############################################
def parse_function(filename, label):
    """Read the raw bytes of `filename`; the label passes through unchanged."""
    return tf.read_file(filename), label
def get_val_filenames():
    """Collect and shuffle validation TFRecord paths, truncated to a
    multiple of the batch size.

    Returns a list of absolute file paths whose length divides evenly by
    args.batch_size.
    """
    val_filenames = []
    print ("building validation dataset")
    # NOTE(review): files in subdirectories are joined against val_path,
    # not their own subdir -- assumes the dataset directory is flat.
    for subdir, dirs, files in os.walk(val_path):
        for file in files:
            val_filenames.append(os.path.join(val_path, file))
    np.random.shuffle(val_filenames)
    # Drop the tail so every batch is full.
    # BUG FIX: when remainder == 0 the old slice [:-0] == [:0] silently
    # discarded EVERY filename; only truncate when there is a remainder.
    remainder = len(val_filenames) % args.batch_size
    if remainder:
        val_filenames = val_filenames[:-remainder]
    return val_filenames
def get_train_filenames():
    """Collect and shuffle training TFRecord paths, truncated to a
    multiple of the batch size.

    Returns a list of absolute file paths whose length divides evenly by
    args.batch_size.
    """
    train_filenames = []
    print ("building training dataset")
    # NOTE(review): files in subdirectories are joined against train_path,
    # not their own subdir -- assumes the dataset directory is flat.
    for subdir, dirs, files in os.walk(train_path):
        for file in files:
            train_filenames.append(os.path.join(train_path, file))
    np.random.shuffle(train_filenames)
    # Drop the tail so every batch is full.
    # BUG FIX: when remainder == 0 the old slice [:-0] == [:0] silently
    # discarded EVERY filename; only truncate when there is a remainder.
    remainder = len(train_filenames) % args.batch_size
    if remainder:
        train_filenames = train_filenames[:-remainder]
    return train_filenames
def extract_fn(record):
    """Parse one serialized TFRecord example into (image, label).

    Returns a [1, 64, 64, 3] float32 image (per-channel mean subtracted,
    then scaled by 2/255) and the int64 class label.
    """
    _feature={
        'label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string)
    }
    sample = tf.parse_single_example(record, _feature)
    image = tf.decode_raw(sample['image_raw'], tf.uint8)
    # this was tricky ... stored as uint8, not float32.
    image = tf.cast(image, dtype=tf.float32)
    image = tf.reshape(image, (1, 64, 64, 3))
    means = tf.reshape(tf.constant(MEAN), [1, 1, 1, 3])
    # Center by per-channel mean, then scale.  NOTE(review): this lands
    # roughly in [-1, 1.2), not exactly [-1, 1] -- confirm intended range.
    image = (image - means) / 255. * 2.
    label = sample['label']
    return [image, label]
###############################################################
train_filenames = get_train_filenames()
val_filenames = get_val_filenames()
# Fed with the list of TFRecord paths when an iterator is (re)initialized.
filename = tf.placeholder(tf.string, shape=[None])
###############################################################
# Validation input pipeline.
val_dataset = tf.data.TFRecordDataset(filename)
val_dataset = val_dataset.map(extract_fn, num_parallel_calls=4)
val_dataset = val_dataset.batch(args.batch_size)
val_dataset = val_dataset.repeat()
val_dataset = val_dataset.prefetch(8)
###############################################################
# Training input pipeline (identical structure to validation).
train_dataset = tf.data.TFRecordDataset(filename)
train_dataset = train_dataset.map(extract_fn, num_parallel_calls=4)
train_dataset = train_dataset.batch(args.batch_size)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.prefetch(8)
###############################################################
# Feedable iterator: the string `handle` selects train vs val at run time.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, train_dataset.output_types, train_dataset.output_shapes)
features, labels = iterator.get_next()
features = tf.reshape(features, (args.batch_size, 64, 64, 3))
labels = tf.one_hot(labels, depth=1000)
train_iterator = train_dataset.make_initializable_iterator()
val_iterator = val_dataset.make_initializable_iterator()
###############################################################
def batch_norm(x, f, name):
    """Batch normalization over the N, H, W axes with learnable per-channel
    scale (gamma) and offset (beta).

    Note: uses the *current batch* statistics for both training and
    evaluation -- no moving averages are tracked.
    """
    gamma = tf.Variable(np.ones(shape=f), dtype=tf.float32, name=name+'_gamma')
    beta = tf.Variable(np.zeros(shape=f), dtype=tf.float32, name=name+'_beta')
    # Single pass: tf.nn.moments returns both mean and variance, replacing
    # the previous redundant reduce_mean + moments(x - mean) combination
    # (variance is shift-invariant, so the result is unchanged).
    mean, var = tf.nn.moments(x, axes=[0,1,2])
    bn = tf.nn.batch_normalization(x=x, mean=mean, variance=var, offset=beta, scale=gamma, variance_epsilon=1e-3)
    return bn
def block(x, f1, f2, p, name):
    """Two 3x3 conv + batch-norm + ReLU stages, then a p x p average pool.

    f1 / f2 are the input / output channel counts; p is the pooling factor.
    """
    w1 = tf.Variable(init_filters(size=[3,3,f1,f2], init='alexnet'), dtype=tf.float32, name=name+'_conv1')
    w2 = tf.Variable(init_filters(size=[3,3,f2,f2], init='alexnet'), dtype=tf.float32, name=name+'_conv2')
    out = tf.nn.conv2d(x, w1, [1,1,1,1], 'SAME')
    out = tf.nn.relu(batch_norm(out, f2, name+'_bn1'))
    out = tf.nn.conv2d(out, w2, [1,1,1,1], 'SAME')
    out = tf.nn.relu(batch_norm(out, f2, name+'_bn2'))
    return tf.nn.avg_pool(out, ksize=[1,p,p,1], strides=[1,p,p,1], padding='SAME')
###############################################################
# Run-time hyperparameter placeholders.
dropout_rate = tf.placeholder(tf.float32, shape=())   # NOTE(review): never fed or used below
learning_rate = tf.placeholder(tf.float32, shape=())
# VGG-style backbone: five conv blocks, spatial size 64 -> 4.
block1 = block(features, 3, 64, 2, 'block1') # 64
block2 = block(block1, 64, 128, 2, 'block2') # 32
block3 = block(block2, 128, 256, 2, 'block3') # 16
block4 = block(block3, 256, 512, 2, 'block4') # 8
block5 = block(block4, 512, 1024, 1, 'block5') # 4
# Global 4x4 average pool down to 1x1, then a single 1000-way classifier.
pool = tf.nn.avg_pool(block5, ksize=[1,4,4,1], strides=[1,4,4,1], padding='SAME') # 1
flat = tf.reshape(pool, [args.batch_size, 1024])
mat1 = tf.Variable(init_matrix(size=(1024, 1000), init='alexnet'), dtype=tf.float32, name='fc1')
bias1 = tf.Variable(np.zeros(shape=1000), dtype=tf.float32, name='fc1_bias')
fc1 = tf.matmul(flat, mat1) + bias1
###############################################################
# Loss, accuracy count, and optimizer.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=fc1, labels=labels))
correct = tf.equal(tf.argmax(fc1, axis=1), tf.argmax(labels, 1))
total_correct = tf.reduce_sum(tf.cast(correct, tf.float32))
train = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=args.eps).minimize(loss)
###############################################################
# Session setup: memory growth so TF does not pre-allocate the whole GPU.
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
# String handles let the single feedable iterator switch train <-> val.
train_handle = sess.run(train_iterator.string_handle())
val_handle = sess.run(val_iterator.string_handle())
###############################################################
# Main loop: one full pass over the training files, then one over validation.
for ii in range(0, args.epochs):
    print('epoch %d/%d' % (ii, args.epochs))
    ##################################################################
    sess.run(train_iterator.initializer, feed_dict={filename: train_filenames})
    train_total = 0.0
    train_correct = 0.0
    train_acc = 0.0
    start = time.time()
    for jj in range(0, len(train_filenames), args.batch_size):
        [_total_correct, _] = sess.run([total_correct, train], feed_dict={handle: train_handle, learning_rate: args.lr})
        train_total += args.batch_size
        train_correct += _total_correct
        train_acc = train_correct / train_total
        # Report running accuracy and throughput every 100 batches.
        if (jj % (100 * args.batch_size) == 0):
            img_per_sec = (jj + args.batch_size) / (time.time() - start)
            p = "%d | train accuracy: %f | img/s: %f" % (jj, train_acc, img_per_sec)
            print (p)
    ##################################################################
    sess.run(val_iterator.initializer, feed_dict={filename: val_filenames})
    val_total = 0.0
    val_correct = 0.0
    val_acc = 0.0
    for jj in range(0, len(val_filenames), args.batch_size):
        # Only the accuracy count is fetched; the train op is not run, so the
        # fed learning_rate of 0.0 is inert.
        [_total_correct] = sess.run([total_correct], feed_dict={handle: val_handle, learning_rate: 0.0})
        val_total += args.batch_size
        val_correct += _total_correct
        val_acc = val_correct / val_total
        if (jj % (100 * args.batch_size) == 0):
            p = "val accuracy: %f" % (val_acc)
            print (p)
|
<gh_stars>0
# Generated by Django 2.2.5 on 2020-10-13 02:44
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration.

    Creates the City (tb_city), Enterprise (tb_enterprise) and Recruit
    (tb_recruit) tables; Recruit holds a FK to Enterprise.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=None, max_length=20, null=True, verbose_name='城市名称')),
                ('ishot', models.SmallIntegerField(choices=[(0, '不是热门'), (1, '是热门')], default=1, null=True, verbose_name='是否热门')),
            ],
            options={
                'verbose_name_plural': '城市',
                'verbose_name': '城市',
                'db_table': 'tb_city',
            },
        ),
        migrations.CreateModel(
            name='Enterprise',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=None, max_length=100, null=True, verbose_name='企业名称')),
                ('summary', models.CharField(default=None, max_length=1000, null=True, verbose_name='企业简介')),
                ('content', models.CharField(default=None, max_length=1000, null=True, verbose_name='企业详细信息')),
                ('city', models.CharField(default=None, max_length=100, null=True, verbose_name='企业所在城市')),
                ('address', models.CharField(default=None, max_length=100, null=True, verbose_name='企业地址')),
                ('labels', models.CharField(default=None, help_text='多个标签以空格隔开', max_length=100, null=True, verbose_name='标签列表')),
                ('coordinate', models.CharField(default=None, max_length=100, null=True, verbose_name='企业坐标')),
                ('logo', models.ImageField(default=None, null=True, upload_to='', verbose_name='Logo')),
                ('url', models.CharField(default=None, max_length=100, null=True, verbose_name='URL')),
                ('visits', models.BigIntegerField(default=0, null=True, verbose_name='浏览量')),
            ],
            options={
                'verbose_name_plural': '企业',
                'verbose_name': '企业',
                'db_table': 'tb_enterprise',
            },
        ),
        migrations.CreateModel(
            name='Recruit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('jobname', models.CharField(default=None, max_length=100, null=True, verbose_name='职位名称')),
                ('salary', models.CharField(default=None, max_length=1000, null=True, verbose_name='薪资范围')),
                ('condition', models.CharField(default=None, max_length=100, null=True, verbose_name='经验要求')),
                ('education', models.CharField(default=None, max_length=100, null=True, verbose_name='学历要求')),
                ('type', models.CharField(default=None, max_length=10, null=True, verbose_name='任职方式')),
                ('city', models.CharField(default=None, max_length=100, null=True, verbose_name='办公所在城市')),
                ('address', models.CharField(default=None, max_length=100, null=True, verbose_name='办公地址')),
                ('state', models.SmallIntegerField(choices=[(0, '不可用'), (1, '可用')], default=1, null=True, verbose_name='状态')),
                ('labels', models.CharField(default=None, help_text='多个标签以空格隔开', max_length=100, null=True, verbose_name='职位标签')),
                ('detailcontent', ckeditor_uploader.fields.RichTextUploadingField(default='', verbose_name='职位描述')),
                ('detailrequire', ckeditor_uploader.fields.RichTextUploadingField(default='', verbose_name='职位要求')),
                ('visits', models.BigIntegerField(default=0, null=True, verbose_name='浏览量')),
                ('createtime', models.DateTimeField(auto_now_add=True, null=True, verbose_name='创建日期')),
                ('enterprise', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='recruits', to='recruit.Enterprise', verbose_name='企业ID')),
            ],
            options={
                'verbose_name_plural': '职位',
                'verbose_name': '职位',
                'db_table': 'tb_recruit',
            },
        ),
    ]
|
<reponame>sarah-keren/MAC<filename>src/control/evaluate.py
import sys
import itertools
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../src')
from control.controller_decentralized import Decentralized
from control.controller_decentralized_RL import DecentralizedRL
from control.controller_centralized_RL import CentralizedRL
from agents.deep_policy_gradient import DeepPolicyGradient
from agents.policy_gradient import PolicyGradient
from agents.deep_q import DQN
from agents.agent import Agent
from agents.random_agent import RandomAgent
from environments.env_wrapper import EnvWrappper
MAX_EPISODE_LEN = 25
def main():
    """Build a controller for the hard-coded `mode`, train it if it is an RL
    controller, evaluate it over 100 runs, and plot the reward sums."""
    mode = 'CENT_DPG'
    if mode == 'DECENT_RANDOM':
        controller = make_random_decentralized_control()
        rl = False
    elif mode == 'DECENT_DPG':
        controller = make_rl_decentralized_control('dpg')
        controller.train(MAX_EPISODE_LEN, 1000)
        rl = True
    elif mode == 'CENT_DPG':
        controller = make_rl_centralized_control('dpg')
        controller.train(MAX_EPISODE_LEN, 1000)
        rl = True
    else:
        # BUG FIX: an unrecognized mode previously fell through and crashed
        # later with NameError on `controller`/`rl`; fail fast instead.
        raise ValueError(f'Unknown mode: {mode!r}')
    results = evaluate_controller(controller, 100, RL=rl)
    sums = sum_all_agents(results)
    plt.style.use('seaborn')
    plt.hist(sums)
    print(f'Mean is {sum(sums)/len(sums)}')
    plt.show()
def make_rl_centralized_control(centralized_policy):
    """Build a centralized controller: one decision maker acts for all agents."""
    env = set_env()
    decision_maker = create_centralized_agent(centralized_policy, env)
    # Per-agent slots are unused under centralized control, hence None.
    placeholder_agents = dict.fromkeys(env.get_env_agents(), None)
    return CentralizedRL(env, placeholder_agents, decision_maker)
def make_rl_decentralized_control(decentralized_policy):
    """Build a decentralized controller: each env agent gets its own learner."""
    env = set_env()
    agents = {name: create_decentralized_agent(decentralized_policy, env)
              for name in env.get_env_agents()}
    return DecentralizedRL(env, agents)
def make_random_decentralized_control():
    """Build a decentralized controller where every agent acts uniformly
    at random within its own action space."""
    env = set_env()
    action_spaces = env.get_env().action_spaces
    agents = {name: RandomAgent(space) for name, space in action_spaces.items()}
    return Decentralized(env, agents)
def evaluate_controller(controller, num_runs, RL):
    """Run `controller` for `num_runs` evaluations and collect its
    `total_rewards` after each run.

    RL controllers use the keyword run signature; non-RL controllers take
    (render, max_iteration) positionally.
    """
    collected = []
    for _ in range(num_runs):
        if RL:
            controller.run(render=False, max_iteration=10,
                           max_episode_len=25, num_episodes=10, batch_size=0)
        else:
            controller.run(False, 20)
        collected.append(controller.total_rewards)
    return collected
def set_env():
    """Construct a 2-taxi MultiTaxi environment and adapt it to our wrapper API."""
    sys.path.append('../environments/MultiTaxiEnv')
    from environments.MultiTaxiEnv.taxi_environment import TaxiEnv
    env = TaxiEnv(2)
    # Make sure it works with our API: the taxi env exposes one shared
    # action/observation space, so fan it out per agent name.
    env.agents = env.taxis_names
    env.action_spaces = {name: env.action_space for name in env.agents}
    env.observation_spaces = {name: env.observation_space for name in env.agents}
    env.possible_agents = list(env.agents)
    return EnvWrappper(env, needs_conv=False)
def sum_all_agents(results):
    """Total the rewards of every agent over every step of each episode.

    `results` is a list of episodes; each episode is a list of per-step
    dicts mapping agent name -> reward.  Returns one total per episode.
    """
    totals = []
    for episode in results:
        episode_total = 0
        for step_rewards in episode:
            episode_total += sum(step_rewards.values())
        totals.append(episode_total)
    return totals
def create_centralized_agent(policy_name, env):
    """Build one agent whose action space is the joint space of all env agents.

    policy_name : one of 'pg', 'dpg', 'dqn'.
    Raises ValueError for any other policy name.
    """
    needs_conv = env.get_needs_conv()
    # Non-conv observations from all agents are flattened and concatenated.
    num_obs = env.get_num_obs() if needs_conv else\
        (1, env.get_num_obs()[::-1][0] * (len(env.get_env_agents())))
    # Joint action space: per-agent action count ** number of agents.
    num_actions = (env.get_num_actions()) ** (len(env.get_env_agents()))
    # BUG FIX: the conditional previously sat *inside* the lambda body
    # (`lambda x: x.flatten() if not needs_conv else None`), so conv agents
    # got a callable returning None instead of no mapping function at all.
    mapping_fn = (lambda x: x.flatten()) if not needs_conv else None
    if policy_name == 'pg':
        return Agent(PolicyGradient(num_actions, num_obs, mapping_fn=mapping_fn))
    elif policy_name == 'dpg':
        return Agent(DeepPolicyGradient(num_obs, num_actions, is_conv=needs_conv,
                                        mapping_fn=mapping_fn))
    elif policy_name == 'dqn':
        return Agent(DQN(num_obs, num_actions, is_conv=needs_conv,
                         mapping_fn=mapping_fn))
    # BUG FIX: previously printed "Invalid Policy!" and returned None,
    # deferring the failure to an opaque crash in the caller.
    raise ValueError(f'Invalid policy: {policy_name!r}')
def create_decentralized_agent(policy_name, env):
    """Build one per-agent learner over the env's own obs/action spaces.

    policy_name : one of 'pg', 'dpg', 'dqn'.
    Raises ValueError for any other policy name.
    """
    num_obs = env.get_num_obs()
    num_actions = env.get_num_actions()
    needs_conv = env.get_needs_conv()
    # BUG FIX: the conditional previously sat *inside* the lambda body
    # (`lambda x: x.flatten() if not needs_conv else None`), so conv agents
    # got a callable returning None instead of no mapping function at all.
    mapping_fn = (lambda x: x.flatten()) if not needs_conv else None
    if policy_name == 'pg':
        return Agent(PolicyGradient(num_actions, num_obs, mapping_fn=mapping_fn))
    elif policy_name == 'dpg':
        return Agent(DeepPolicyGradient(num_obs, num_actions, is_conv=needs_conv,
                                        mapping_fn=mapping_fn))
    elif policy_name == 'dqn':
        return Agent(DQN(num_obs, num_actions, is_conv=needs_conv,
                         mapping_fn=mapping_fn))
    # BUG FIX: previously printed "Invalid Policy!" and returned None,
    # deferring the failure to an opaque crash in the caller.
    raise ValueError(f'Invalid policy: {policy_name!r}')
# Script entry point.
if __name__ == "__main__":
    main()
<reponame>janwillembuist/SeismicPro
"""Implements Muter class to define a boundary above which gather values will be zeroed out"""
import numpy as np
from scipy.interpolate import interp1d
from sklearn.linear_model import LinearRegression
from .utils import read_single_vfunc
class Muter:
    """A class to define an offset-time boundary above which gather values will be muted i.e. zeroed out.
    Usually muting is performed to attenuate any strong, coherent noise that was generated by the shot, e.g. high
    amplitudes near the first breaks. Such kind of noise strongly affects several processing routines, such as
    :func:`~Gather.calculate_semblance`.
    A muter object can be created from three different types of data by calling a corresponding `classmethod`:
    * `from_points` - create a muter from 1d arrays of offsets and times,
    * `from_file` - create a muter from a file in VFUNC format with offset-time pairs,
    * `from_first_breaks` - create a muter from 1d arrays of offsets and times of first breaks.
    The created object is callable and returns times up to which muting should be performed for given offsets. If a
    muter is created by direct instantiation, zero time will be returned for every offset.
    Examples
    --------
    >>> muter = Muter.from_points(offsets=[100, 1000, 2000], times=[200, 2000, 3000])
    >>> muter([0, 100, 500, 1000, 1500, 2000])
    array([   0.,  200., 1000., 2000., 2500., 3000.])
    Attributes
    ----------
    muter : callable
        Return muting times for given offsets. `muter` argument must be either numeric or 1d array-like.
    """
    def __init__(self):
        # Default muter: mute nothing (zero time for every offset).
        self.muter = lambda offsets: np.zeros_like(offsets)

    @classmethod
    def from_points(cls, offsets, times, fill_value="extrapolate"):
        """Create a muter from 1d arrays of offsets and times.
        The resulting muter performs linear time interpolation between points given, its behaviour outside of the
        offsets' range is defined by the `fill_value` argument.
        Parameters
        ----------
        offsets : 1d array-like
            An array with offset values. Measured in meters.
        times : 1d array-like
            An array with muting times, matching the length of `offsets`. Measured in milliseconds.
        fill_value : float or (float, float) or "extrapolate", optional, defaults to "extrapolate"
            - If float, this value is used to fill in for requested points outside of the data range,
            - If a two-element tuple, then its elements are used as fill values before min offset and after max offset
              given respectively,
            - If "extrapolate", then points outside the data range will be linearly extrapolated.
        Returns
        -------
        self : Muter
            Created muter.
        """
        self = cls()
        # interp1d with an explicit fill_value implies bounds_error=False.
        self.muter = interp1d(offsets, times, fill_value=fill_value)
        return self

    @classmethod
    def from_file(cls, path, **kwargs):
        """Create a muter from a file with vertical functions in Paradigm Echos VFUNC format.
        The file must have exactly one record with the following structure:
        VFUNC [inline] [crossline]
        [offset_1] [time_1] [offset_2] [time_2] ... [offset_n] [time_n]
        The loaded data is directly passed to :func:`~Muter.from_points`. The resulting muter performs linear time
        interpolation between points given, its behaviour outside of the offsets' range is defined by the
        `fill_value` argument.
        Parameters
        ----------
        path : str
            A path to the file with muting in VFUNC format.
        kwargs : misc, optional
            Additional keyword arguments to :func:`~Muter.from_points`.
        Returns
        -------
        self : Muter
            Created muter.
        """
        # The first two values (inline, crossline) are not needed here.
        _, _, offsets, times = read_single_vfunc(path)
        return cls.from_points(offsets, times, **kwargs)

    @classmethod
    def from_first_breaks(cls, offsets, times, velocity_reduction=0):
        """Create a muter from 1d arrays of offsets and times of first breaks.
        The muter estimates seismic wave velocity in the weathering layer using linear regression, decrements it by
        `velocity_reduction` in order to attenuate amplitudes immediately following the first breaks and uses the
        resulting velocity to get muting times by offsets passed.
        Parameters
        ----------
        offsets : 1d array-like
            An array with offset values. Measured in meters.
        times : 1d array-like
            An array with times of first breaks, matching the length of `offsets`. Measured in milliseconds.
        velocity_reduction : float, optional, defaults to 0
            A value used to decrement the found velocity in the weathering layer to attenuate amplitudes immediately
            following the first breaks. Measured in meters/seconds.
        Returns
        -------
        self : Muter
            Created muter.
        """
        velocity_reduction = velocity_reduction / 1000  # from m/s to m/ms
        lin_reg = LinearRegression(fit_intercept=True)
        # Regress offset on time: offset = velocity * time + intercept.
        lin_reg.fit(np.array(times).reshape(-1, 1), np.array(offsets))
        # The fitted velocity is reduced by velocity_reduction in order to mute amplitudes near first breaks
        intercept = lin_reg.intercept_
        # NOTE(review): lin_reg.coef_ is a 1-element array, so `velocity` and
        # hence the muter's return value are arrays -- confirm callers expect this.
        velocity = lin_reg.coef_ - velocity_reduction
        self = cls()
        # Invert the regression line: time = (offset - intercept) / velocity.
        self.muter = lambda offsets: (offsets - intercept) / velocity
        return self

    def __call__(self, offsets):
        """Returns times up to which muting should be performed for given offsets.
        Notes
        -----
        If the muter was created by direct instantiation, zero time will be returned for every offset.
        Parameters
        ----------
        offsets : 1d array-like
            An array with offset values. Measured in meters.
        Returns
        -------
        times : 1d array-like
            An array with muting times, matching the length of `offsets`. Measured in milliseconds.
        """
        return self.muter(offsets)
|
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from library.feature_engineering import encode_source_labels, create_tokenizer, make_c100_features
from library.augmentation import augment_by_adjacent_union
from datetime import date
from utils import write_pickle, create_dir_if_nonexist, read_pickle
from library.training_toolkit import extract_concordances, create_source_label_vocabularly
from library.model_configs import embedded_conv, basic_dense, basic_dense_plus
print('Building predictive model')
# Switches -- toggle the expensive preprocessing stages on re-runs.
use_prepared_data = False
extract_training_data = False
rebuild_source_vocabularly = False
augment_training = False
add_position_features = True
add_isic_100_features = True
# Settings
augment_factor = 2
test_set_size = 0.1
max_vocab_fraction = 0.99
decision_boundary = 0.90   # probability threshold for hard 0/1 label assignment
n_epochs = 30
n_batch_size = 900
alpha = 0.0001  # learning rate
# Constants
n_root = 6357   # width of the target (one-hot) label space
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning logs
date_str_today = date.today().strftime("%Y-%m-%d")
# Paths
# NOTE(review): paths are built by string concatenation -- assumes work_dir
# ends with a path separator.
work_dir = os.environ['work_dir']
raw_data_dir = work_dir + 'training_concs_hscpc/'
training_data_dir = work_dir + 'training_data/'
create_dir_if_nonexist(training_data_dir)
fname_prepared_data = work_dir + 'training_data/' + 'prepared_data_aug' + str(augment_training) + '.pkl'
if use_prepared_data:
    # Fast path: reload a previously prepared dataset from disk.
    prepared_data = read_pickle(fname_prepared_data)
    x = prepared_data['x']
    y = prepared_data['y']
    tokenizer = prepared_data['tokenizer']
    max_words = prepared_data['max_words']
else:
    # Extract training data
    if extract_training_data:
        extract_concordances(raw_data_dir, training_data_dir)
    # Construct the vocabulary from the source data
    if rebuild_source_vocabularly:
        create_source_label_vocabularly(raw_data_dir, training_data_dir, n_root)
    # Read the vocabularly
    source_label_vocab = pd.read_pickle(training_data_dir + 'label_dictionary.pkl')
    # Read and wrangle the training data
    training_data = pd.read_pickle(training_data_dir + 'hscpc_collected_training_set.pkl')
    x_labels = training_data['source_row_label'].to_list()
    y_labels = training_data['hscpc_labels'].to_list()
    n_samples = len(x_labels)
    # Tests; check input data -- label indices must span exactly [0, n_root)
    max_hscpc_index = [max(s) for s in zip(*y_labels)]
    min_hscpc_index = [min(s) for s in zip(*y_labels)]
    assert max_hscpc_index[0] == n_root-1 and min_hscpc_index[0] == 0
    # One hot encode y (multi-label: each row may set several columns)
    y = np.zeros((n_samples, n_root), dtype=int)
    for i, j in enumerate(y_labels):
        y[i, j] = 1
    # Create tokenizer, configured to only take into account the max_words
    source_vocab = source_label_vocab['source_labels'].to_list()
    tokenizer, max_words = create_tokenizer(source_vocab, max_vocab_fraction)
    # One-hot-encode x labels
    x_features_encoded = encode_source_labels(tokenizer, x_labels, max_words)
    # Add label position feature
    if add_position_features:
        x_features_encoded = np.hstack((x_features_encoded, training_data['position'].to_numpy().reshape(-1, 1)))
    # C100 features
    if add_isic_100_features:
        print('Calculating string similarity between x labels and C100...')
        c100_labels = pd.read_excel(work_dir + 'hscpc/c100_labels.xlsx')['sector'].to_list()
        new_features = make_c100_features(training_data['source_row_label'].to_list(), c100_labels)
        x_features_encoded = np.hstack((x_features_encoded, new_features))
    # Augment
    if augment_training:
        x_features_encoded, y = augment_by_adjacent_union(x_features_encoded, y, max_words, augment_factor)
    # Final feature matrix
    x = x_features_encoded.copy()
    # Save prepared dataset so use_prepared_data can skip all of the above.
    prepared_data = {'x': x, 'y': y, 'tokenizer': tokenizer, 'max_words': max_words}
    write_pickle(fname_prepared_data, prepared_data)
# Training set properties
n_features = x.shape[1]
n_samples = x.shape[0]
print('Training set contains ' + str(n_features) + ' features and ' + str(x.shape[0]) + ' records')
# Test-train split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_set_size)
# Model
# model = basic_dense_plus(n_features, n_root)
model = basic_dense(n_features, n_root)
opt = Adam(learning_rate=alpha)
# NOTE(review): 'acc' monitors *training* accuracy; 'val_acc' would stop on
# validation performance instead -- confirm which is intended.
callback = EarlyStopping(monitor='acc', patience=3)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['acc', 'categorical_accuracy', 'mean_squared_error'])
history = model.fit(x_train, y_train, epochs=n_epochs, batch_size=n_batch_size, validation_data=(x_test, y_test),
                    callbacks=[callback])
# Threshold predicted probabilities into hard 0/1 multi-label predictions.
preds = model.predict(x_test)
preds[preds >= decision_boundary] = 1
preds[preds < decision_boundary] = 0
acc = accuracy_score(y_test, preds)
sse = np.sum(np.square(preds - y_test))
rmse = np.sqrt(np.mean(np.square(preds - y_test)))
print('Accuracy: ' + "{:.4f}".format(acc) + ', SSE: ' + str(sse) + ', RMSE: ' + "{:.3f}".format(rmse))
# Retrain model with full training set (should the batch size be reset here?)
# NOTE(review): the final score below is measured on the same data the model
# was just trained on, so it is an optimistic estimate.
model.fit(x, y, epochs=n_epochs, batch_size=n_batch_size, validation_data=(x, y), callbacks=[callback])
preds = model.predict(x)
preds[preds >= decision_boundary] = 1
preds[preds < decision_boundary] = 0
acc = accuracy_score(y, preds)
sse = np.sum(np.square(preds - y))
rmse = np.sqrt(np.mean(np.square(preds - y)))
print('Final score: accuracy: ' + "{:.4f}".format(acc) + ', SSD: ' + str(sse) + ', RMSE: ' + "{:.3f}".format(rmse))
# Save model object
# NOTE(review): pickling a Keras model is fragile across library versions;
# consider model.save() instead.
n_layers = len(model.layers)
model_meta_name = 'model_' + date_str_today + '_w' + str(max_words) + '_s' + str(n_samples) + '_l' + str(n_layers)
model_fname = work_dir + 'model/' + model_meta_name + '.pkl'
write_pickle(model_fname, model)
print('Saved model to disk. model_meta: ' + model_meta_name)
# Save feature meta -- everything needed to rebuild the feature encoding at
# inference time (tokenizer, vocab size, and which extra features were added).
feature_meta = {'tokenizer': tokenizer, 'max_words': max_words}
feature_meta['add_position_features'] = add_position_features
feature_meta['add_isic_100_features'] = add_isic_100_features
feature_meta_name = 'feature_meta_' + date_str_today + '_w' + str(max_words)
fname_feature_meta = work_dir + 'model/' + feature_meta_name + '.pkl'
write_pickle(fname_feature_meta, feature_meta)
print('Saved feature meta to disk. feature_meta: ' + feature_meta_name)
print('Finished model build')
<reponame>ganeshdg95/Privacy-and-Performance-in-Power-Consumption-Curve-Generation-with-GANs
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
from scipy.stats import wasserstein_distance as EMD
# This function computes the empirical distributions of the indicators for natural (real) and artificial (fake) samples.
def MakeHist(indicators_real, indicators_fake):
    """Compare real vs. fake indicator distributions, histogram by histogram.

    Parameters
    ----------
    indicators_real, indicators_fake : np.ndarray of shape [batch_size, 5]
        Columns are, in order: mean, skewness, CV, kurtosis, max/mean.
        Normalization assumes both arrays have the same number of rows.

    Returns
    -------
    List_Hist_r : list of arrays -- normalized bin heights, real samples.
    List_Hist_f : list of arrays -- normalized bin heights, fake samples.
    List_Hist_x : list of arrays -- shared bin centers per indicator.
    List_EMD    : list of floats -- EMD between standardized real/fake values.
    Avg_Ind_Index : float -- mean of List_EMD (Average Indicator Distance).
    """
    num_samples = indicators_real.shape[0]
    bins_W = 51  # number of bin edges (=> bins_W - 1 bins)
    List_Hist_r, List_Hist_f, List_Hist_x, List_EMD = [], [], [], []
    # The original repeated the same ~10 lines for each of the five
    # indicators; loop over the columns instead (also generalizes to any
    # number of indicator columns).
    for col in range(indicators_real.shape[1]):
        real = indicators_real[:, col]
        fake = indicators_fake[:, col]
        lo = np.minimum(real.min(), fake.min())
        hi = np.maximum(real.max(), fake.max())
        edges = np.linspace(lo, hi, num=bins_W)
        # BUG FIX: the old np.array(np.histogram(...)) wrapped a ragged
        # (counts, edges) tuple -- that implicit object-array construction
        # raises ValueError on NumPy >= 1.24.
        counts_real, _ = np.histogram(real, bins=edges)
        counts_fake, _ = np.histogram(fake, bins=edges)
        # Normalize counts into a probability density over the shared range.
        norm = (bins_W - 1) / ((hi - lo) * num_samples)
        List_Hist_r.append(counts_real * norm)
        List_Hist_f.append(counts_fake * norm)
        # Bin centers, shared between real and fake histograms.
        List_Hist_x.append((edges[:bins_W - 1] + edges[1:bins_W]) / 2)
        # Standardize by the average std so the per-indicator EMDs are
        # comparable to each other.
        std = (np.std(real) + np.std(fake)) / 2
        List_EMD.append(EMD(real / std, fake / std))
    Avg_Ind_Index = np.mean(List_EMD)
    return List_Hist_r, List_Hist_f, List_Hist_x, List_EMD, Avg_Ind_Index
|
<reponame>epicfaace/GCMW
from .util import calculate_price
# def coupon_code_verify_max(form, code, responseId=None):
# # True: coupon code can be used (either length of coupon codes used is not at max, or your ID has already used the coupon code before.)
# # If maximum is negative, that means there is no maximum.
# countByName = form.get("couponCodes", {}).get(code, {}).get("countBy", "responses")
# usedDict = form.get("couponCodes_used", {}).get(code, {}).get(countByName, {})
# # usedDict looks like: {"responseid1": 1, "responseid2": 3}
# if (type(usedDict) is list): usedDict = {rid: 1 for rid in usedDict} # Backwards compatibility -- list.
# totalNumUsed = sum(usedDict.values())
# maximum = form.get("couponCodes", {}).get(code, {}).get("max", -1)
# return responseId in usedDict or maximum < 0 or totalNumUsed < maximum
def coupon_code_verify_max_and_record_as_used(
    formsCollection, form, code, responseId, response_data
):
    """Check whether *responseId* may use coupon *code* and record the usage.

    countByName - which column to count coupons used by, i.e., "responses"
        or "participants", etc.
    response_data - data in the form.
    Returns:
        True/False: is coupon code valid?
        numRemaining: number of spots left for coupon codes (used in error
            messages). Only meaningful when a non-negative maximum is set.
    """
    # form = formsCollection.get_item(Key=formKey)["Item"]
    formKey = {"id": form["id"], "version": int(form["version"])}
    countByName = form.get("couponCodes", {}).get(code, {}).get("countBy", "responses")
    usedDict = form.get("couponCodes_used", {}).get(code, {}).get(countByName, {})
    shouldOverwriteList = False
    # Backwards compatibility: older records stored a plain list of response
    # ids, each counting as one use; normalize to {responseId: count}.
    if type(usedDict) is list:
        usedDict = {rid: 1 for rid in usedDict}
        shouldOverwriteList = True
    if countByName == "responses":
        number = 1
    else:
        number = int(
            calculate_price(countByName, response_data)
        )  # todo: should this be turned into a decimal?
    totalNumUsed = sum(usedDict.values())
    maximum = form.get("couponCodes", {}).get(code, {}).get("max", -1)
    # Exclude this response's own prior usage so re-submitting does not
    # double-count it against the cap.
    # BUG FIX: was usedDict.get("responseId", 0) — the string literal rather
    # than the responseId variable, so prior usage was never excluded and a
    # re-submission at the cap was incorrectly rejected.
    numRemaining = maximum - (totalNumUsed - usedDict.get(responseId, 0))
    # A negative maximum means "no maximum"; skip the cap check then.
    if maximum >= 0 and numRemaining - number < 0:
        return False, numRemaining
    if usedDict.get(responseId, -1) == number:
        # Number did not change. Coupon code can be used, but no need to update it.
        return True, numRemaining - number
    else:
        usedDict[responseId] = number
        if "couponCodes_used" in form:
            if (
                not shouldOverwriteList
                and code in form["couponCodes_used"]
                and countByName in form["couponCodes_used"][code]
            ):
                # Narrow update: only this response's count changes.
                formsCollection.update_item(
                    Key=formKey,
                    UpdateExpression="SET couponCodes_used.#code.#countByName.#responseId = :number",
                    ExpressionAttributeNames={
                        "#code": code,
                        "#countByName": countByName,
                        "#responseId": responseId,
                    },
                    ExpressionAttributeValues={":number": number},
                )
            else:
                # Replace the whole per-code entry (also migrates legacy lists).
                formsCollection.update_item(
                    Key=formKey,
                    UpdateExpression="SET couponCodes_used.#code = :couponCodeValue",
                    ExpressionAttributeNames={"#code": code},
                    ExpressionAttributeValues={":couponCodeValue": {countByName: usedDict}},
                )
        else:
            # First coupon usage ever recorded on this form.
            formsCollection.update_item(
                Key=formKey,
                UpdateExpression="SET couponCodes_used = :couponCodes_used",
                ExpressionAttributeValues={
                    ":couponCodes_used": {code: {countByName: usedDict}}
                },
            )
    return True, numRemaining - number
|
<reponame>DreamsofPeace/Cisco-Firmware-Sort
from iosutils import product,imagelookup,iostrain,utilssinglemove,utilssingleprodname,utils_dev_v2_vf_imagecode,utils_dev_imagecode_v2_vf
from iosutils import filemove,filepath2,filepath3,filepath4,filepath5
from iosutils import util2digit,util3digit,util4digit,util5digit,stringtolist
from iosutils import messageunknowndev,messageunknownfeat,messageunknownfile
def fileprocessornxos (filename,debug1):
    """Route a Cisco NX-OS / MDS family firmware file to its archive folder.

    Dispatches on the filename — exact matches and one-off files first, then
    prefix and dotted/dashed component checks — to determine the product
    family, image type and software version, and delegates the actual move
    to the matching helper. Branch order matters: broader prefixes must come
    after the more specific ones they would otherwise shadow.

    FIX: removed a duplicated, unreachable ``elif len(splitbydot) == 7``
    branch in the nxos/nxos64 section, and folded the identical 5- and
    6-component cases into one test.
    """
    if debug1:
        print("\tModule#\t\tios_nexus")
    if debug1:
        print("\tSubroutine#\tfileprocessornxos")
    splitbydash = filename.split("-")
    splitbydot = filename.split(".")
    # --- One-off files with fixed or near-fixed names. ---
    if (
        filename.startswith("ssd_c400_upgrade") or
        filename == "upgrade_m500_firmware.tar.gz"
    ):
        prodname = product("nxos")
        imagecode = imagelookup("firmware")
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "n9000-epld-secure-boot-update.img":
        prodname = product("nxos")
        imagecode = imagelookup("epld")
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif (
        filename == "poap_script.py" or
        filename == "poap_script.tcl"
    ):
        prodname = product("n3500")
        imagecode = imagelookup("poap")
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "L2-L3_CT.zip":
        prodname = product("n1000v")
        imagecode = imagelookup("l2l3cvt")
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif (
        filename == "nxos-n3kbios.bin" or
        filename == "n3k_bios_release_rn.pdf"
    ):
        prodname = product("n3000")
        imagecode = imagelookup("bios")
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "nxos.9.3.4-capacity-emulator.tgz":
        prodname = product("nxos")
        imagecode = imagelookup("capacity-emulator")
        workname = filename.replace("-capacity-emulator.tgz","")
        workname = workname.replace("nxos.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    # --- Individually-known SMU rpm packages, filed under release/component. ---
    elif filename == "ntp-1.0.1-7.0.3.I2.2d.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/7.0/7.0.3.I2.2d/NTP"
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "ntp-1.0.1-7.0.3.I2.2e.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/7.0/7.0.3.I2.2e/NTP"
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "ntp-1.0.2-7.0.3.I2.2e.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/7.0/7.0.3.I2.2e/NTP"
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "nxos.nsqos_lc_tor-n9k_TOR-1.0.0-7.0.3.I2.2e.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/7.0/7.0.3.I2.2e/QoS"
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "nxos.nsqos_sup_tor-n9k_TOR-1.0.0-7.0.3.I2.2e.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/7.0/7.0.3.I2.2e/QoS"
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "snmp-1.0.1-7.0.3.I2.2e.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/7.0/7.0.3.I2.2e/SNMP"
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename == "vxlan-2.0.1.0-9.2.3.lib32_n9000.rpm":
        prodname = product("nxos")
        imagecode = imagelookup("smu")
        imagecode = imagecode + "/9.2/9.2.3/VXLAN"
        utilssinglemove (debug1,filename,prodname,imagecode)
    # --- POAP scripts and versioned families. ---
    elif filename.startswith("n6000_poap_script"):
        prodname = product("n6000")
        imagecode = imagelookup("poap")
        utilssinglemove (debug1,filename,prodname,imagecode)
    elif filename.startswith("poap_ng"):
        prodname = product("Nexus")
        imagecode = imagelookup("poap_ng")
        workname = filename.replace(".py","")
        workname = workname.replace("poap_ng.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif (
        filename.startswith("Nexus1000v") or
        filename.startswith("Nexus1000V")
    ):
        prodname = product("n1000v")
        fileprocnxos1000v (debug1,filename,prodname)
    elif splitbydot[0] == "n9000-epld":
        prodname = product("nxos")
        imagecode = imagelookup("epld")
        # 6.x/7.x trains use five-component versions, later trains three.
        if splitbydot[1] == "6" or splitbydot[1] == "7":
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        else:
            fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
    elif splitbydot[0] == "n9000-dk9":
        prodname = product("n9000")
        if splitbydot[1] == "6" or splitbydot[1] == "7":
            # NOTE(review): assumes 6.x/7.x names carry at least seven dotted
            # components; a shorter name would raise IndexError here.
            if splitbydot[6].startswith("CSC"):
                imagecode = imagelookup("smu")
                fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
            else:
                imagecode = imagelookup("system")
                fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
    elif splitbydot[0] == "n7000-s1-epld" or splitbydot[0] == "n7000-s2-epld":
        prodname = product("n7000")
        imagecode = imagelookup("epld")
        if splitbydot[1] == "7":
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        else:
            fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
    elif (
        splitbydot[0] == "nxos" or
        splitbydot[0] == "nxos64"
    ):
        prodname = product("nxos")
        # Component count distinguishes short (x.y.z) from long (x.y.z.A.b)
        # version strings. (The original had two identical len == 7 branches;
        # the unreachable duplicate was removed, and the identical 5- and
        # 6-component cases were merged.)
        if len(splitbydot) in (5, 6):
            imagecode = imagelookup("system")
            fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
        elif len(splitbydot) == 7:
            imagecode = imagelookup("system")
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        elif splitbydot[1].startswith("CSC"):
            imagecode = imagelookup("smu")
            fileprocessornxos9ksmu(filename,prodname,imagecode,debug1)
    elif splitbydash[0] == "n6000":
        prodname = product(splitbydash[0])
        if splitbydot[0] == "n6000-uk9-kickstart":
            imagecode = imagelookup("kickstart")
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        elif splitbydot[0] == "n6000-uk9":
            imagecode = imagelookup("system")
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
    elif splitbydash[0] == "n7000":
        prodname = product(splitbydash[0])
        if splitbydash[1] == "s1":
            imagecode = imagelookup("s1")
            nexus7ksliceandice (filename,prodname,imagecode,debug1)
        elif splitbydash[1] == "s2":
            imagecode = imagelookup("s2")
            nexus7ksliceandice (filename,prodname,imagecode,debug1)
    elif splitbydash[0] == "n7700":
        prodname = product(splitbydash[0])
        if splitbydash[1] == "s2":
            imagecode = imagelookup("s2")
            nexus7ksliceandice (filename,prodname,imagecode,debug1)
        elif splitbydash[1] == "s3":
            imagecode = imagelookup("s3")
            nexus7ksliceandice (filename,prodname,imagecode,debug1)
    elif filename.startswith("n3000"):
        prodname = product("n3000")
        if filename.startswith("n3000-uk9-kickstart."):
            imagecode = imagelookup("kickstart")
            workname = filename.replace(".bin","")
            workname = workname.replace("n3000-uk9-kickstart.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("n3000-compact"):
            imagecode = imagelookup("system")
            workname = filename.replace(".bin","")
            workname = workname.replace("n3000-compact.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("n3000_xsd."):
            imagecode = imagelookup("xsd")
            workname = filename.replace(".tar.gz","")
            workname = workname.replace("n3000_xsd.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        else:
            imagecode = imagelookup("system")
            workname = filename.replace(".bin","")
            workname = workname.replace("n3000-uk9.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
    elif filename.startswith("guestshell"):
        prodname = product("Nexus")
        imagecode = imagelookup("guestshell")
        workname = filename.replace(".ova","")
        workname = workname.replace("guestshell.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif filename.startswith("oac"):
        prodname = product("Nexus")
        imagecode = imagelookup("oac")
        workname = filename.replace(".ova","")
        workname = workname.replace("oac.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif (
        filename.startswith("nxosv-final") or
        filename.startswith("nxosv") or
        filename.startswith("nexus9300v") or
        filename.startswith("nexus9500v")
    ):
        prodname = product("nxosv")
        imagecode = imagelookup("system")
        # Strip any known container extension and any known prefix to leave
        # just the version string.
        workname = filename.replace(".box","")
        workname = workname.replace(".ova","")
        workname = workname.replace(".qcow2","")
        workname = workname.replace(".vmdk","")
        workname = workname.replace("nxosv-final.","")
        workname = workname.replace("nxosv.","")
        workname = workname.replace("nexus9300v.","")
        workname = workname.replace("nexus9500v64.","")
        workname = workname.replace("nexus9500v.","")
        utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
    elif (
        filename.startswith("n3500") or
        filename.startswith("poap_script.6") or
        filename.startswith("poap_script_n3k.")
    ):
        prodname = product("n3500")
        if filename.startswith("n3500-uk9-kickstart."):
            imagecode = imagelookup("kickstart")
            workname = filename.replace(".bin","")
            workname = workname.replace("n3500-uk9-kickstart.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
        elif (
            filename.startswith("poap_script.") or
            filename.startswith("poap_script_n3k.")
        ):
            imagecode = imagelookup("poap")
            workname = filename.replace(".py","")
            workname = workname.replace(".tcl","")
            workname = workname.replace("poap_script_n3k.","")
            workname = workname.replace("poap_script.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        else:
            imagecode = imagelookup("system")
            workname = filename.replace(".bin","")
            workname = workname.replace("n3500-uk9.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
    elif splitbydash[0] == "n4000":
        prodname = product("n4000")
        workname = filename.replace(".bin","")
        workname = workname.replace("n4000-bk9-kickstart.","")
        workname = workname.replace("n4000-bk9.","")
        if filename.startswith("n4000-bk9-kickstart."):
            imagecode = imagelookup("kickstart")
        else:
            imagecode = imagelookup("system")
        utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
    elif (
        filename.startswith("n5000") or
        filename.startswith("poap_script")
    ):
        prodname = product("n5000")
        if filename.startswith("n5000-uk9-kickstart."):
            imagecode = imagelookup("kickstart")
            workname = filename.replace(".bin","")
            workname = workname.replace("n5000-uk9-kickstart.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
        elif (
            filename.startswith("n5000_poap_script.") or
            filename.startswith("poap_script.")
        ):
            imagecode = imagelookup("poap")
            workname = filename.replace(".py","")
            workname = workname.replace(".tcl","")
            workname = workname.replace("n5000_poap_script.","")
            workname = workname.replace("poap_script.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("n5000_xsd."):
            imagecode = imagelookup("xsd")
            workname = filename.replace(".tar.gz","")
            workname = workname.replace("n5000_xsd.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        else:
            imagecode = imagelookup("system")
            workname = filename.replace(".bin","")
            workname = workname.replace("n5000-uk9.","")
            utils_dev_v2_vf_imagecode (debug1,filename,prodname,imagecode,workname)
    # --- MDS 9000 family (SAN switches). ---
    elif (
        splitbydash[0] == "m9000" or
        splitbydash[0] == "m9500"
    ):
        prodname = product("m9500")
        if filename.startswith("m9000-pkg1.") and filename.endswith(".epld"):
            imagecode = imagelookup("epld")
            workname = filename.replace(".epld","")
            workname = workname.replace("m9000-pkg1.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-epld-") and filename.endswith(".img"):
            imagecode = imagelookup("epld")
            workname = filename.replace(".img","")
            workname = workname.replace("m9000-epld-","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-ek9-ssi-mz.") and filename.endswith(".bin"):
            imagecode = imagelookup("ssi")
            workname = filename.replace(".bin","")
            workname = workname.replace("m9000-ek9-ssi-mz.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9500-sf1ek9-kickstart-mz."):
            imagecode = imagelookup("s1ek9")
            workname = filename.replace(".bin","")
            workname = workname.replace("m9500-sf1ek9-kickstart-mz.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9500-sf1ek9-mz."):
            imagecode = imagelookup("s1ek9")
            workname = filename.replace(".bin","")
            workname = workname.replace("m9500-sf1ek9-mz.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9500-sf2ek9-kickstart-mz."):
            imagecode = imagelookup("s2ek9")
            workname = filename.replace(".bin","")
            workname = workname.replace("m9500-sf2ek9-kickstart-mz.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9500-sf2ek9-mz."):
            imagecode = imagelookup("s2ek9")
            workname = filename.replace(".bin","")
            workname = workname.replace("m9500-sf2ek9-mz.","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-cd-npe-"):
            imagecode = imagelookup("fabman")
            workname = filename.replace(".zip","")
            workname = workname.replace("m9000-cd-npe-","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-cd-"):
            imagecode = imagelookup("fabman")
            workname = filename.replace(".zip","")
            workname = workname.replace("m9000-cd-","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-fm-update-"):
            imagecode = imagelookup("fabman")
            workname = filename.replace(".jar","")
            workname = workname.replace("m9000-fm-update-","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-fm-"):
            imagecode = imagelookup("fabman")
            workname = filename.replace(".jar","")
            workname = workname.replace("m9000-fm-","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
        elif filename.startswith("m9000-sha-"):
            imagecode = imagelookup("fabman")
            workname = filename.replace(".npe.jar","")
            workname = workname.replace("m9000-sha-","")
            utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif splitbydash[0] == "m9100":
        prodname = product("m9100")
        workname = filename.replace(".bin","")
        workname = workname.replace("m9100-","")
        if workname.startswith("s1ek9-"):
            imagecode = imagelookup("s1ek9")
            workname = workname.replace("s1ek9-","")
        elif workname.startswith("s2ek9-"):
            imagecode = imagelookup("s2ek9")
            workname = workname.replace("s2ek9-","")
        elif workname.startswith("s3ek9-"):
            imagecode = imagelookup("s3ek9")
            workname = workname.replace("s3ek9-","")
        elif workname.startswith("s5ek9-"):
            imagecode = imagelookup("s5ek9")
            workname = workname.replace("s5ek9-","")
        else:
            messageunknowndev()
        workname = workname.replace("kickstart-mz.","")
        workname = workname.replace("kickstart-mz-npe.","")
        workname = workname.replace("mz.","")
        workname = workname.replace("mz-npe.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif splitbydash[0] == "m9200":
        prodname = product("m9200")
        workname = filename.replace(".bin","")
        workname = workname.replace("m9200-","")
        if workname.startswith("s1ek9-"):
            imagecode = imagelookup("s1ek9")
            workname = workname.replace("s1ek9-","")
        elif workname.startswith("s2ek9-"):
            imagecode = imagelookup("s2ek9")
            workname = workname.replace("s2ek9-","")
        elif workname.startswith("s3ek9-"):
            imagecode = imagelookup("s3ek9")
            workname = workname.replace("s3ek9-","")
        elif workname.startswith("s5ek9-"):
            imagecode = imagelookup("s5ek9")
            workname = workname.replace("s5ek9-","")
        else:
            messageunknowndev()
        workname = workname.replace("kickstart-mz.","")
        workname = workname.replace("kickstart-mz-npe.","")
        workname = workname.replace("mz.","")
        workname = workname.replace("mz-npe.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif splitbydash[0] == "m9250":
        prodname = product("m9250")
        workname = filename.replace(".bin","")
        workname = workname.replace("m9250-","")
        if workname.startswith("s5ek9-"):
            imagecode = imagelookup("s5ek9")
            workname = workname.replace("s5ek9-","")
        else:
            messageunknowndev()
        workname = workname.replace("kickstart-mz.","")
        workname = workname.replace("kickstart-mz-npe.","")
        workname = workname.replace("mz.","")
        workname = workname.replace("mz-npe.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    elif splitbydash[0] == "m9700":
        prodname = product("m9700")
        workname = filename.replace(".bin","")
        workname = workname.replace("m9700-","")
        if workname.startswith("s3ek9-"):
            imagecode = imagelookup("s3ek9")
            workname = workname.replace("s3ek9-","")
        elif workname.startswith("s4ek9-"):
            imagecode = imagelookup("s4ek9")
            workname = workname.replace("s4ek9-","")
        else:
            messageunknowndev()
        workname = workname.replace("kickstart-mz.","")
        workname = workname.replace("kickstart-mz-npe.","")
        workname = workname.replace("mz.","")
        workname = workname.replace("mz-npe.","")
        utils_dev_imagecode_v2_vf (debug1,filename,prodname,imagecode,workname)
    else:
        messageunknownfile()
def nexus7ksliceandice (filename,prodname,supcode,debug1):
    """Classify a Nexus 7000/7700 image by supervisor and image type.

    filename - the firmware filename, e.g. "n7000-s2-dk9.7.3.0.D1.1.bin".
    prodname - product folder name already resolved by the caller.
    supcode  - supervisor folder component (e.g. the "s1"/"s2"/"s3" lookup
               result); appended to prodname before delegation.
    debug1   - when truthy, print trace lines.

    Delegates to fileprocnxosfivedigit for 7.x trains (five-component
    versions) and fileprocnxosthreedigit otherwise.
    """
    if debug1:
        print("\tSubroutine#\tnexus7ksliceandice")
    splitbydot = filename.split(".")
    # Kickstart-NPE images, any supervisor variant.
    if splitbydot[0] == "n7000-s1-kickstart-npe" or splitbydot[0] == "n7000-s2-kickstart-npe" or splitbydot[0] == "n7700-s2-kickstart-npe" or splitbydot[0] == "n7700-s3-kickstart-npe":
        prodname = prodname + "/" + supcode
        imagecode = imagelookup("kickstart-npe")
        if splitbydot[1] == "7":
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        else:
            fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
    # Plain kickstart images (checked after the more specific -npe form).
    elif splitbydot[0] == "n7000-s1-kickstart" or splitbydot[0] == "n7000-s2-kickstart" or splitbydot[0] == "n7700-s2-kickstart" or splitbydot[0] == "n7700-s3-kickstart":
        prodname = prodname + "/" + supcode
        imagecode = imagelookup("kickstart")
        if splitbydot[1] == "7":
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        else:
            fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
    # System-NPE images.
    elif splitbydot[0] == "n7000-s1-dk9-npe" or splitbydot[0] == "n7000-s2-dk9-npe" or splitbydot[0] == "n7700-s2-dk9-npe" or splitbydot[0] == "n7700-s3-dk9-npe":
        prodname = prodname + "/" + supcode
        imagecode = imagelookup("system-npe")
        if splitbydot[1] == "7":
            fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        else:
            fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
    # System and EPLD images share one branch; the image type is decided by
    # probing a dotted component for a CSC bug id, then the "epld" suffix.
    elif (
        splitbydot[0] == "n7000-s1-dk9" or
        splitbydot[0] == "n7000-s2-dk9" or
        splitbydot[0] == "n7700-s2-dk9" or
        splitbydot[0] == "n7700-s3-dk9" or
        splitbydot[0] == "n7000-s1-epld" or
        splitbydot[0] == "n7000-s2-epld" or
        splitbydot[0] == "n7700-s2-epld" or
        splitbydot[0] == "n7700-s3-epld"
    ):
        prodname = prodname + "/" + supcode
        if splitbydot[1] == "7":
            # NOTE(review): assumes every 7.x name has at least seven dotted
            # components; a 7.x EPLD with a short version string would raise
            # IndexError here — confirm against real filenames.
            if splitbydot[6].startswith("CSC"):
                imagecode = imagelookup("smu")
                fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
            elif splitbydot[0].endswith("epld"):
                imagecode = imagelookup("epld")
                fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
            else:
                imagecode = imagelookup("system")
                fileprocnxosfivedigit (filename,prodname,imagecode,debug1)
        else:
            # Non-7.x trains use three-component versions; index 4 is the
            # component after the version (extension or SMU id).
            if splitbydot[4].startswith("CSC"):
                imagecode = imagelookup("smu")
                fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
            elif splitbydot[0].endswith("epld"):
                imagecode = imagelookup("epld")
                fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
            else:
                imagecode = imagelookup("system")
                fileprocnxosthreedigit (filename,prodname,imagecode,debug1)
def utilssinglemove (debug1,filename,prodname,imagecode):
    """Move a single fixed-name file into the <prodname>/<imagecode> folder.

    Thin wrapper: builds the two-component destination via filepath2 and
    hands the file to filemove.
    """
    if debug1:
        # BUG FIX: previously printed "nexussinglefile"; every other helper
        # in this module traces its own subroutine name.
        print("\tSubroutine#\tutilssinglemove")
    filepath = filepath2 (prodname,imagecode)
    filemove (filepath, filename)
def fileprocessornxosplatform7700v8 (filename, debug1=False):
    """Classify an n7700 SUP-2/SUP-3 era image filename (work in progress).

    BUG FIXES relative to the original:
      * ``filename.startswith == "..."`` compared the bound method object to
        a string (always False, never called the method); the method is now
        actually invoked with the prefix.
      * ``debug1`` was referenced but never defined (NameError on any call);
        it is now an optional parameter defaulting to False, so any existing
        caller passing only ``filename`` is unaffected.

    NOTE(review): this routine still computes ``filepath`` without moving
    the file or returning anything, and the image code derived from the
    filename prefix is overwritten by the SUP-2/SUP-3 branch below — it
    looks unfinished; confirm intent before relying on it.
    """
    if debug1:
        print("\tSubroutine#\tfileprocessornxosplatform7700v8")
    splitbydash = filename.split("-")
    splitbydot = filename.split(".")
    # Most specific prefixes first so "-npe" variants are not shadowed.
    if filename.startswith("n7700-s2-kickstart-npe"):
        imagecode = "KICKSTART-NPE"
    elif filename.startswith("n7700-s2-kickstart"):
        imagecode = "KICKSTART"
    elif filename.startswith("n7700-s2-dk9-npe"):
        imagecode = "SYSTEM-NPE"
    elif filename.startswith("n7700-s2-dk9"):
        imagecode = "SYSTEM"
    if splitbydash[0] == "n7700":
        prodname = product (splitbydash[0])
        # NOTE(review): this overwrites any image code chosen above.
        if splitbydash[1] == "s2":
            imagecode = "SUP-2"
        elif splitbydash[1] == "s3":
            imagecode = "SUP-3"
        iosmain = util2digit (splitbydot[1],splitbydot[2])
        iosfull = util3digit (splitbydot[1],splitbydot[2],splitbydot[3])
        filepath = prodname + "/" + iosmain + "/" + iosfull + "/" + imagecode
def fileprocnxosthreedigit (filename,prodname,imagecode,debug1):
    """File an image whose version has three dotted components (x.y.z).

    Derives the branch (x.y) and full version (x.y.z) from the filename,
    chooses the destination layout by image code, and moves the file.
    """
    if debug1:
        print("\tSubroutine#\tfileprocnxosthreedigit")
    parts = filename.split(".")
    branch = util2digit (parts[1],parts[2])
    release = util3digit (parts[1],parts[2],parts[3])
    if imagecode == "FIRMWARE-EPLD":
        # EPLD archives keep the image code ahead of the version folders.
        destination = filepath4 (prodname,imagecode,branch,release)
    elif imagecode == "SMU":
        # SMUs add the bug id (the component after the version) as a level.
        destination = filepath5 (prodname,imagecode,branch,release,parts[4])
    else:
        destination = filepath4 (prodname,branch,release,imagecode)
    filemove (destination, filename)
def fileprocnxosfivedigit (filename,prodname,imagecode,debug1):
    """File an image whose version has five dotted components (x.y.z.A.b).

    Mirrors fileprocnxosthreedigit but builds the full version from five
    components and reads the SMU bug id from index 6.
    """
    if debug1:
        print("\tSubroutine#\tfileprocnxosfivedigit")
    parts = filename.split(".")
    branch = util2digit (parts[1],parts[2])
    release = util5digit (parts[1],parts[2],parts[3],parts[4],parts[5])
    if imagecode == "FIRMWARE-EPLD":
        # EPLD archives keep the image code ahead of the version folders.
        destination = filepath4 (prodname,imagecode,branch,release)
    elif imagecode == "SMU":
        # SMUs add the bug id (the component after the version) as a level.
        destination = filepath5 (prodname,imagecode,branch,release,parts[6])
    else:
        destination = filepath4 (prodname,branch,release,imagecode)
    filemove (destination, filename)
def fileprocnxos1000v (debug1,filename,prodname):
    """Sort a Nexus 1000V distribution archive into its destination folder.

    Dispatches on the archive filename to decide whether the image targets
    VMware (SV trains), KVM (SK) or Hyper-V (SM), derives the version from
    the dotted filename components, and moves the file.
    """
    if debug1:
        print("\tSubroutine#\tfileprocnxos1000v")
    vmware_prefixes = (
        "Nexus1000v.5.2.1.SV",
        "Nexus1000v.4.2.1.SV",
        "nexus1000v.4.2.1.SV",
        "Nexus1000v.4.0.4.SV",
    )
    if filename.startswith(vmware_prefixes):
        base = filename.replace("-pkg.zip","").replace("zip","")
        pieces = base.split(".")
        release = util3digit (pieces[4],pieces[5],pieces[6])
        destination = filepath3 (prodname, imagelookup("vmware"), release)
        filemove (destination, filename)
    elif filename == "Nexus1000v-4.0.4.SV1.1.zip":
        # Dash-named legacy archive; version is hard-coded.
        destination = filepath3 (prodname, imagelookup("vmware"), util2digit ("SV1","1"))
        filemove (destination, filename)
    elif filename == "Nexus1000v-4.0.4.SV1.3.zip":
        destination = filepath3 (prodname, imagelookup("vmware"), util2digit ("SV1","3"))
        filemove (destination, filename)
    elif filename.startswith("Nexus1000v.5.2.1.SK"):
        base = filename.replace("-pkg.zip","").replace("zip","")
        pieces = base.split(".")
        release = util3digit (pieces[4],pieces[5],pieces[6])
        destination = filepath3 (prodname, imagelookup("kvm"), release)
        filemove (destination, filename)
    elif filename.startswith("Nexus1000V.5.2.1.SM"):
        base = filename.replace("-pkg.zip","").replace("zip","")
        pieces = base.split(".")
        release = util3digit (pieces[4],pieces[5],pieces[6])
        destination = filepath3 (prodname, imagelookup("hyperv"), release)
        filemove (destination, filename)
    elif filename == "Nexus1000V5.2.1.SM1.5.2.zip":
        # Oddly-named Hyper-V archive without a dot after the product name.
        destination = filepath3 (prodname, imagelookup("hyperv"), util3digit ("SM1","5","2"))
        filemove (destination, filename)
def fileprocessornxos9ksmu (filename,prodname,imagecode,debug1):
    """File a Nexus 9000 SMU rpm under its release and CSC bug id.

    filename  - SMU rpm name, e.g. "nxos.CSCxx12345-n9k_ALL-1.0.0-9.2.3...".
    prodname  - product folder already resolved by the caller.
    imagecode - the "smu" image-code folder component.
    debug1    - when truthy, print a trace line.
    """
    if debug1:
        print("\tSubroutine#\tfileprocessornxos9ksmu")
    splitbydot = filename.split(".")
    # Strip the platform/packaging decorations off the second dotted
    # component to leave the bare CSC bug id. NOTE(review): replacement
    # order matters — the longer, more specific suffixes must be removed
    # before the generic "-1" catch-all at the end.
    csc = splitbydot[1].replace("-n9k_ALL-1","")
    csc = csc.replace("_EOR-n9k_EOR-1","")
    csc = csc.replace("_TOR-n9k_TOR-1","")
    csc = csc.replace("_eth-n9k_TOR-1","")
    csc = csc.replace("_eth-n9k_EOR-1","")
    csc = csc.replace("-n9k_EOR-1","")
    csc = csc.replace("-n9k_TOR-1","")
    csc = csc.replace("_modular_lc-1","")
    csc = csc.replace("_modular_sup-1","")
    csc = csc.replace("01-1","")
    csc = csc.replace("-1","")
    # The third dotted component encodes the major release as "0-<major>"
    # (or "1-7" for some 7.x SMUs).
    if splitbydot[3] == "0-9":
        digitone = "9"
    elif splitbydot[3] == "0-10":
        digitone = "10"
    elif splitbydot[3] == "0-8":
        digitone = "8"
    elif splitbydot[3] == "0-7" or splitbydot[3] == "1-7":
        digitone = "7"
    # NOTE(review): there is no else branch — an unrecognized release marker
    # leaves digitone unbound and raises NameError below; presumably only
    # the releases listed above ever occur. Confirm before extending.
    if digitone == "9" or digitone == "10":
        # 8/9/10 trains use three-component versions.
        nxosver = util2digit (digitone,splitbydot[4])
        nxosfull = util3digit (digitone,splitbydot[4],splitbydot[5])
        filepath = filepath5 (prodname,imagecode,nxosver,nxosfull,csc)
        filemove (filepath, filename)
    elif digitone == "7":
        # 7.x trains use five-component versions.
        nxosver = util2digit (digitone,splitbydot[4])
        nxosfull = util5digit (digitone,splitbydot[4],splitbydot[5],splitbydot[6],splitbydot[7])
        filepath = filepath5 (prodname,imagecode,nxosver,nxosfull,csc)
        filemove (filepath, filename)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import EnumMeta
from google.protobuf import descriptor_pb2
from proto.primitives import ProtoType
class Field:
    """A representation of a type of field in protocol buffers."""
    def __init__(self, proto_type, *, number: int,
                 message=None, enum=None, oneof: str = None,
                 json_name: str = None, optional: bool = False):
        """Store the field configuration.

        Args:
            proto_type: either a ``ProtoType`` integer, or — shortcut
                syntax — a message class, enum class, or string naming one.
            number: the field number within the enclosing message.
            message: the message type, when not given via ``proto_type``.
            enum: the enum type, when not given via ``proto_type``.
            oneof: name of the oneof this field belongs to, if any.
            json_name: explicit JSON name for the field descriptor.
            optional: whether to mark the field as proto3 optional.
        """
        # This class is not intended to stand entirely alone;
        # data is augmented by the metaclass for Message.
        self.mcls_data = {}
        self.parent = None
        # If the proto type sent is an object or a string, it is really
        # a message or enum.
        if not isinstance(proto_type, int):
            # Note: We only support the "shortcut syntax" for enums
            # when receiving the actual class.
            if isinstance(proto_type, EnumMeta):
                enum = proto_type
                proto_type = ProtoType.ENUM
            else:
                message = proto_type
                proto_type = ProtoType.MESSAGE
        # Save the direct arguments.
        self.number = number
        self.proto_type = proto_type
        self.message = message
        self.enum = enum
        self.json_name = json_name
        self.optional = optional
        self.oneof = oneof
        # Fields are neither repeated nor maps.
        # The RepeatedField and MapField subclasses override these values
        # in their initializers.
        self.repeated = False
        # Once the descriptor is accessed the first time, cache it.
        # This is important because in rare cases the message or enum
        # types are written later.
        self._descriptor = None
    @property
    def descriptor(self):
        """Return the descriptor for the field."""
        proto_type = self.proto_type
        if not self._descriptor:
            # Resolve the message type, if any, to a string.
            type_name = None
            if isinstance(self.message, str):
                # Qualify a bare message name with this field's package so
                # the descriptor always carries a fully-qualified name.
                if not self.message.startswith(self.package):
                    self.message = '{package}.{name}'.format(
                        package=self.package,
                        name=self.message,
                    )
                type_name = self.message
            elif self.message:
                # Either a generated protobuf class (has DESCRIPTOR) or a
                # wrapper message type carrying its own meta information.
                if hasattr(self.message, 'DESCRIPTOR'):
                    type_name = self.message.DESCRIPTOR.full_name
                else:
                    type_name = self.message.meta.full_name
            elif self.enum:
                # Nos decipiat.
                #
                # As far as the wire format is concerned, enums are int32s.
                # Protocol buffers itself also only sends ints; the enum
                # objects are simply helper classes for translating names
                # and values and it is the user's job to resolve to an int.
                #
                # Therefore, the non-trivial effort of adding the actual
                # enum descriptors seems to add little or no actual value.
                #
                # FIXME: Eventually, come back and put in the actual enum
                # descriptors.
                proto_type = ProtoType.INT32
            # Set the descriptor.
            # label=3 is LABEL_REPEATED, label=1 is LABEL_OPTIONAL.
            self._descriptor = descriptor_pb2.FieldDescriptorProto(
                name=self.name,
                number=self.number,
                label=3 if self.repeated else 1,
                type=proto_type,
                type_name=type_name,
                json_name=self.json_name,
                proto3_optional=self.optional,
            )
        # Return the descriptor.
        return self._descriptor
    @property
    def name(self) -> str:
        """Return the name of the field."""
        # Populated by the Message metaclass via mcls_data.
        return self.mcls_data['name']
    @property
    def package(self) -> str:
        """Return the package of the field."""
        # Populated by the Message metaclass via mcls_data.
        return self.mcls_data['package']
    @property
    def pb_type(self):
        """Return the composite type of the field, or None for primitives."""
        # For enums, return the Python enum.
        if self.enum:
            return self.enum
        # For non-enum primitives, return None.
        if not self.message:
            return None
        # Return the internal protobuf message.
        if hasattr(self.message, '_meta'):
            return self.message.pb()
        return self.message
class RepeatedField(Field):
    """A representation of a repeated field in protocol buffers."""
    def __init__(self, proto_type, *, number: int,
                 message=None, enum=None):
        """Initialize like ``Field``, then mark the field as repeated."""
        super().__init__(proto_type, number=number, message=message, enum=enum)
        # Flip the flag Field.__init__ defaults to False; the descriptor
        # property emits label=3 (repeated) when this is set.
        self.repeated = True
class MapField(Field):
    """A representation of a map field in protocol buffers."""
    def __init__(self, key_type, value_type, *, number: int,
                 message=None, enum=None):
        """Initialize the value side like an ordinary ``Field`` and retain
        the key type."""
        super().__init__(value_type, number=number, message=message, enum=enum)
        # Key type is stored for later use; presumably consumed when the
        # map-entry message is synthesized — it is not read in this class.
        self.map_key_type = key_type
# Public API of this module.
__all__ = (
    'Field',
    'MapField',
    'RepeatedField',
)
|
#!/usr/bin/env python
CopyRight = '''
/**************************************************************************
*
* Copyright 2010 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* @file
* SRGB translation.
*
* @author <NAME> <<EMAIL>>
* @author <NAME> <<EMAIL>>
* @author <NAME> <<EMAIL>>
*/
'''
import math
import struct
def srgb_to_linear(x):
    """Convert an sRGB-encoded value in [0, 1] to linear light."""
    # Below the threshold the sRGB transfer curve is a straight segment;
    # above it, a 2.4-exponent power curve.
    return x / 12.92 if x <= 0.04045 else math.pow((x + 0.055) / 1.055, 2.4)
def linear_to_srgb(x):
    """Convert a linear-light value in [0, 1] to sRGB encoding."""
    # Small values use the linear segment of the sRGB transfer curve.
    if x < 0.0031308:
        return 12.92 * x
    # NOTE: 0.41666666 approximates 1/2.4, as in the original table generator.
    return 1.055 * math.pow(x, 0.41666666) - 0.055
def generate_srgb_tables():
    '''Print the C lookup tables for sRGB <-> linear conversion to stdout.

    Emits three 256-entry tables (srgb unorm8 -> linear float,
    srgb unorm8 -> linear unorm8, linear unorm8 -> srgb unorm8) and a
    helper table for the float linear -> srgb unorm8 conversion.
    NOTE: Python 2 print statements; trailing commas rely on softspace
    behaviour to separate entries, so the code must not be modernized
    without re-checking the generated output.
    '''
    # 256-entry table: srgb unorm8 index -> linear float.
    print 'const float'
    print 'util_format_srgb_8unorm_to_linear_float_table[256] = {'
    for j in range(0, 256, 4):
        print ' ',
        for i in range(j, j + 4):
            print '%.7e,' % (srgb_to_linear(i / 255.0),),
        print
    print '};'
    print
    # 256-entry table: srgb unorm8 index -> linear unorm8 (rounded).
    print 'const uint8_t'
    print 'util_format_srgb_to_linear_8unorm_table[256] = {'
    for j in range(0, 256, 16):
        print ' ',
        for i in range(j, j + 16):
            print '%3u,' % (int(srgb_to_linear(i / 255.0) * 255.0 + 0.5),),
        print
    print '};'
    print
    # 256-entry table: linear unorm8 index -> srgb unorm8 (rounded).
    print 'const uint8_t'
    print 'util_format_linear_to_srgb_8unorm_table[256] = {'
    for j in range(0, 256, 16):
        print ' ',
        for i in range(j, j + 16):
            print '%3u,' % (int(linear_to_srgb(i / 255.0) * 255.0 + 0.5),),
        print
    print '};'
    print
    # calculate the table interpolation values used in float linear to unorm8 srgb
    numexp = 13
    mantissa_msb = 3
    # stepshift is just used to only use every x-th float to make things faster,
    # 5 is largest value which still gives exact same table as 0
    stepshift = 5
    nbuckets = numexp << mantissa_msb
    bucketsize = (1 << (23 - mantissa_msb)) >> stepshift
    mantshift = 12
    valtable = []
    # Per-bucket linear least-squares fit (a + b*j): precompute the
    # normal-equation sums that depend only on j.
    sum_aa = float(bucketsize)
    sum_ab = 0.0
    sum_bb = 0.0
    for i in range(0, bucketsize):
        j = (i << stepshift) >> mantshift
        sum_ab += j
        sum_bb += j*j
    inv_det = 1.0 / (sum_aa * sum_bb - sum_ab * sum_ab)
    for bucket in range(0, nbuckets):
        # First float (as an int bit pattern) covered by this bucket.
        start = ((127 - numexp) << 23) + bucket*(bucketsize << stepshift)
        sum_a = 0.0
        sum_b = 0.0
        for i in range(0, bucketsize):
            j = (i << stepshift) >> mantshift
            fint = start + (i << stepshift)
            # Reinterpret the int bit pattern as an IEEE-754 float.
            ffloat = struct.unpack('f', struct.pack('I', fint))[0]
            val = linear_to_srgb(ffloat) * 255.0 + 0.5
            sum_a += val
            sum_b += j*val
        # Solve the 2x2 normal equations for the fitted line coefficients.
        solved_a = inv_det * (sum_bb*sum_a - sum_ab*sum_b)
        solved_b = inv_det * (sum_aa*sum_b - sum_ab*sum_a)
        scaled_a = solved_a * 65536.0 / 512.0
        scaled_b = solved_b * 65536.0
        int_a = int(scaled_a + 0.5)
        int_b = int(scaled_b + 0.5)
        # Pack both fixed-point coefficients into one 32-bit table entry.
        valtable.append((int_a << 16) + int_b)
    print 'const unsigned'
    print 'util_format_linear_to_srgb_helper_table[104] = {'
    for j in range(0, nbuckets, 4):
        print ' ',
        for i in range(j, j + 4):
            print '0x%08x,' % (valtable[i],),
        print
    print '};'
    print
def main():
    '''Print the complete autogenerated C source (header + tables) to stdout.'''
    print '/* This file is autogenerated by u_format_srgb.py. Do not edit directly. */'
    print
    # This will print the copyright message on the top of this file
    print CopyRight.strip()
    print
    print '#include "format_srgb.h"'
    print
    generate_srgb_tables()


if __name__ == '__main__':
    main()
|
<reponame>yangchengtest/onnx-tf-atlas
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import inspect
import tensorflow as tf
from onnx_tf.common import IS_PYTHON3
from onnx_tf.common import get_data_format
from onnx_tf.common import get_perm_from_formats
from onnx_tf.common import supports_device
from .handler import Handler
class BackendHandler(Handler):
    """Base class for all backend operator handlers.

    Every backend operator handler class MUST inherit from this class.
    In the backend, a handler class's name should be the Pascal-case form
    of its snake-case file name, using the ONNX operator name.
    """

    # Default TensorFlow callable for this operator; subclasses override it.
    TF_FUNC = None

    @classmethod
    def get_attrs_processor_param(cls):
        """ Get param for attrs processor.

        :return: Dict.
        """
        return {}

    @classmethod
    def _process_attrs(cls, attrs):
        """ Private method for processing attrs.

        Param for this processor got from `get_attrs_processor_param`.
        Param is a dict containing two keys: `default` and `rename`.
        First add default value to attrs if key does not exist.
        Second rename key to new key.

        For example:
          attrs = {"keep_dims": True}
          param = {"default": {"axis": 1},
                   "rename": {"keep_dims": "keepdims"}}
          processed_attrs = {"axis": 1, "keepdims": True}

        :param attrs: Process target attrs.
        :return: Processed attrs.
        """
        param = {"rename": {}, "default": {}}
        param.update(cls.get_attrs_processor_param())

        # Fill in defaults first, then apply renames.
        for k, v in param["default"].items():
            attrs.setdefault(k, v)

        for k, new_k in param["rename"].items():
            if k in attrs:
                attrs[new_k] = attrs.pop(k)

        return attrs

    @classmethod
    def make_tensor_from_onnx_node(cls,
                                   node,
                                   tf_func=None,
                                   inputs=None,
                                   attrs=None,
                                   name="",
                                   c_first_cuda_only=False,
                                   c_last_only=False,
                                   **kwargs):
        """ Helper method to make tensor.

        :param node: OnnxNode object.
        :param tf_func: Callable Tf function. Default is cls.TF_FUNC.
        :param inputs: Inputs tensor. Default is got from node.inputs.
        :param attrs: Attributes. Default is node.attrs.
        :param name: Node name.
        :param c_first_cuda_only: If channel first is only supported by cuda.
            If true and not cuda, do pre and post transpose.
        :param c_last_only: If only channel last is supported,
            do pre and post transpose.
        :param kwargs: Other args.
        :return: Tensor.
        """
        tensor_dict = kwargs.get("tensor_dict", {})
        tf_func = tf_func or cls.TF_FUNC
        if tf_func is None:
            raise RuntimeError("No Tensorflow function is given.")
        if inputs is None:
            inputs = [tensor_dict.get(inp, None) for inp in node.inputs]
        if attrs is None:
            # Deep-copy so attr processing never mutates the node itself.
            attrs = copy.deepcopy(node.attrs)
        name = name or node.name
        if name != "":
            attrs["name"] = name

        if c_first_cuda_only and c_last_only:
            raise ValueError(
                "c_first_cuda_only and c_last_only can not both be True.")

        # Transposes are only needed when the incoming data is channel-first.
        input_format = kwargs.get("input_format", "NCHW")
        if input_format == "NCHW":
            if c_first_cuda_only:
                return cls.c_first_cuda_only(tf_func, inputs, attrs)
            elif c_last_only:
                return cls.c_last_only(tf_func, inputs, attrs)

        return cls._run_tf_func(tf_func, inputs, attrs)

    @classmethod
    def c_first_cuda_only(cls, tf_func, inputs, attrs):
        """ Handle operator that channel first is only supported by CUDA.
        When using CPU, two transposes should be added.

        :param tf_func: Callable Tf function.
        :param inputs: Inputs tensor.
        :param attrs: Attributes.
        :return: Tensor.
        """
        support_cuda = supports_device("CUDA")
        if not support_cuda:
            return cls._tuck_transpose(tf_func, inputs, attrs)
        return cls._run_tf_func(tf_func, inputs, attrs)

    @classmethod
    def c_last_only(cls, tf_func, inputs, attrs):
        """ Handle operator that channel last only is supported.
        Add two transposes anyway.

        :param tf_func: Callable Tf function.
        :param inputs: Inputs tensor.
        :param attrs: Attributes.
        :return: Tensor.
        """
        storage_format, compute_format = get_data_format(len(inputs[0].get_shape()))
        # Force the channel dimension to the last position.
        compute_format = compute_format.replace("C", "") + "C"
        return cls._tuck_transpose(tf_func, inputs, attrs,
                                   (storage_format, compute_format))

    @classmethod
    def _tuck_transpose(cls, tf_func, inputs, attrs, data_format=None):
        """Run tf_func with the first input transposed to the compute format,
        then transpose the result back to the storage format."""
        x = inputs[0]
        x_rank = len(x.get_shape())
        if not data_format:
            data_format = get_data_format(x_rank)
        pre_perm = get_perm_from_formats(data_format[0], data_format[1])
        post_perm = get_perm_from_formats(data_format[1], data_format[0])
        attrs["data_format"] = data_format[1]
        # Skip the transposes entirely when they would be identity permutations.
        if pre_perm != list(range(x_rank)):
            x_t = tf.transpose(x, perm=pre_perm)
            y = cls._run_tf_func(tf_func, [x_t] + inputs[1:], attrs)
            y_t = tf.transpose(y, perm=post_perm)
            return y_t
        return cls._run_tf_func(tf_func, inputs, attrs)

    @classmethod
    def _run_tf_func(cls, tf_func, inputs, attrs):
        """ Run Tensorflow function.
        Use only acceptable attributes of function from attrs.

        :param tf_func: Tensorflow function.
        :param inputs: Inputs.
        :param attrs: Attributes.
        :return: Tensor.
        """
        if IS_PYTHON3:
            params = list(inspect.signature(tf_func).parameters.keys())
        else:
            # Python 2: unwrap decorated functions via the closure so that
            # getargspec sees the real signature.
            if tf_func.__closure__ is not None:
                while "__wrapped__" in tf_func.func_dict:
                    tf_func = tf_func.func_dict["__wrapped__"]
                params = inspect.getargspec(tf_func).args
            else:
                params = inspect.getargspec(tf_func).args

        attrs = cls._process_attrs(attrs)
        # Pass through only the attributes the function actually accepts.
        return tf_func(*inputs, **{p: attrs[p] for p in params if p in attrs})
|
# Copyright 2014 SolidBuilds.com. All rights reserved
#
# Authors: <NAME> <<EMAIL>>
from flask import Blueprint, redirect, render_template
from flask import request, url_for
from flask_user import current_user, login_required, roles_required
from app import db
from app.models.employee_models import EmployeeProfileForm
from app.models.menuItem_models import Menu, MenuItem, MenuItems
from app.models.order_models import Order,OrderItems,OrderAssignments
from .view_helpers import *
import datetime
from datetime import timezone
import time
# Blueprint grouping the application's main customer- and staff-facing views.
main_blueprint = Blueprint('main', __name__, template_folder='templates')
# The Home page is accessible to anyone
@main_blueprint.route('/')
def home_page():
    """Redirect to the first restaurant's menu, or to the restaurant list."""
    restaurants = getRestaurants()
    if restaurants:
        first = restaurants[0]
        return redirect("/" + first.name + "/menu")
    return redirect('/restaurants')
@main_blueprint.route('/restaurants')
def restaurant_lading():
    """Render the landing page listing all restaurants.

    NOTE(review): "lading" looks like a typo for "landing", but the name is
    kept because it is the blueprint endpoint name used by url_for.
    """
    return render_template('restaurant/restaurants.html',
                           restaurants=getRestaurants())
# The Home page is accessible to anyone
@main_blueprint.route('/_users')
def users_page():
    """Render the users page (accessible to anyone)."""
    return render_template('main/_users.html')
# The User page is accessible to authenticated users (users that have logged in)
@main_blueprint.route('/<string:name>/menu')
def restaurant_menu_page(name=None):
    """Show the categorized menu for the named restaurant."""
    restaurant = getRestaurantByName(name)
    categorized = getCategorizedMenuItems(restaurant.getMenu().id)
    category_names = [entry['category_name'] for entry in categorized]
    return render_template('restaurant/menu.html',
                           restaurant=restaurant,
                           title="Menu for " + name,
                           categorizedItems=categorized,
                           categories=category_names)
@main_blueprint.route('/<string:name>/order/<int:order_id>')
def restaurant_order_status(name=None, order_id=None):
    """Show the status page for a single order at the named restaurant.

    The route converters supply both values in practice; the guard below is
    purely defensive.
    """
    # Use identity comparison with None (PEP 8), not ``== None``.
    if order_id is None or name is None:
        return
    order = getOrderByID(order_id)
    restaurant = getRestaurantByName(name)
    # Convert the stored UTC timestamp to the server's local timezone.
    ordertime = order.order_placed.replace(tzinfo=timezone.utc).astimezone(tz=None)
    return render_template('restaurant/order_status.html',
                           title="Order Status ",
                           purpose="Order Status",
                           order=order,
                           restaurant=restaurant,
                           ordertime=ordertime)
@main_blueprint.route('/manage/employee/report')
@login_required
@roles_required('owner')  # Limits access to users with the 'owner' role
def get_employee_report():
    """Render employee reports for the current and the previous week."""
    today = datetime.datetime.utcnow()
    # Monday of the current week through the following Sunday.
    startDate = today - datetime.timedelta(days=today.weekday())
    endDate = startDate + datetime.timedelta(days=6)
    # The full week immediately preceding.
    lstartDate = startDate - datetime.timedelta(days=7)
    lendDate = startDate - datetime.timedelta(days=1)
    # Build the list directly; the previous unused dict locals were removed.
    reports = [
        generateEmployeeReportForInterval(startDate, endDate, 'This Week: '),
        generateEmployeeReportForInterval(lstartDate, lendDate, 'Last Week: '),
    ]
    return render_template('main/employee_report.html',
                           purpose="Report on Employees",
                           title="Menu Managment",
                           reports=reports)
@main_blueprint.route('/manage/order/report')
@login_required
@roles_required('owner')  # Limits access to users with the 'owner' role
def get_order_report():
    """Render restaurant order reports for the current and previous week."""
    today = datetime.datetime.utcnow()
    # Monday of the current week through the following Sunday.
    startDate = today - datetime.timedelta(days=today.weekday())
    endDate = startDate + datetime.timedelta(days=6)
    # The full week immediately preceding.
    lstartDate = startDate - datetime.timedelta(days=7)
    lendDate = startDate - datetime.timedelta(days=1)
    # Build the list directly; the previous unused dict locals were removed.
    reports = [
        generateOrderReportForInterval(startDate, endDate, 'This Week: '),
        generateOrderReportForInterval(lstartDate, lendDate, 'Last Week: '),
    ]
    return render_template('main/order_report.html',
                           purpose="Report on Restaurant Orders",
                           title="Report on Orders",
                           reports=reports)
# The User page is accessible to authenticated users (users that have logged in)
@main_blueprint.route('/manage/menu')
@login_required
@roles_required('owner')  # Limits access to users with the 'owner' role
def add_menu_page():
    """Render the menu-management page with all menus and menu items."""
    return render_template('menu/add_menu_page.html',
                           purpose="Menu Mangment",
                           title="Menu Managment",
                           menus=getMenus(),
                           menuItems=getItems())
@main_blueprint.route('/manage/menuitems')
@login_required
def menu_item_manager():
    """Render the menu-item management page for any logged-in user."""
    items = getItems()
    owner = current_user.has_roles('owner')
    return render_template('menuitem/menu_page.html',
                           purpose="Menu Item Managment",
                           title="Item Managment",
                           menuItems=items,
                           editable=True,
                           isOwner=owner)
@main_blueprint.route('/manage/orders')
@login_required
def order_managment():
    """Render the order-management page for all restaurants with open orders."""
    return render_template('restaurant/orders.html',
                           purpose="Order Managment",
                           title="Order Managment",
                           restaurants_with_orders=getRestaurantOrders(),
                           timezone=timezone)
@main_blueprint.route('/manage/restaurant')
@login_required
@roles_required('owner')  # Limits access to users with the 'owner' role
def restaurant_manager():
    """Render the restaurant-management page."""
    return render_template('restaurant/restaurant_page.html',
                           purpose="Restaurant Mangment",
                           title="Restaurant Managment",
                           restaurants=getRestaurants(),
                           employees=getEmployees(),
                           menus=getMenus())
@main_blueprint.route('/edit/menu/<int:id>')
@main_blueprint.route('/edit/menu/s/<string:name>')
@login_required  # Limits access to authenticated users
# NOTE(review): per Flask-User, a list argument means ANY of these roles.
@roles_required(['owner','waiter'])
def edit_menu_page(id=None, name=None):
    """Render the editable menu page, looked up by menu id or by name."""
    items = getMenuItems(id, name)
    return render_template('menu/menu_page.html',
                           title="Edit Menu",
                           menuItems=items,
                           editable=True,
                           isOwner=current_user.has_roles('owner'),
                           isWaiter=current_user.has_roles('waiter'))
# The Admin page is accessible to users with the 'owner' role
@main_blueprint.route('/edit/site')
@roles_required('owner')  # Limits access to users with the 'owner' role
def admin_page():
    """Render the site administration page."""
    return render_template('main/admin_page.html')
@main_blueprint.route('/main/profile', methods=['GET', 'POST'])
@login_required
def user_profile_page():
    """Show and process the profile form for the logged-in employee."""
    form = EmployeeProfileForm(request.form, obj=current_user)

    if request.method == 'POST' and form.validate():
        # Copy the submitted fields onto the current user and persist them.
        form.populate_obj(current_user)
        db.session.commit()
        # NOTE(review): no 'menu_page' endpoint is defined in this blueprint;
        # url_for('main.menu_page') may raise BuildError — confirm.
        return redirect(url_for('main.menu_page'))

    # GET request, or invalid POST: (re-)render the form.
    return render_template('main/user_profile_page.html', form=form)
|
<gh_stars>10-100
import os
import sys
import json
import unittest
import numpy as np
from skimage.morphology import label
from elf.evaluation import rand_index
import luigi
import z5py
import nifty.tools as nt
# Import the shared BaseTest. When this module is run directly (no parent
# package) the relative import fails; fall back to a path-based import.
# Depending on the Python version, the failed relative import raises
# ImportError or ValueError, so catch both.
try:
    from ..base import BaseTest
except (ImportError, ValueError):
    sys.path.append('..')
    from base import BaseTest
class TestThresholdedComponents(BaseTest):
    """Tests for the ThresholdedComponentsWorkflow cluster task."""
    input_key = 'volumes/boundaries'
    output_key = 'data'
    assignment_key = 'assignments'

    def _check_result(self, mode, check_for_equality=True, threshold=.5):
        """Compare the workflow output against skimage connected components.

        :param mode: thresholding mode, one of 'greater', 'less', 'equal'.
        :param check_for_equality: if True, require a near-perfect rand index.
        :param threshold: threshold applied to the input map.
        """
        with z5py.File(self.output_path) as f:
            res = f[self.output_key][:]
        with z5py.File(self.input_path) as f:
            inp = f[self.input_key][:]
        if mode == 'greater':
            expected = label(inp > threshold)
        elif mode == 'less':
            expected = label(inp < threshold)
        elif mode == 'equal':
            expected = label(inp == threshold)
        else:
            # Fail loudly instead of hitting a NameError on 'expected' below.
            raise ValueError("Invalid threshold mode: %s" % mode)
        self.assertEqual(res.shape, expected.shape)
        if check_for_equality:
            score = rand_index(res, expected)[0]
            self.assertAlmostEqual(score, 0., places=4)

    def _test_mode(self, mode, threshold=.5):
        """Run the full workflow for ``mode`` and validate its result."""
        from cluster_tools.thresholded_components import ThresholdedComponentsWorkflow
        task = ThresholdedComponentsWorkflow(tmp_folder=self.tmp_folder,
                                             config_dir=self.config_folder,
                                             target=self.target, max_jobs=self.max_jobs,
                                             input_path=self.input_path,
                                             input_key=self.input_key,
                                             output_path=self.output_path,
                                             output_key=self.output_key,
                                             assignment_key=self.assignment_key,
                                             threshold=threshold, threshold_mode=mode)
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        self._check_result(mode, threshold=threshold)

    def test_greater(self):
        self._test_mode('greater')

    def test_less(self):
        self._test_mode('less')

    def test_equal(self):
        self._test_mode('equal', threshold=0)

    @unittest.skip("debugging test")
    def test_first_stage(self):
        """Debug helper: run only the block-components stage."""
        from cluster_tools.thresholded_components.block_components import BlockComponentsLocal
        from cluster_tools.utils.task_utils import DummyTask
        task = BlockComponentsLocal(tmp_folder=self.tmp_folder,
                                    config_dir=self.config_folder,
                                    max_jobs=8,
                                    input_path=self.input_path,
                                    input_key=self.input_key,
                                    output_path=self.output_path,
                                    output_key=self.output_key,
                                    threshold=.5,
                                    dependency=DummyTask())
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        # Per-block labels are not merged yet, so skip the equality check.
        self._check_result('greater', check_for_equality=False)

    @unittest.skip("debugging test")
    def test_second_stage(self):
        """Debug helper: run block components followed by offset merging."""
        from cluster_tools.thresholded_components.block_components import BlockComponentsLocal
        from cluster_tools.thresholded_components.merge_offsets import MergeOffsetsLocal
        from cluster_tools.utils.task_utils import DummyTask
        task1 = BlockComponentsLocal(tmp_folder=self.tmp_folder,
                                     config_dir=self.config_folder,
                                     max_jobs=8,
                                     input_path=self.input_path,
                                     input_key=self.input_key,
                                     output_path=self.output_path,
                                     output_key=self.output_key,
                                     threshold=.5,
                                     dependency=DummyTask())
        offset_path = './tmp/offsets.json'
        with z5py.File(self.input_path) as f:
            shape = f[self.input_key].shape
        task = MergeOffsetsLocal(tmp_folder=self.tmp_folder,
                                 config_dir=self.config_folder,
                                 max_jobs=8,
                                 shape=shape,
                                 save_path=offset_path,
                                 dependency=task1)
        ret = luigi.build([task], local_scheduler=True)
        self.assertTrue(ret)
        self.assertTrue(os.path.exists(offset_path))

        # checks
        # load offsets from file
        with open(offset_path) as f:
            offsets_dict = json.load(f)
        offsets = offsets_dict['offsets']
        max_offset = int(offsets_dict['n_labels']) - 1

        # load output segmentation
        with z5py.File(self.output_path) as f:
            seg = f[self.output_key][:]

        # Per block, the number of unique labels must match the offset deltas.
        blocking = nt.blocking([0, 0, 0], list(shape), self.block_shape)
        for block_id in range(blocking.numberOfBlocks):
            block = blocking.getBlock(block_id)
            bb = tuple(slice(beg, end)
                       for beg, end in zip(block.begin, block.end))
            segb = seg[bb]
            n_labels = len(np.unique(segb))
            # number of labels from offsets
            if block_id < blocking.numberOfBlocks - 1:
                n_offsets = offsets[block_id + 1] - offsets[block_id]
            else:
                n_offsets = max_offset - offsets[block_id]
            self.assertEqual(n_labels, n_offsets)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import re
import responses
from ibm_cloud_networking_services.cis_ip_api_v1 import *
# Module-level service instance under test; NoAuthAuthenticator avoids
# needing real credentials, and all HTTP calls are mocked via `responses`.
service = CisIpApiV1(
    authenticator=NoAuthAuthenticator()
    )

base_url = 'https://api.cis.cloud.ibm.com'
service.set_service_url(base_url)
##############################################################################
# Start of Service: IP
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for list_ips
#-----------------------------------------------------------------------------
class TestListIps():
    """Unit tests for the list_ips operation."""

    def preprocess_url(self, request_url: str):
        """Preprocess the request URL so the mock response will be found."""
        if re.fullmatch('.*/+', request_url) is None:
            return request_url
        # URL ends in slashes: match any number of trailing slashes.
        return re.compile(request_url.rstrip('/') + '/+')

    @responses.activate
    def test_list_ips_all_params(self):
        """Invoke list_ips() against a mocked endpoint and verify the call."""
        # Register the mock response.
        url = self.preprocess_url(base_url + '/v1/ips')
        mock_response = '{"success": true, "errors": [["errors"]], "messages": [["messages"]], "result": {"ipv4_cidrs": ["172.16.58.3/20"], "ipv6_cidrs": ["2400:cb00::/32"]}}'
        responses.add(responses.GET,
                      url,
                      body=mock_response,
                      content_type='application/json',
                      status=200)

        # Invoke the method.
        response = service.list_ips()

        # Exactly one HTTP call must have been made, and it must succeed.
        assert len(responses.calls) == 1
        assert response.status_code == 200
# endregion
##############################################################################
# End of Service: IP
##############################################################################
##############################################################################
# Start of Model Tests
##############################################################################
# region
#-----------------------------------------------------------------------------
# Test Class for IpResponseResult
#-----------------------------------------------------------------------------
class TestIpResponseResult():
    """Serialization/deserialization tests for IpResponseResult."""

    def test_ip_response_result_serialization(self):
        # JSON representation of an IpResponseResult model.
        source_json = {
            'ipv4_cidrs': ['172.16.58.3/20'],
            'ipv6_cidrs': ['2400:cb00::/32'],
        }

        # Build a model instance from the JSON representation.
        model = IpResponseResult.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first one's attributes and
        # verify the two instances are equivalent.
        rebuilt = IpResponseResult(**IpResponseResult.from_dict(source_json).__dict__)
        assert model == rebuilt

        # Converting back to a dict must lose no data.
        assert model.to_dict() == source_json
#-----------------------------------------------------------------------------
# Test Class for IpResponse
#-----------------------------------------------------------------------------
class TestIpResponse():
    """Serialization/deserialization tests for IpResponse."""

    def test_ip_response_serialization(self):
        # Nested IpResponseResult model, expressed as a plain dict.
        result_dict = {
            'ipv4_cidrs': ['172.16.58.3/20'],
            'ipv6_cidrs': ['2400:cb00::/32'],
        }

        # JSON representation of an IpResponse model.
        source_json = {
            'success': True,
            'errors': [['testString']],
            'messages': [['testString']],
            'result': result_dict,
        }

        # Build a model instance from the JSON representation.
        model = IpResponse.from_dict(source_json)
        assert model != False

        # Rebuild a second instance from the first one's attributes and
        # verify the two instances are equivalent.
        rebuilt = IpResponse(**IpResponse.from_dict(source_json).__dict__)
        assert model == rebuilt

        # Converting back to a dict must lose no data.
        assert model.to_dict() == source_json
# endregion
##############################################################################
# End of Model Tests
##############################################################################
|
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals, division
from eight import *
from bw2analyzer.sc_graph import GTManipulator
from bw2data import (
Database,
databases,
geomapping,
mapping,
methods,
projects,
)
from bw2data.tests import BW2DataTest
import unittest
import copy
class UnrollGraphTestCase(unittest.TestCase):
def test_simple_chain(self):
nodes = {
-1: {"amount": 1, "cum": 1, "ind": 0},
10: {"amount": 1, "cum": 1, "ind": 1},
11: {"amount": 0.5, "cum": 0.5, "ind": 1},
12: {"amount": 0.1, "cum": 0.1, "ind": 1},
}
edges = [
{"to": -1, "from": 10, "amount": 1, "exc_amount": 1, "impact": 1},
{"to": 10, "from": 11, "amount": 0.5, "exc_amount": 0.5, "impact": 0.5},
{"to": 11, "from": 12, "amount": 0.1, "exc_amount": 0.2, "impact": 0.1},
]
nodes, edges, count = GTManipulator.unroll_graph(nodes, edges, 1)
ref_nodes = {
-1: {"ind": 0, "amount": 1, "cum": 1},
0: {"ind": 1, "amount": 1, "cum": 1, "row": 10},
1: {"ind": 1, "amount": 0.5, "cum": 0.5, "row": 11},
2: {"ind": 1, "amount": 0.1, "cum": 0.10000000000000002, "row": 12},
}
ref_edges = [
{"impact": 1, "to": -1, "amount": 1, "exc_amount": 1, "from": 0},
{"impact": 0.5, "to": 0, "amount": 0.5, "exc_amount": 0.5, "from": 1},
{
"impact": 0.10000000000000002,
"to": 1,
"amount": 0.1,
"exc_amount": 0.2,
"from": 2,
},
]
self.assertEqual(count, 3)
self.assertEqual(nodes, ref_nodes)
self.assertEqual(edges, ref_edges)
def test_multiple_inputs(self):
nodes = {
-1: {"amount": 1, "cum": 1, "ind": 0},
10: {"amount": 1, "cum": 1, "ind": 1},
11: {"amount": 0.5, "cum": 0.5, "ind": 1},
12: {"amount": 0.5, "cum": 0.5, "ind": 1},
}
edges = [
{"to": -1, "from": 10, "amount": 1, "exc_amount": 1, "impact": 1},
{"to": 10, "from": 11, "amount": 0.5, "exc_amount": 0.5, "impact": 0.5},
{"to": 10, "from": 12, "amount": 0.5, "exc_amount": 0.5, "impact": 0.5},
]
nodes, edges, count = GTManipulator.unroll_graph(nodes, edges, 1)
ref_nodes = {
-1: {"ind": 0, "amount": 1, "cum": 1},
0: {"ind": 1, "amount": 1, "cum": 1, "row": 10},
1: {"ind": 1, "amount": 0.5, "cum": 0.5, "row": 11},
2: {"ind": 1, "amount": 0.5, "cum": 0.5, "row": 12},
}
ref_edges = [
{"impact": 1, "to": -1, "amount": 1, "exc_amount": 1, "from": 0},
{"impact": 0.5, "to": 0, "amount": 0.5, "exc_amount": 0.5, "from": 1},
{"impact": 0.5, "to": 0, "amount": 0.5, "exc_amount": 0.5, "from": 2},
]
self.assertEqual(count, 3)
self.assertEqual(nodes, ref_nodes)
self.assertEqual(edges, ref_edges)
def test_pruning(self):
nodes = {
-1: {"amount": 1, "cum": 1, "ind": 0},
10: {"amount": 1, "cum": 1, "ind": 1},
11: {"amount": 0.5, "cum": 0.5, "ind": 1},
12: {"amount": 0.001, "cum": 0.001, "ind": 1},
}
edges = [
{"to": -1, "from": 10, "amount": 1, "exc_amount": 1, "impact": 1},
{"to": 10, "from": 11, "amount": 0.5, "exc_amount": 0.5, "impact": 0.5},
{
"to": 10,
"from": 12,
"amount": 0.001,
"exc_amount": 0.002,
"impact": 0.001,
},
]
nodes, edges, count = GTManipulator.unroll_graph(nodes, edges, 1)
ref_nodes = {
-1: {"ind": 0, "amount": 1, "cum": 1},
0: {"ind": 1, "amount": 1, "cum": 1, "row": 10},
1: {"ind": 1, "amount": 0.5, "cum": 0.5, "row": 11},
}
ref_edges = [
{"impact": 1, "to": -1, "amount": 1, "exc_amount": 1, "from": 0},
{"impact": 0.5, "to": 0, "amount": 0.5, "exc_amount": 0.5, "from": 1},
]
self.assertEqual(count, 3)
self.assertEqual(nodes, ref_nodes)
self.assertEqual(edges, ref_edges)
def test_unroll_circular(self):
nodes = {
-1: {"amount": 1, "cum": 1, "ind": 0},
10: {"amount": 1, "cum": 1, "ind": 1},
11: {"amount": 1, "cum": 1, "ind": 1},
}
edges = [
{"to": -1, "from": 10, "amount": 1, "exc_amount": 1, "impact": 1},
{"to": 10, "from": 11, "amount": 1, "exc_amount": 0.8, "impact": 1},
{"to": 11, "from": 10, "amount": 1, "exc_amount": 0.8, "impact": 1},
]
nodes, edges, count = GTManipulator.unroll_graph(nodes, edges, 1, cutoff=0.4)
ref_nodes = {
-1: {"ind": 0, "amount": 1, "cum": 1},
0: {"ind": 1, "amount": 1, "cum": 1, "row": 10},
1: {"ind": 1, "amount": 0.8, "cum": 0.8, "row": 11},
2: {
"ind": 1,
"amount": 0.6400000000000001,
"cum": 0.6400000000000001,
"row": 10,
},
3: {
"ind": 1,
"amount": 0.5120000000000001,
"cum": 0.5120000000000001,
"row": 11,
},
4: {
"ind": 1,
"amount": 0.40960000000000013,
"cum": 0.40960000000000013,
"row": 10,
},
}
ref_edges = [
{"impact": 1, "to": -1, "amount": 1, "exc_amount": 1, "from": 0},
{"impact": 0.8, "to": 0, "amount": 0.8, "exc_amount": 0.8, "from": 1},
{
"impact": 0.6400000000000001,
"to": 1,
"amount": 0.6400000000000001,
"exc_amount": 0.8,
"from": 2,
},
{
"impact": 0.5120000000000001,
"to": 2,
"amount": 0.5120000000000001,
"exc_amount": 0.8,
"from": 3,
},
{
"impact": 0.40960000000000013,
"to": 3,
"amount": 0.40960000000000013,
"exc_amount": 0.8,
"from": 4,
},
]
self.assertEqual(count, 6)
self.assertEqual(nodes, ref_nodes)
self.assertEqual(edges, ref_edges)
def test_max_links(self):
nodes = {
-1: {"amount": 1, "cum": 1, "ind": 0},
10: {"amount": 1, "cum": 1, "ind": 1},
11: {"amount": 1, "cum": 1, "ind": 1},
}
edges = [
{"to": -1, "from": 10, "amount": 1, "exc_amount": 1, "impact": 1},
{"to": 10, "from": 11, "amount": 1, "exc_amount": 0.999, "impact": 1},
{"to": 11, "from": 10, "amount": 1, "exc_amount": 0.999, "impact": 1},
]
nodes, edges, count = GTManipulator.unroll_graph(nodes, edges, 1, max_links=100)
self.assertEqual(count, 100)
def test_diamond(self):
nodes = {
-1: {"amount": 1, "cum": 1, "ind": 0},
10: {"amount": 1, "cum": 1, "ind": 0},
11: {"amount": 1, "cum": 0.2, "ind": 0},
12: {"amount": 1, "cum": 0.8, "ind": 0},
13: {"amount": 1, "cum": 1, "ind": 1},
}
edges = [
{"to": -1, "from": 10, "amount": 1, "exc_amount": 1, "impact": 1},
{"to": 10, "from": 11, "amount": 1, "exc_amount": 1, "impact": 0.2},
{"to": 10, "from": 12, "amount": 1, "exc_amount": 1, "impact": 0.8},
{"to": 11, "from": 13, "amount": 0.2, "exc_amount": 0.2, "impact": 0.2},
{"to": 12, "from": 13, "amount": 0.8, "exc_amount": 0.8, "impact": 0.8},
]
nodes, edges, count = GTManipulator.unroll_graph(nodes, edges, 1)
ref_nodes = {
-1: {"ind": 0, "amount": 1, "cum": 1},
0: {"ind": 0, "amount": 1, "cum": 1, "row": 10},
1: {"ind": 0, "amount": 1, "cum": 0.2, "row": 11},
2: {"ind": 0, "amount": 1, "cum": 0.8, "row": 12},
3: {"ind": 1, "amount": 0.8, "cum": 0.8, "row": 13},
4: {"ind": 1, "amount": 0.2, "cum": 0.2, "row": 13},
}
ref_edges = [
{"impact": 1, "to": -1, "amount": 1, "exc_amount": 1, "from": 0},
{"impact": 0.2, "to": 0, "amount": 1, "exc_amount": 1, "from": 1},
{"impact": 0.8, "to": 0, "amount": 1, "exc_amount": 1, "from": 2},
{"impact": 0.8, "to": 2, "amount": 0.8, "exc_amount": 0.8, "from": 3},
{"impact": 0.2, "to": 1, "amount": 0.2, "exc_amount": 0.2, "from": 4},
]
self.assertEqual(count, 5)
self.assertEqual(nodes, ref_nodes)
self.assertEqual(edges, ref_edges)
    def test_circle_with_branches(self):
        # TODO: not implemented yet; intended to cover unrolling of a
        # cycle that also has branching edges.
        pass
class MetadataTestCase(BW2DataTest):
    """Tests for ``GTManipulator.add_metadata`` plus sanity checks on the
    database fixture registered in ``extra_setup``."""
    class LCAMock(object):
        # Minimal stand-in for an LCA object: only reverse_dict() is used
        # by add_metadata.
        def reverse_dict(self):
            # Returns (activity, product, biosphere) index -> key mappings.
            return (
                {1: ("A", "a"), 2: ("A", "b"), 3: ("A", "c")},
                {1: ("A", "a"), 2: ("A", "b"), 3: ("A", "c")},
                {},
            )
    def extra_setup(self):
        # Register a three-activity database "A" used by the tests below.
        data = {
            ("A", "a"): {"name": "a", "categories": [], "unit": "kilogram"},
            ("A", "b"): {"name": "b", "categories": [], "unit": "kilogram"},
            ("A", "c"): {"name": "c", "categories": [], "unit": "kilogram"},
        }
        d = Database("A")
        d.register(name="Tests", depends=[])
        d.write(data)
        self.assertEqual(len(databases), 1)
    def test_setup_clean(self):
        # Fixture sanity: only database "A", no methods, three mapped keys.
        self.assertEqual(list(databases), ["A"])
        self.assertEqual(list(methods), [])
        self.assertEqual(len(mapping), 3)
        self.assertEqual(len(geomapping), 1)  # GLO
        self.assertTrue("GLO" in geomapping)
        self.assertEqual(len(projects), 1)  # Default project
        self.assertTrue("default" in projects)
    def test_without_row(self):
        # Nodes keyed by matrix index; metadata is looked up via the
        # reverse dict.  The input dict must not be mutated.
        nodes = {1: {}, 3: {}}
        old_nodes = copy.deepcopy(nodes)
        new_nodes = GTManipulator.add_metadata(nodes, self.LCAMock())
        self.assertEqual(old_nodes, nodes)
        self.assertEqual(
            new_nodes,
            {
                1: {
                    "categories": [],
                    "unit": "kilogram",
                    "key": ("A", "a"),
                    "name": "a",
                },
                3: {
                    "categories": [],
                    "unit": "kilogram",
                    "key": ("A", "c"),
                    "name": "c",
                },
            },
        )
    def test_with_functional_unit(self):
        # The pseudo-node -1 represents the functional unit and gets
        # fixed metadata instead of a database lookup.
        nodes = {-1: {}, 1: {}, 3: {}}
        old_nodes = copy.deepcopy(nodes)
        new_nodes = GTManipulator.add_metadata(nodes, self.LCAMock())
        self.assertEqual(old_nodes, nodes)
        self.assertEqual(
            new_nodes,
            {
                -1: {
                    "name": "Functional unit",
                    "unit": "unit",
                    "categories": ["Functional unit"],
                },
                1: {
                    "categories": [],
                    "unit": "kilogram",
                    "key": ("A", "a"),
                    "name": "a",
                },
                3: {
                    "categories": [],
                    "unit": "kilogram",
                    "key": ("A", "c"),
                    "name": "c",
                },
            },
        )
    def test_with_row(self):
        # When a node carries a "row" key (from unrolling), that row index
        # is used for the metadata lookup and preserved in the output.
        nodes = {1000: {"row": 1}, 3000: {"row": 3}}
        old_nodes = copy.deepcopy(nodes)
        new_nodes = GTManipulator.add_metadata(nodes, self.LCAMock())
        self.assertEqual(old_nodes, nodes)
        self.assertEqual(
            new_nodes,
            {
                1000: {
                    "categories": [],
                    "unit": "kilogram",
                    "key": ("A", "a"),
                    "name": "a",
                    "row": 1,
                },
                3000: {
                    "categories": [],
                    "unit": "kilogram",
                    "key": ("A", "c"),
                    "name": "c",
                    "row": 3,
                },
            },
        )
class SimplifyTestCase(unittest.TestCase):
    """Tests for ``GTManipulator.simplify(nodes, edges, score, limit)``,
    which drops low-impact nodes and splices their edges together.
    """
    def test_nodes_dont_change(self):
        # simplify() must not mutate the caller's ``nodes`` dict.
        nodes = {
            1: {"amount": 1, "ind": 1},
            2: {"amount": 2, "ind": 0.0001},
            3: {"amount": 4, "ind": 1},
        }
        old_nodes = copy.deepcopy(nodes)
        edges = [
            {"to": 1, "from": 2, "amount": 3, "exc_amount": 2, "impact": 4},
            {"to": 2, "from": 3, "amount": 3, "exc_amount": 2, "impact": 5},
        ]
        GTManipulator.simplify(nodes, edges, 2, 0.1)
        self.assertEqual(old_nodes, nodes)
    def test_linear(self):
        """Test supply chain graph like this:
        o
        |           o
        x    =>     |
        |           o
        o
        """
        # Node 2 (ind 0.0001) is below the cutoff and gets collapsed out;
        # its two edges are spliced into one 3 -> 1 edge.
        nodes = {
            1: {"amount": 1, "ind": 1},
            2: {"amount": 2, "ind": 0.0001},
            3: {"amount": 4, "ind": 1},
        }
        edges = [
            {"to": 1, "from": 2, "amount": 3, "exc_amount": 2, "impact": 4},
            {"to": 2, "from": 3, "amount": 3, "exc_amount": 2, "impact": 5},
        ]
        new_nodes, new_edges = GTManipulator.simplify(nodes, edges, 2, 0.1)
        self.assertEqual(
            new_nodes, {key: value for key, value in nodes.items() if key in (1, 3)}
        )
        self.assertEqual(
            list(new_edges),
            [{"to": 1, "from": 3, "amount": 3, "exc_amount": 4, "impact": 5}],
        )
    def test_y(self):
        r"""Test supply chain graph like this:
        o   o      o  o
         \ /        \ /
          x    =>    o
          |
          o
        """
        # Node 3 is negligible; both of its consumers are re-linked
        # directly to supplier 4 with rescaled amounts.
        nodes = {
            1: {"amount": 1, "ind": 1},
            2: {"amount": 4, "ind": 2},
            3: {"amount": 1, "ind": 0.001},
            4: {"amount": 2, "ind": 1.5},
        }
        edges = [
            {"to": 1, "from": 3, "amount": 0.2, "exc_amount": 0.2, "impact": 1},
            {"to": 2, "from": 3, "amount": 0.8, "exc_amount": 0.2, "impact": 2},
            {"to": 3, "from": 4, "amount": 2, "exc_amount": 2, "impact": 3},
        ]
        new_nodes, new_edges = GTManipulator.simplify(nodes, edges, 9, 0.1)
        expected_nodes = {
            key: value for key, value in nodes.items() if key in (1, 2, 4)
        }
        self.assertEqual(expected_nodes, new_nodes)
        # Sort both sides so the comparison is order-independent.
        expected_edges = sorted(
            [
                {"to": 2, "from": 4, "amount": 1.6, "exc_amount": 0.4, "impact": 2},
                {"to": 1, "from": 4, "amount": 0.4, "exc_amount": 0.4, "impact": 1},
            ],
            key=lambda x: (x["to"], x["from"]),
        )
        self.assertEqual(
            expected_edges, sorted(new_edges, key=lambda x: (x["to"], x["from"]))
        )
    def test_no_self_edge(self):
        """Test that collapsed edges from a -> a are deleted."""
        nodes = {
            1: {"amount": 1, "ind": 1},
            2: {"amount": 4, "ind": 2},
            3: {"amount": 1, "ind": 0.001},
            4: {"amount": 2, "ind": 1.5},
        }
        edges = [
            {"to": 1, "from": 3, "amount": 0.2, "exc_amount": 0.2, "impact": 1},
            {"to": 2, "from": 3, "amount": 0.8, "exc_amount": 0.2, "impact": 2},
            {"to": 3, "from": 4, "amount": 2, "exc_amount": 2, "impact": 3},
        ]
        new_nodes, new_edges = GTManipulator.simplify(nodes, edges, 9, 0.1)
        expected_nodes = {
            key: value for key, value in nodes.items() if key in (1, 2, 4)
        }
        self.assertEqual(expected_nodes, new_nodes)
        expected_edges = sorted(
            [
                {"to": 2, "from": 4, "amount": 1.6, "exc_amount": 0.4, "impact": 2},
                {"to": 1, "from": 4, "amount": 0.4, "exc_amount": 0.4, "impact": 1},
            ],
            key=lambda x: (x["to"], x["from"]),
        )
        self.assertEqual(
            expected_edges, sorted(new_edges, key=lambda x: (x["to"], x["from"]))
        )
    def test_diamond(self):
        r"""Test supply chain graph like this:
            o
           / \      o
          x   x  => |
           \ /      o
            o
        """
        # Both intermediate nodes (2 and 3) vanish; the two parallel
        # paths are merged into a single 4 -> 1 edge with summed amounts.
        nodes = {
            1: {"amount": 1, "ind": 1},
            2: {"amount": 2, "ind": 0},
            3: {"amount": 3, "ind": 0},
            4: {"amount": 5, "ind": 1},
        }
        edges = [
            {"to": 1, "from": 2, "amount": 2, "exc_amount": 1, "impact": 2},
            {"to": 1, "from": 3, "amount": 3, "exc_amount": 1, "impact": 3},
            {"to": 2, "from": 4, "amount": 2, "exc_amount": 1, "impact": 2},
            {"to": 3, "from": 4, "amount": 3, "exc_amount": 1, "impact": 3},
        ]
        new_nodes, new_edges = GTManipulator.simplify(nodes, edges, 5, 0.1)
        expected_nodes = {key: value for key, value in nodes.items() if key in (1, 4)}
        self.assertEqual(expected_nodes, new_nodes)
        expected_edges = [
            {"to": 1, "from": 4, "amount": 5, "exc_amount": 2, "impact": 5}
        ]
        self.assertEqual(expected_edges, list(new_edges))
    def test_x(self):
        r"""Test supply chain graph like this:
        o   o
         \ /        o  o
          x    =>   |\/|
         / \        |/\|
        o   o       o  o
        """
        # Removing the hub (node 3) cross-links every consumer (1, 2) to
        # every supplier (4, 5).
        nodes = {
            1: {"amount": 1, "ind": 1},
            2: {"amount": 1, "ind": 1},
            3: {"amount": 3, "ind": 0},
            4: {"amount": 9, "ind": 3},
            5: {"amount": 12, "ind": 2},
        }
        edges = [
            {"to": 1, "from": 3, "amount": 1, "exc_amount": 1, "impact": 17},
            {"to": 2, "from": 3, "amount": 2, "exc_amount": 2, "impact": 34},
            {"to": 3, "from": 4, "amount": 9, "exc_amount": 3, "impact": 27},
            {"to": 3, "from": 5, "amount": 12, "exc_amount": 4, "impact": 24},
        ]
        new_nodes, new_edges = GTManipulator.simplify(nodes, edges, 53, 0.01)
        expected_nodes = {
            key: value for key, value in nodes.items() if key in (1, 2, 4, 5)
        }
        self.assertEqual(expected_nodes, new_nodes)
        expected_edges = [
            {"to": 1, "from": 4, "amount": 3, "exc_amount": 3, "impact": 9},
            {"to": 1, "from": 5, "amount": 4, "exc_amount": 4, "impact": 8},
            {"to": 2, "from": 5, "amount": 8, "exc_amount": 8, "impact": 16},
            {"to": 2, "from": 4, "amount": 6, "exc_amount": 6, "impact": 18},
        ]
        self.assertEqual(
            sorted(expected_edges, key=lambda x: (x["to"], x["from"])),
            sorted(new_edges, key=lambda x: (x["to"], x["from"])),
        )
|
# (C) Copyright IBM Corp. 2019, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The source code for this program is not published or otherwise divested of its trade secrets, irrespective of what has been deposited with the U.S. Copyright Office.
########################################################################
# Functions that process flows
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
########################################################################
def get_nodes_list(main_list):
    """Recursively collect node names from a nested list/dict tree into the
    module-level ``descendant_list``.

    Lists contribute their string elements (non-strings are recursed into);
    dicts contribute every key, recursing into non-string values.

    Parameters
    ----------
    main_list : list or dict
        Nested tree of node names, as built by ``create_nodes_tree``.
    """
    global descendant_list
    if isinstance(main_list, list):
        for item in main_list:
            if isinstance(item, str):
                descendant_list.append(item)
            else:
                get_nodes_list(item)
    elif isinstance(main_list, dict):
        for key, value in main_list.items():
            # The original appended the key in both branches, so the key is
            # always collected; only non-string values are recursed into.
            descendant_list.append(key)
            if not isinstance(value, str):
                get_nodes_list(value)
##########################################################################
def find_node_in_tree(node_name, main_list):
    """Search the nested tree for ``node_name``; when found, append it and
    all of its descendants to the module-level ``descendant_list``.

    Parameters
    ----------
    node_name : str
        Dialog-node id to look for.
    main_list : list or dict
        Nested tree of node names (see ``create_nodes_tree``).
    """
    global descendant_list
    for i in main_list:
        if i == node_name:
            descendant_list.append(i)
            if isinstance(main_list, dict):
                get_nodes_list(main_list[i])
            elif isinstance(main_list, list):
                get_nodes_list(i)
        else:
            # NOTE(review): this re-walks the *entire* sibling collection for
            # every non-matching element, so subtrees are visited repeatedly;
            # preserved as-is since get_descendants() depends on the current
            # output — confirm before optimizing.
            if isinstance(main_list, list):
                for i in main_list:
                    find_node_in_tree(node_name, i)
            elif isinstance(main_list, dict):
                for i in main_list:
                    find_node_in_tree(node_name, main_list[i])
####################################################################
def get_descendants(nodes_tree, nodes_names, skill=None):
    """Collect every descendant of the given node names.

    Resets the module-level ``descendant_list``, then searches either every
    skill tree (``skill == "all"``) or a single skill's tree, and returns
    the accumulated names.  With ``skill is None`` nothing is searched and
    an empty list is returned.
    """
    global descendant_list
    descendant_list = []
    for name in nodes_names:
        if skill == "all":
            for tree_key in nodes_tree:
                find_node_in_tree(name, nodes_tree[tree_key])
        elif skill is not None:
            find_node_in_tree(name, nodes_tree[skill])
    return descendant_list
#############################################################################
def detect_flow_state(nodes, flows):
    """Classify a turn's visited nodes against the flow definitions.

    Parameters
    ----------
    nodes : list
        Visited dialog-node ids for one log row; any non-list input
        yields ``None``.
    flows : list of dict
        Flow definitions, each with ``completion_nodes`` and ``nodes`` keys.

    Returns
    -------
    str or None
        ``"completed"`` if any node is a completion node (checked first,
        so completion wins over mere membership), ``"processed"`` if any
        node belongs to a flow, otherwise ``None``.
    """
    if not isinstance(nodes, list):
        return None
    processed_found = False
    for flow in flows:
        for node in nodes:
            if node in flow['completion_nodes']:
                return "completed"
            elif node in flow['nodes']:
                processed_found = True
    if processed_found:
        return "processed"
    return None
#############################################################################
def detect_flow(nodes, flows):
    """Return the name of the first flow containing any visited node.

    Parameters
    ----------
    nodes : list
        Visited dialog-node ids, e.g. ``['node_xxxxxxxxx']``; any non-list
        input yields ``None``.
    flows : list of dict
        Flow definitions (``flow_defs['flows']``), each with ``name`` and
        ``nodes`` keys.

    Returns
    -------
    str or None
        Name of the first matching flow in ``flows`` order, or ``None``.
    """
    if not isinstance(nodes, list):
        return None
    for flow in flows:
        for node in nodes:
            if node in flow['nodes']:
                return flow['name']
    return None
###########################################################################################
# add internal flow content to initial definition
def enrich_flows_by_workspace(flow_defs, workspace, workspace_name="Some workspace"):
    """Expand each flow's ``parent_nodes`` into the full ``nodes`` list.

    Indexes the workspace's dialog nodes by id, builds the parent/child
    tree, and stores every descendant of each flow's parent nodes in
    ``flow_defs["flows"][i]["nodes"]`` (mutating and returning flow_defs).

    Parameters
    ----------
    flow_defs : dict
        Flow definitions with a ``"flows"`` list; each flow needs a
        ``"parent_nodes"`` key.
    workspace : dict
        Watson Assistant workspace export with ``"name"`` and
        ``"dialog_nodes"``.
    workspace_name : str, optional
        Key under which the workspace is indexed internally.
    """
    workspace_edited = {
        workspace_name: {
            "workspace_name": workspace["name"],
            # Index dialog nodes by their id for O(1) lookup.
            "indexed_dialog_nodes": {
                node["dialog_node"]: node for node in workspace["dialog_nodes"]
            },
        }
    }
    tree_dict = {}
    for key in workspace_edited:
        par = got_parent_nodes_for_skill(key, workspace_edited)
        tree_dict[key] = create_nodes_tree(par, workspace_edited)
    for flow in flow_defs["flows"]:
        flow["nodes"] = get_descendants(tree_dict, flow["parent_nodes"], 'all')
    return flow_defs
#######################################################################################
# add two flow columns to canonical dataframe
def enrich_canonical_by_flows(df_all, flow_defs):
    """Add ``flow`` and ``flow_state`` columns to the canonical dataframe,
    derived from each row's ``nodes_visited`` list (mutates and returns
    ``df_all``)."""
    flows = flow_defs["flows"]
    df_all["flow"] = df_all.nodes_visited.map(lambda visited: detect_flow(visited, flows))
    df_all["flow_state"] = df_all.nodes_visited.map(lambda visited: detect_flow_state(visited, flows))
    return df_all
#######################################################################################
def got_parent_nodes_for_skill(skill, workspaces):
    """Map each parent dialog-node id to the list of its children's ids.

    Parameters
    ----------
    skill : str
        Key into ``workspaces``.
    workspaces : dict
        Indexed workspaces; ``workspaces[skill]['indexed_dialog_nodes']``
        maps node id -> node dict (with optional ``'parent'``).

    Returns
    -------
    dict
        ``{parent_id: [child_dialog_node_id, ...]}`` — only nodes that
        declare a ``'parent'`` contribute.
    """
    nodes_parents = {}
    indexed_nodes = workspaces[skill]['indexed_dialog_nodes']
    for node_id in indexed_nodes:
        node = indexed_nodes[node_id]
        if "parent" in node:
            # setdefault replaces the manual "create list if missing" dance.
            nodes_parents.setdefault(node['parent'], []).append(node['dialog_node'])
    return nodes_parents
def create_nodes_tree(parents_dict, workspaces):
    """Build a nested tree from the flat parent -> children mapping.

    Children that are themselves parents ("masters") are expanded in place
    by ``check_if_master`` and then removed from the top level (their ids
    are recorded in the module-level ``delete_list``).

    Parameters
    ----------
    parents_dict : dict
        ``{parent_id: [child_id, ...]}`` as returned by
        ``got_parent_nodes_for_skill``.
    workspaces : dict
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    dict
        Nested ``{parent_id: [child_or_subtree, ...]}`` tree.
    """
    global delete_list
    # Reset once; the original initialized this twice, which was redundant.
    delete_list = []
    new_dict = {}
    for parent, children in parents_dict.items():
        new_dict[parent] = check_if_master(children, parents_dict)
    for promoted in set(delete_list):
        del new_dict[promoted]
    return new_dict
def check_if_master(values, parents_dict):
    """Expand any child id that is itself a parent ("master") into a
    ``{id: nested_children}`` dict, recursing through ``parents_dict``;
    plain leaves pass through unchanged.  Every expanded id is appended to
    the module-level ``delete_list`` so the caller can drop it from the
    top level of the tree."""
    global delete_list
    expanded = []
    for child in values:
        if child in parents_dict:
            # child has children of its own: recurse and nest it
            expanded.append({child: check_if_master(parents_dict[child], parents_dict)})
            delete_list.append(child)
        else:
            # plain leaf node
            expanded.append(child)
    return expanded
##########################################################################################################
# compute flow statistics
def count_flows(df_logs, flow_defs):
    """Compute per-flow outcome counts (completed / abandoned / rerouted).

    Walks the log rows in (conversation_id, response_timestamp) order while
    maintaining a per-flow state machine that is reset on each new
    conversation.  A flow that starts is provisionally counted as
    "abandoned"; the count is moved to "completed" when a completion node
    is reached, or to "rerouted" when the conversation moves to a different
    flow while this one is still in progress.

    Parameters
    ----------
    df_logs : pandas.DataFrame
        Canonical log frame with at least ``conversation_id``,
        ``response_timestamp``, ``flow`` and ``flow_state`` columns (the
        latter two as produced by ``enrich_canonical_by_flows``).
    flow_defs : dict
        Flow definitions; ``flow_defs["flows"][i]["name"]`` is used.

    Returns
    -------
    pandas.DataFrame
        One row per flow with columns
        ``flow name, overall, completed, abandoned, rerouted``.
    """
    df_logs = df_logs.sort_values(["conversation_id", "response_timestamp"])
    # counters per flow name
    flow_outcome_summary={}
    flow_names=[]
    for i in range(0,len(flow_defs["flows"])):
        flow_names.append(flow_defs["flows"][i]["name"])
        flow_outcome_summary[flow_names[i]] = {}
        flow_outcome_summary[flow_names[i]]["completed"]=0
        flow_outcome_summary[flow_names[i]]["abandoned"]=0
        flow_outcome_summary[flow_names[i]]["rerouted"]=0
        #flow_outcome_summary[flow_names[i]]["escalated"]=0
    num_flows=len(flow_names)
    previous_conversation_id=""
    previous_flow_state=""
    previous_flow=""
    # state of each flow will be monitored
    current_conversation_flow_state = {}
    for i in range(0, num_flows):
        current_conversation_flow_state[flow_names[i]] = ""
    for i in range(0,len(df_logs)):
        current_conversation_id = df_logs.iloc[i,df_logs.columns.get_loc("conversation_id")]
        current_flow = df_logs.iloc[i,df_logs.columns.get_loc("flow")]
        current_flow_state = df_logs.iloc[i,df_logs.columns.get_loc("flow_state")]
        if current_conversation_id!=previous_conversation_id:
            # resetting states of all flows
            for j in range(0, num_flows):
                current_conversation_flow_state[flow_names[j]] = ""
            previous_conversation_id=current_conversation_id
            new_session=True
        else:
            new_session=False
        # NOTE(review): detect_flow returns None for no-flow rows; depending
        # on pandas dtype those may surface as NaN rather than None here —
        # confirm against the enrichment step.
        if current_flow is not None:
            if current_flow_state=="processed":
                if current_conversation_flow_state[current_flow]=="" or new_session: # started now
                    current_conversation_flow_state[current_flow]="processed"
                    flow_outcome_summary[current_flow]["abandoned"]+=1 # abandonment is initial default
                elif current_conversation_flow_state[current_flow]=="rerouted": # return of digression
                    current_conversation_flow_state[current_flow]="processed"
                    flow_outcome_summary[current_flow]["abandoned"]+=1 # abandonment is initial default
                    flow_outcome_summary[current_flow]["rerouted"]-=1
            elif current_flow_state=="completed":
                flow_outcome_summary[current_flow]["completed"]+=1
                # undo the provisional abandoned/rerouted count for this run
                if current_conversation_flow_state[current_flow]=="processed":
                    flow_outcome_summary[current_flow]["abandoned"]-=1
                elif current_conversation_flow_state[current_flow]=="rerouted":
                    flow_outcome_summary[current_flow]["rerouted"]-=1
                current_conversation_flow_state[current_flow]=""
            else:
                print("Error! Wrong flow state.")
        # TODO: Check that dashboard code is fine
        # leaving a flow mid-progress within the same session counts as a
        # reroute of the previous flow
        if not new_session:
            if (previous_flow!=current_flow or current_flow is None) and previous_flow_state=="processed":
                flow_outcome_summary[previous_flow]["rerouted"]+=1
                current_conversation_flow_state[previous_flow]="rerouted"
                flow_outcome_summary[previous_flow]["abandoned"]-=1
        previous_flow=current_flow
        previous_flow_state=current_flow_state
    # Temporarily we will not have escalated
    for key in flow_outcome_summary.keys():
        flow_outcome_summary[key]["overall"] = flow_outcome_summary[key]["completed"] +\
            flow_outcome_summary[key]["rerouted"] + flow_outcome_summary[key]["abandoned"]
    df=pd.DataFrame.from_dict(flow_outcome_summary, orient='index',
                              columns=['overall','completed', 'abandoned', 'rerouted'])
    df.index.name="flow name"
    df.reset_index(inplace=True)
    return df
################################################################################################
# flow chart for flow completion / abandonment / rerouting
def plot_flow_outcomes(df_flow_outcome_summary):
    """Render a horizontal stacked-bar chart of flow outcomes.

    Each flow gets one bar split into Completed / Rerouted / Abandoned
    percentage segments; bar labels carry the flow name and its overall
    count.  Expects the dataframe produced by ``count_flows`` (columns
    ``flow name, overall, completed, abandoned, rerouted``).  Draws onto a
    new matplotlib figure and returns None.
    """
    flow_names = df_flow_outcome_summary["flow name"].tolist()
    num_flows=len(flow_names)
    flow_outcome_summary = df_flow_outcome_summary.set_index('flow name').T.to_dict()
    category_names=["Completed", "Rerouted", "Abandoned"]
    # recompute "overall" from the parts (defensive; count_flows already
    # provides it)
    for key in flow_outcome_summary.keys():
        flow_outcome_summary[key]["overall"] = flow_outcome_summary[key]["completed"] +\
            flow_outcome_summary[key]["rerouted"] + flow_outcome_summary[key]["abandoned"]
    labels=[]
    for i in range(0, num_flows):
        labels.append(flow_names[i] + (" ("+str(flow_outcome_summary[flow_names[i]]["overall"])+")"))
    # percentage share per flow: columns are completed / rerouted / abandoned
    data = np.zeros([num_flows,3])
    for i in range(0, num_flows):
        data[i,0] = (flow_outcome_summary[flow_names[i]]["completed"] / flow_outcome_summary[flow_names[i]]["overall"]) * 100.0
        data[i,1] = (flow_outcome_summary[flow_names[i]]["rerouted"] / flow_outcome_summary[flow_names[i]]["overall"]) * 100.0
        data[i,2] = (flow_outcome_summary[flow_names[i]]["abandoned"] / flow_outcome_summary[flow_names[i]]["overall"]) * 100.0
    # cumulative sums give the left edge of each stacked segment
    data_cum = data.cumsum(axis=1)
    alpha = 0.7
    # green / grey / red with shared alpha
    category_colors = [(0.102, 0.596, 0.314,alpha),(0.878, 0.878, 0.878, alpha),(0.843, 0.188, 0.152, alpha)]
    fig, ax = plt.subplots(figsize=(12, 2))
    ax.invert_yaxis()
    #ax.xaxis.set_visible(False)
    ax.set_xlim(0, np.sum(data, axis=1).max())
    for i, (colname, color) in enumerate(zip(category_names, category_colors)):
        widths = data[:, i]
        starts = data_cum[:, i] - widths
        ax.barh(labels, widths, left=starts, height=0.8,
                label=colname, color=color)
        # annotate each segment with its percentage at the segment center
        xcenters = starts + widths / 2
        text_color = 'black'
        for y, (x, c) in enumerate(zip(xcenters, widths)):
            ax.text(x, y, str(int(c))+ "%", ha='center', va='center',
                    color=text_color, fontsize='large')
    ax.set_xlabel("Percentage", fontsize="large")
    ax.set_yticklabels(labels, fontsize="large")
    ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
              loc='lower left', fontsize='large')
    return
|
import numpy as np
def generate_cosmic_rays(
        shape, mean_cosmic_rays=10,
        min_length=10, max_length=30, rng=None):
    """Generate a boolean mask of cosmic-ray tracks.

    Each ray gets a random start pixel, a random direction, and a random
    track length (uniform in [`min_length`, `max_length`]); pixels along
    the resulting line are set, and the track is widened by one pixel
    wherever the row or column index jumps so it stays connected.  The
    number of rays is Poisson(`mean_cosmic_rays`).

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the mask to generate.
    mean_cosmic_rays : int, optional
        Mean of the Poisson draw for the number of rays.
    min_length : int, optional
        Minimum track length in pixels.
    max_length : int, optional
        Maximum track length in pixels.
    rng : np.random.RandomState or None, optional
        RNG to use; a fresh `np.random.RandomState` is created when None.

    Returns
    -------
    msk : np.ndarray, shape `shape`
        Boolean mask marking the cosmic-ray pixels.
    """
    mask = np.zeros(shape)
    rng = rng or np.random.RandomState()
    n_rays = rng.poisson(mean_cosmic_rays)
    for _ in range(n_rays):
        # random start pixel, direction and track length
        row = rng.randint(0, mask.shape[0] - 1)
        col = rng.randint(0, mask.shape[1] - 1)
        theta = rng.uniform() * 2.0 * np.pi
        track_len = rng.randint(min_length, max_length + 1)
        dcol = np.cos(theta)
        drow = np.sin(theta)
        prev_col = None
        prev_row = None
        for _ in range(track_len):
            int_col = int(col + 0.5)
            int_row = int(row + 0.5)
            if 0 <= int_row < mask.shape[0] and 0 <= int_col < mask.shape[1]:
                if prev_col is not None and prev_row is not None:
                    # widen the track where the discrete position jumped
                    if prev_col != int_col:
                        mask[int_row, prev_col] = 1
                    if prev_row != int_row:
                        mask[prev_row, int_col] = 1
                mask[int_row, int_col] = 1
            prev_col = int_col
            prev_row = int_row
            col += dcol
            row += drow
    return mask.astype(bool)
def generate_bad_columns(
        shape, mean_bad_cols=10,
        widths=(1, 2, 5, 10), p=(0.8, 0.1, 0.075, 0.025),
        min_length_frac=(1, 1, 0.25, 0.25),
        max_length_frac=(1, 1, 0.75, 0.75),
        gap_prob=(0.30, 0.30, 0, 0),
        min_gap_frac=(0.1, 0.1, 0, 0),
        max_gap_frac=(0.3, 0.3, 0, 0),
        rng=None):
    """Generate a boolean mask of bad CCD columns.

    The number of defects is Poisson(`mean_bad_cols`).  Each defect picks
    a width from `widths` (with probabilities `p`), a random column, and a
    vertical span whose fraction of the image height is uniform between
    the per-width `min_length_frac` / `max_length_frac`; partial spans are
    anchored at the top or bottom edge.  With probability `gap_prob` a
    vertical gap (fraction between `min_gap_frac` and `max_gap_frac`) is
    cleared inside the defect.

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the mask to generate.
    mean_bad_cols : float, optional
        Mean of the Poisson draw for the number of bad columns.
    widths, p, min_length_frac, max_length_frac, gap_prob, min_gap_frac,
    max_gap_frac : n-tuples, optional
        Per-width defect parameters; one entry per width in `widths`.
    rng : np.random.RandomState or None, optional
        RNG to use; a fresh `np.random.RandomState` is created when None.

    Returns
    -------
    msk : np.ndarray, shape `shape`
        Boolean mask marking the bad-column pixels.
    """
    probs = np.array(p) / np.sum(p)
    mask = np.zeros(shape)
    rng = rng or np.random.RandomState()
    n_defects = rng.poisson(mean_bad_cols)
    for _ in range(n_defects):
        width = rng.choice(widths, p=probs)
        width_ind = widths.index(width)
        col = rng.randint(0, mask.shape[1] - width)
        span_frac = rng.uniform(min_length_frac[width_ind], max_length_frac[width_ind])
        span = int(span_frac * mask.shape[0])
        if span < mask.shape[0]:
            # partial columns are anchored at the top or bottom edge
            top = rng.choice([0, mask.shape[0] - span])
        else:
            top = 0
        # mark the defect before possibly clearing a gap inside it
        mask[top:top + span, col:col + width] = 1
        if rng.uniform() < gap_prob[width_ind]:
            gap_frac = rng.uniform(min_gap_frac[width_ind], max_gap_frac[width_ind])
            gap_len = int(mask.shape[0] * gap_frac)
            gap_top = rng.randint(0, mask.shape[0] - gap_len)
            # NOTE(review): the gap clears only the first column of the
            # defect (col:col+1) even when width > 1 — presumably
            # intentional (partially broken column); confirm upstream.
            mask[gap_top:gap_top + gap_len, col:col + 1] = 0
    return mask.astype(bool)
def _point_line_dist(x1, y1, x2, y2, x0, y0):
return np.abs((x2-x1)*(y1-y0) - (x1-x0)*(y2-y1)) / np.sqrt((x2-x1)**2 + (y2-y1)**2)
def generate_streaks(shape, mean_streaks=1, min_wdth=2, max_width=10, rng=None):
    """Make image streaks representing airplanes and other flying things.

    The streak width range is a guess based on
    https://iopscience.iop.org/article/10.3847/1538-3881/aaddff

    Parameters
    ----------
    shape : tuple of ints
        The shape of the mask to generate.
    mean_streaks : float, optional
        The mean of the Poisson distribution for the total number of
        streaks to generate.
    min_wdth : float, optional
        The minimum width of a streak. Default is 2 pixels.  (Parameter
        name carries a historical typo; kept for backward compatibility
        with keyword callers.)
    max_width : float, optional
        The maximum width of a streak. Default is 10 pixels.
    rng : np.random.RandomState or None, optional
        An RNG to use. If none is provided, a new `np.random.RandomState`
        state instance will be created.

    Returns
    -------
    msk : np.ndarray, shape `shape`
        A boolean mask marking every pixel whose center lies within half a
        streak width of a streak line.
    """
    msk = np.zeros(shape, dtype=np.int32)
    rng = rng or np.random.RandomState()
    n_streaks = rng.poisson(mean_streaks)
    if n_streaks > 0:
        # Random start points and unit direction vectors; (x1, y1) and
        # (x2, y2) define each streak's infinite line.
        x1s = rng.uniform(size=n_streaks) * shape[1]
        y1s = rng.uniform(size=n_streaks) * shape[0]
        angs = rng.uniform(size=n_streaks) * np.pi * 2.0
        x2s = x1s + np.cos(angs)
        y2s = y1s + np.sin(angs)
        half_widths = rng.uniform(
            low=min_wdth,
            high=max_width,
            size=n_streaks,
        ) / 2
        # NOTE(review): O(height * width * n_streaks) brute-force distance
        # test; fine for small stamps, slow for large images.
        for yind in range(shape[0]):
            y0 = yind + 0.5
            for xind in range(shape[1]):
                x0 = xind + 0.5
                for x1, y1, x2, y2, half_width in zip(x1s, y1s, x2s, y2s, half_widths):
                    if _point_line_dist(x1, y1, x2, y2, x0, y0) < half_width:
                        msk[yind, xind] = 1
    return msk.astype(bool)
|
import random
from eth_utils import (
encode_hex,
)
import hypothesis.strategies as st
from eth_abi.utils.numeric import (
scale_places,
)
# Component strategies for the size parts of ABI type strings.
# Bit width of (u)int/(u)fixed types: multiples of 8 from 8 to 256.
total_bits = st.integers(min_value=1, max_value=32).map(lambda n: n * 8)
# Number of fractional decimal places for (u)fixed types (1..80).
frac_places = st.integers(min_value=1, max_value=80)
# Byte count for fixed-size bytesN types (1..32).
bytes_sizes = st.integers(min_value=1, max_value=32)
# (total bits, fractional places) pairs for (u)fixed types.
fixed_sizes = st.tuples(total_bits, frac_places)
##########################
# Type string strategies #
##########################
def join(xs):
    """Concatenate the string forms of the items of *xs* with no separator."""
    return ''.join(str(item) for item in xs)
def join_with_x(xs):
    """Join the string forms of *xs* with 'x', e.g. (128, 19) -> '128x19'."""
    return 'x'.join(str(item) for item in xs)
# Type strings without an explicit size suffix.
bare_type_strs = st.sampled_from([
    'uint', 'int', 'ufixed', 'fixed', 'address', 'bool', 'bytes', 'function',
    'string',
])
# Sized elementary type strings, e.g. 'bytes32', 'uint256', 'fixed128x19'.
fixed_bytes_type_strs = bytes_sizes.map('bytes{}'.format)
uint_type_strs = total_bits.map('uint{}'.format)
int_type_strs = total_bits.map('int{}'.format)
fixed_size_strs = fixed_sizes.map(join_with_x)
ufixed_type_strs = fixed_size_strs.map('ufixed{}'.format)
fixed_type_strs = fixed_size_strs.map('fixed{}'.format)
non_array_type_strs = st.one_of(
    bare_type_strs,
    fixed_bytes_type_strs,
    uint_type_strs,
    int_type_strs,
    ufixed_type_strs,
    fixed_type_strs,
)
# Array suffixes: () renders as '[]' (dynamic), (n,) renders as '[n]' (fixed).
dynam_array_components = st.just(tuple())
fixed_array_components = st.integers(min_value=1).map(lambda x: (x,))
array_components = st.one_of(dynam_array_components, fixed_array_components)
array_lists = st.lists(array_components, min_size=1, max_size=6)
# Chain of array suffixes, e.g. '[3][][2]'.
array_list_strs = array_lists.map(lambda x: ''.join(repr(list(i)) for i in x))
array_type_strs = st.tuples(non_array_type_strs, array_list_strs).map(join)
non_tuple_type_strs = st.one_of(non_array_type_strs, array_type_strs)
def join_tuple(xs):
    """Render a (possibly nested) list of type strings as an ABI tuple
    type string, e.g. ['uint', ['bool']] -> '(uint,(bool))'; non-list
    inputs pass through unchanged."""
    if isinstance(xs, list):
        inner = ','.join(join_tuple(item) for item in xs)
        return '({})'.format(inner)
    return xs
# Arbitrarily nested tuple type strings, e.g. '(uint256,(bool,bytes))'.
tuple_type_strs = st.recursive(
    st.lists(non_tuple_type_strs, min_size=0, max_size=10),
    lambda this_strategy: st.lists(
        st.one_of(non_tuple_type_strs, this_strategy),
        min_size=0, max_size=10,
    ),
).map(join_tuple)
# Any well-formed ABI type string.
type_strs = st.one_of(non_tuple_type_strs, tuple_type_strs)
def guaranteed_permute(xs):
    """Return a tuple of the elements of *xs* in an index order that is
    guaranteed to differ from the original order.

    Parameters
    ----------
    xs : sequence
        Indexable sequence of at least two elements.

    Returns
    -------
    tuple
        The elements of *xs* reordered.

    Raises
    ------
    ValueError
        If *xs* has fewer than two elements — no distinct index order
        exists, and the original implementation looped forever here.
    """
    len_xs = len(xs)
    if len_xs < 2:
        raise ValueError("guaranteed_permute() requires at least two elements")
    indices = tuple(range(len_xs))
    shuffled_indices = indices
    # Re-sample until the index order changes; terminates with
    # probability 1 for len_xs >= 2.
    while indices == shuffled_indices:
        shuffled_indices = tuple(random.sample(indices, k=len_xs))
    return tuple(xs[i] for i in shuffled_indices)
# Malformed type strings: scramble (base, size, array-suffix) pieces so the
# result is (almost certainly) not a valid ABI type string.
malformed_non_tuple_type_strs = st.tuples(
    st.one_of(bare_type_strs, st.text()),
    st.one_of(total_bits, fixed_size_strs),
    array_list_strs,
).map(guaranteed_permute).map(join)
malformed_tuple_type_strs = st.recursive(
    st.lists(malformed_non_tuple_type_strs, min_size=1, max_size=10),
    lambda this_strategy: st.lists(
        st.one_of(malformed_non_tuple_type_strs, this_strategy),
        min_size=1, max_size=10,
    ),
    max_leaves=5,
).map(join_tuple)
malformed_type_strs = st.one_of(
    malformed_non_tuple_type_strs,
    malformed_tuple_type_strs,
)
#################################
# Type string w/data strategies #
#################################
# Bounds for generated example lists/arrays.
MIN_LIST_SIZE = 1
MAX_LIST_SIZE = 8
# For each elementary type family: a shared size strategy ties the type
# string to a matching value range (st.shared makes both draws see the
# same size within one example).
uint_total_bits = st.shared(total_bits, key='uint_total_bits')
uint_strs = uint_total_bits.map('uint{}'.format)
uint_values = uint_total_bits.flatmap(lambda n: st.integers(
    min_value=0,
    max_value=2 ** n - 1,
))
int_total_bits = st.shared(total_bits, key='int_total_bits')
int_strs = int_total_bits.map('int{}'.format)
int_values = int_total_bits.flatmap(lambda n: st.integers(
    min_value=-2 ** (n - 1),
    max_value=2 ** (n - 1) - 1,
))
# (u)fixed: integer magnitude bounded by the bit width, scaled down by the
# declared number of decimal places.
ufixed_size_tuples = st.shared(fixed_sizes, key='ufixed_size_tuples')
ufixed_strs = ufixed_size_tuples.map(join_with_x).map('ufixed{}'.format)
ufixed_values = ufixed_size_tuples.flatmap(lambda sz: st.decimals(
    min_value=0,
    max_value=2 ** sz[0] - 1,
    places=0,
).map(scale_places(sz[1])))
fixed_size_tuples = st.shared(fixed_sizes, key='fixed_size_tuples')
fixed_strs = fixed_size_tuples.map(join_with_x).map('fixed{}'.format)
fixed_values = fixed_size_tuples.flatmap(lambda sz: st.decimals(
    min_value=-2 ** (sz[0] - 1),
    max_value=2 ** (sz[0] - 1) - 1,
    places=0,
).map(scale_places(sz[1])))
# bytesN: exactly N bytes of binary data.
fixed_bytes_sizes = st.shared(bytes_sizes, key='fixed_bytes_sizes')
fixed_bytes_strs = fixed_bytes_sizes.map('bytes{}'.format)
fixed_bytes_values = fixed_bytes_sizes.flatmap(lambda n: st.binary(
    min_size=n,
    max_size=n,
))
# address: 20 bytes rendered as a hex string.
address_strs = st.just('address')
address_values = st.binary(min_size=20, max_size=20).map(encode_hex)
bytes_strs_and_values = st.tuples(
    st.just('bytes'),
    st.binary(min_size=0, max_size=4096),
)
# (type-string strategy, value strategy) pairs for the sized scalar types.
non_array = (
    (uint_strs, uint_values),
    (int_strs, int_values),
    (ufixed_strs, ufixed_values),
    (fixed_strs, fixed_values),
    (fixed_bytes_strs, fixed_bytes_values),
    (address_strs, address_values),
)
non_array_strs_values = st.one_of(*[
    st.tuples(type_strs, type_values) for type_strs, type_values in non_array
])
# Dynamic arrays: 'T[]' paired with a tuple of n matching values.
num_unsized_elements = st.integers(min_value=0, max_value=MAX_LIST_SIZE)
unsized_array_strs_values = num_unsized_elements.flatmap(
    lambda n: st.one_of([
        st.tuples(
            type_strs.map('{}[]'.format),
            st.lists(type_values, min_size=n, max_size=n).map(tuple),
        )
        for type_strs, type_values in non_array
    ])
)
# Fixed-size arrays: 'T[n]' paired with exactly n matching values.
num_sized_elements = st.integers(min_value=MIN_LIST_SIZE, max_value=MAX_LIST_SIZE)
sized_array_strs_values = num_sized_elements.flatmap(
    lambda n: st.one_of([
        st.tuples(
            type_strs.map(lambda ts: '{}[{}]'.format(ts, n)),
            st.lists(type_values, min_size=n, max_size=n).map(tuple),
        )
        for type_strs, type_values in non_array
    ])
)
# Any single (type string, matching value) example.
single_strs_values = st.one_of(
    unsized_array_strs_values,
    sized_array_strs_values,
    non_array_strs_values,
    bytes_strs_and_values,
)
def to_tuple(xs):
    """Recursively convert nested lists into nested tuples; any non-list
    input passes through unchanged."""
    if isinstance(xs, list):
        return tuple(to_tuple(item) for item in xs)
    return xs
def destructure_tuple_example(xs):
    """Split a nested list of (type_str, value) pairs into a tuple type
    string and a matching nested tuple of values.

    e.g. [('uint8', 1), [('bool', True)]] ->
    ('(uint8,(bool))', (1, (True,))).
    """
    def _recur(xs, type_strs, values):
        # Leaf: a (type_str, value) pair (any non-list); split it into the
        # two accumulators.
        if not isinstance(xs, list):
            type_strs.append(xs[0])
            values.append(xs[1])
            return
        # Branch: recurse, appending one nested list per sub-tuple.
        _type_strs = []
        _values = []
        for ys in xs:
            _recur(ys, _type_strs, _values)
        type_strs.append(_type_strs)
        values.append(_values)
    type_strs = []
    values = []
    _recur(xs, type_strs, values)
    # _recur appended exactly one top-level entry to each accumulator.
    return join_tuple(type_strs[0]), to_tuple(values[0])
# Nested tuple examples: a tuple type string plus matching nested values.
tuple_strs_values = st.recursive(
    st.lists(single_strs_values, min_size=0, max_size=10),
    lambda this_strategy: st.lists(
        st.one_of(single_strs_values, this_strategy),
        min_size=0, max_size=10,
    ),
).map(destructure_tuple_example)
def unzip_strs_values(strs_values):
    """Transpose a list of (type_str, value) pairs into a pair of tuples:
    (all type strings, all values)."""
    strs, values = zip(*strs_values)
    return tuple(strs), tuple(values)
# 1-10 independent examples, delivered as parallel tuples of type strings
# and values.
multi_strs_values = st.lists(
    single_strs_values,
    min_size=1,
    max_size=10,
).map(unzip_strs_values)
|
from typing import List, Union, Optional
import os
import requests
import re
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
class PatentDownloader:
url = "https://patents.google.com"
    def __init__(self, chrome_driver: str = 'chromedriver.exe', brave: bool = False, verbose: bool = False):
        """
        Parameters
        ----------
        chrome_driver : str
            Path and file name of the Chrome driver exe. Default is "chromedriver.exe".
        brave : bool, optional
            Use the Brave browser binary instead of Chrome. Default is False.
        verbose : bool, optional
            Verbosity flag (currently stored but unused). Default is False.
        """
        self.verbose = verbose  # TODO: unused attribute?
        self.driver_file = chrome_driver
        # self.option stays None for plain Chrome; set only for Brave.
        self.option = None
        if brave:
            # brave_application_path() is defined elsewhere in this package.
            brave_path = brave_application_path()
            self.option = webdriver.ChromeOptions()
            self.option.binary_location = brave_path
            self.option.add_argument("--incognito")
    def download(self,
                 patent: Union[str, List[str]],
                 output_path: str = "./",
                 waiting_time: int = 6,
                 remove_kind_codes: Optional[List[str]] = None) -> None:
        """Download patent document(s) as PDF

        Parameters
        ----------
        patent : str, list[str]
            Patent(s) to download.
            Either a string containing a patent number (e.g., US4405829A1, EP0551921B1),
            or a list containing patent numbers,
            or a string containing the path and file name (CSV,TXT). The CSV needs a column named `patent_number`.
        output_path : str, optional
            An output path where documents are saved. Default is "./".
        waiting_time : int, optional
            Waiting time in seconds for each request. The default is 6.
        remove_kind_codes : list, optional
            A list containing the patent kind codes which should be removed from patent numbers. Default is None.

        Returns
        -------
        None.

        Examples
        ----------
        See https://github.com/lorenzbr/GooglePatentsPdfDownloader#readme
        """
        # os.path.isfile raises TypeError for non-path inputs (e.g. lists),
        # which we treat as "not a file path".
        try:
            valid_path = os.path.isfile(patent)
        except TypeError:
            valid_path = False
        # A file path or a list of numbers goes through the batch method;
        # a single patent number string goes through get_pdf.
        if valid_path or isinstance(patent, list):
            self.get_pdfs(
                patents=patent,
                output_path=output_path,
                waiting_time=waiting_time,
                remove_kind_codes=remove_kind_codes
            )
        else:
            self.get_pdf(
                patent=patent,
                output_path=output_path,
                waiting_time=waiting_time,
                remove_kind_codes=remove_kind_codes
            )
def get_pdf(self, patent: str, output_path: str = "./", waiting_time: int = 6,
remove_kind_codes: Optional[List[str]] = None) -> None:
if remove_kind_codes:
for remove_kind_code in remove_kind_codes:
patent = re.sub(remove_kind_code + "$", "", patent)
# get selenium started and open url
if self.option:
driver = webdriver.Chrome(executable_path=self.driver_file, options=self.option)
else:
driver = webdriver.Chrome(executable_path=self.driver_file)
driver.get(self.url)
element = driver.find_element_by_name('q')
element.send_keys(patent)
element.send_keys(Keys.RETURN)
time.sleep(waiting_time) # wait X secs
raw_html = driver.page_source # get html code for page with expanded tree of recommendations
driver.quit() # close driver
# parse html code from that webpage
soup = BeautifulSoup(raw_html, 'html.parser')
pdf_link = self.get_pdf_link(soup, patent)
if pdf_link:
path_prefix = os.path.abspath(output_path)
validate_directory(path_prefix)
patent_file = requests.get(pdf_link)
with open(os.path.join(path_prefix, f'{patent}.pdf'), 'wb') as pdf_file:
pdf_file.write(patent_file.content)
print(f'>>> Patent {patent} successfully downloaded <<<') # print statement
else:
pass
def get_pdfs(self, patents: Union[List[str], str], output_path: str = "./", waiting_time: int = 6,
remove_kind_codes: Optional[List[str]] = None) -> None:
if isinstance(patents, str):
if patents.lower().endswith('csv'):
df_patents = pd.read_csv(patents)
patents = df_patents['patent_number'].to_list()
elif patents.lower().endswith('txt'):
with open(patents, 'r') as txt_file:
patents = txt_file.read().splitlines()
else:
raise NotImplementedError(f'Unsupported file type: {patents}')
for i, patent in enumerate(patents):
print(len(patents) - i, "patent(s) remaining.")
self.get_pdf(
patent=patent,
output_path=output_path,
waiting_time=waiting_time,
remove_kind_codes=remove_kind_codes
)
@staticmethod
def get_pdf_link(soup: BeautifulSoup, patent: str):
pdfs: List[str] = [link['href'] for link in soup.find_all('a', href=True)
if link['href'].lower().endswith('pdf')]
for pdf in pdfs:
if patent.lower() in pdf.lower(): # TODO: ignore/remove kind code?
return pdf # return first matching pdf link
else:
continue
print(f'Error: Download link for patent {patent} not found!')
return None
def brave_application_path() -> str:
    """Return the path to the Brave browser executable.

    Only well-known Windows installation paths are checked.

    Returns
    -------
    str
        The first existing Brave executable path.

    Raises
    ------
    FileNotFoundError
        If no known Brave installation path exists on this machine.
    """
    win_paths = [
        r'C:\Program Files\BraveSoftware\Brave-Browser\Application\brave.exe',
        r'C:\Program Files (x86)\BraveSoftware\Brave-Browser\Application\brave.exe',
    ]
    # TODO: add paths for other platforms (macOS, Linux)?
    for win_path in win_paths:
        if os.path.isfile(win_path):
            return win_path
    # BUG FIX: the original raised a bare FileNotFoundError with no message
    raise FileNotFoundError(
        'Brave browser executable not found in any known installation path.')
def validate_directory(directory: str) -> None:
    """Ensure *directory* exists, creating it (and any parents) if needed.

    BUG FIX: the original used ``os.mkdir``, which fails when intermediate
    directories are missing and races when two processes create the
    directory concurrently. ``makedirs(..., exist_ok=True)`` handles both.
    """
    os.makedirs(directory, exist_ok=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from bowtie.visual import Plotly, SmartGrid
from bowtie.control import Slider, Number, Button
from bowtie import cache, command, Pager
import numpy as np
import plotlywrapper as pw
import plotly.graph_objs as go
# Pager relaying the server-side scheduled tick to the client (see page_event)
pager = Pager()
#
# Initialize our values
#
# This prevents an empty page from loading
#
def initialize():
    """Seed every visual with its default value so the page never loads empty."""
    defaults = (
        (r1v1_listener, 5),
        (r1v2_listener, 0.5),
        (r2v1_listener, 500),
    )
    for listener, default_value in defaults:
        listener(default_value)
#
# Scheduled function
#
def timed_event():
    """Advance the persisted tick counter and refresh the countdown chart."""
    previous = cache.load('scheduled')
    # First tick ever: start counting from zero.
    ticker = 0 if previous is None else previous + 1
    r2v2_listener(ticker)
    cache.save('scheduled', ticker)
def page_event():
    """Forward the scheduled server tick to the client by notifying the pager."""
    pager.notify()
#
# Reset Button
#
# Sidebar button that re-seeds every visual with its default value
reset_button = Button(label='RESET')
#
# Row 1
#
# Row-1 charts and their sliders; slider values feed the sine-wave listeners below
r1v1 = Plotly()
r1v2 = Plotly()
r1v1_controller = Slider(caption='Row 1 - Left', minimum=1, maximum=10, start=5, step=1)
r1v2_controller = Slider(caption='Row 1 - Right', minimum=0.1, maximum=1, start=0.5, step=0.1)
def r1v1_listener(freq):
    """Redraw the row-1 left chart as sin(freq * t) for t in [0, 10]."""
    frequency = float(freq)
    xs = np.linspace(0, 10, 100)
    chart = pw.line(xs, np.sin(frequency * xs))
    r1v1.do_all(chart.to_json())
def r1v2_listener(freq):
    """Redraw the row-1 right chart as sin(freq * t) for t in [0, 10]."""
    frequency = float(freq)
    xs = np.linspace(0, 10, 100)
    chart = pw.line(xs, np.sin(frequency * xs))
    r1v2.do_all(chart.to_json())
#
# Row 2
#
# Row-2 charts: left is a random scatter, right is a countdown donut
r2v1 = Plotly()
r2v2 = Plotly()
# Number of points drawn in the row-2 scatter plot
r2v1_controller = Number(caption='Row 2 - Left', minimum=100, maximum=1000, start=500, step=100)
def r2v1_listener(n):
    """Redraw the row-2 left chart as an n-point random scatter."""
    # x coordinates are sampled first, then y, matching the original order
    scatter = go.Scatter(
        x=np.random.randn(n),
        y=np.random.randn(n),
        mode='markers'
    )
    r2v1.do_all({"data": [scatter]})
def r2v2_listener(n):
    """Redraw the row-2 right donut chart showing remaining time out of 60s."""
    time_limit = 60
    elapsed = min(n, time_limit)  # clamp so the pie never goes negative
    timer_trace = {
        "values": [elapsed, time_limit - elapsed],
        "marker": {"colors": ["white", "green"]},
        "textposition": "inside",
        "name": "Timer",
        "hoverinfo": "none",
        "hole": .9,
        "showlegend": False,
        "textinfo": "none",
        "sort": False,
        "direction": "clockwise",
        "type": "pie"
    }
    countdown_label = {
        "font": {"size": 100},
        "showarrow": False,
        "text": str(time_limit - elapsed)
    }
    layout = {
        "title": "Remaining Time",
        "annotations": [countdown_label]
    }
    r2v2.do_all({"data": [timer_trace], "layout": layout})
#
# Row 3
#
# Row-3 grids: left accumulates control values, right mirrors scatter selections
r3v1 = SmartGrid()
r3v2 = SmartGrid()
def r3v1_listener(a, b, c):
    """Append the three current control values as a new row in the left grid."""
    rows = r3v1.get()
    rows.append({"Control 1": a, "Control 2": b, "Control 3": c})
    r3v1.do_update(rows)
def r3v2_listener(selected_data):
    """Show the points selected on the row-2 scatter in the right grid."""
    r3v2.do_update(selected_data['points'])
#
# Bowtie section
#
@command
def construct(path):
    """Build the Bowtie app layout, wire up all widgets, and write it to *path*."""
    from bowtie import Layout
    description = """
First Bowtie app
===========
Learning Bowtie is fun!
"""
    layout = Layout(rows=3,columns=12,description=description,background_color='PaleTurquoise',directory=path,debug=True)
    # Schedule a task
    # You must edit server.py manually after build for this to work
    layout.schedule(1,page_event) # Edit server.py ->socketio.run(app, host=host, port=port, use_reloader=False)
    layout.respond(pager,timed_event)
    # Add controllers to the sidebar
    layout.add_sidebar(reset_button)
    layout.add_sidebar(r1v1_controller)
    layout.add_sidebar(r1v2_controller)
    layout.add_sidebar(r2v1_controller)
    # Add the visuals (row/column coordinates are inclusive grid cells)
    layout.add(r1v1,row_start=0,row_end=0,column_start=0,column_end=5)
    layout.add(r1v2,row_start=0,row_end=0,column_start=6,column_end=11)
    layout.add(r2v1,row_start=1,row_end=1,column_start=0,column_end=5)
    layout.add(r2v2,row_start=1,row_end=1,column_start=6,column_end=11)
    layout.add(r3v1,row_start=2,row_end=2,column_start=0,column_end=5)
    layout.add(r3v2,row_start=2,row_end=2,column_start=6,column_end=11)
    # Reaction tasks
    layout.subscribe(initialize, reset_button.on_click)
    layout.subscribe(r1v1_listener, r1v1_controller.on_change) # Continuously changes while adjusting
    layout.subscribe(r1v2_listener, r1v2_controller.on_after_change) # Only changes after adjustment
    layout.subscribe(r2v1_listener, r2v1_controller.on_change)
    layout.subscribe(r3v1_listener, r1v1_controller.on_after_change, r1v2_controller.on_after_change, r2v1_controller.on_change)
    layout.subscribe(r3v2_listener, r2v1.on_select)
    # Initialize the app on page load
    layout.load(initialize)
    # Build the app
    layout.build()
import random
import os
import json
import pandas as pd
import numpy as np
import svgwrite
from IPython.display import display, HTML
# Absolute path of the directory containing this module
here = os.path.abspath(os.path.dirname(__file__))
class RelationExtractionVisualizer:
    """Render relation-extraction results as an inline SVG graph.

    The document text is laid out word by word; extracted entity chunks
    are boxed and labeled with their entity type, and relations are drawn
    as colored arrows between chunk centers (color keyed by relation type).
    """
    def __init__(self):
        # Preset colors for known relation types; relation types not listed
        # here get a random color assigned lazily in __gen_graph.
        self.color_dict = {
            "overlap" : "lightsalmon",
            "before" : "deepskyblue",
            "after" : "springgreen",
            "trip": "lightsalmon",
            "trwp": "deepskyblue",
            "trcp": "springgreen",
            "trap": "gold",
            "trnap": "maroon",
            "terp": "purple",
            "tecp": "tomato",
            "pip" : "slategray",
            "drug-strength" : "purple",
            "drug-frequency": "slategray",
            "drug-form" : "deepskyblue",
            "dosage-drug" : "springgreen",
            "strength-drug": "maroon",
            "drug-dosage" : "gold"
        }
    def __get_color(self, l):
        # Returns a random bright hex color. NOTE(review): the label `l` is
        # unused, so colors are random per call and not stable across runs.
        r = lambda: random.randint(100,255)
        return '#%02X%02X%02X' % (r(), r(), r())
    def __size(self, text):
        # Approximate rendered pixel width of `text` in the 16px courier
        # font used throughout (empirical constants).
        return ((len(text)+1)*9.7)-5
    def __draw_line(self, dwg, s_x , s_y, e_x, e_y, d_type, color, show_relations):
        """Draw a relation arrow from (s_x, s_y) to (e_x, e_y), optionally labeled with d_type."""
        # find the a & b points
        def get_bezier_coef(points):
            # since the formulas work given that we have n+1 points
            # then n must be this:
            n = len(points) - 1
            # build coefficents matrix
            C = 4 * np.identity(n)
            np.fill_diagonal(C[1:], 1)
            np.fill_diagonal(C[:, 1:], 1)
            C[0, 0] = 2
            C[n - 1, n - 1] = 7
            C[n - 1, n - 2] = 2
            # build points vector
            P = [2 * (2 * points[i] + points[i + 1]) for i in range(n)]
            P[0] = points[0] + 2 * points[1]
            P[n - 1] = 8 * points[n - 1] + points[n]
            # solve system, find a & b
            A = np.linalg.solve(C, P)
            B = [0] * n
            for i in range(n - 1):
                B[i] = 2 * points[i + 1] - A[i + 1]
            B[n - 1] = (A[n - 1] + points[n]) / 2
            return A, B
        # returns the general Bezier cubic formula given 4 control points
        def get_cubic(a, b, c, d):
            return lambda t: np.power(1 - t, 3) * a + 3 * np.power(1 - t, 2) * t * b + 3 * (1 - t) * np.power(t, 2) * c + np.power(t, 3) * d
        # return one cubic curve for each consecutive points
        def get_bezier_cubic(points):
            A, B = get_bezier_coef(points)
            return [
                get_cubic(points[i], A[i], B[i], points[i + 1])
                for i in range(len(points) - 1)
            ]
        # evalute each cubic curve on the range [0, 1] sliced in n points
        def evaluate_bezier(points, n):
            curves = get_bezier_cubic(points)
            return np.array([fun(t) for fun in curves for t in np.linspace(0, 1, n)])
        def draw_pointer(dwg, s_x, s_y, e_x, e_y):
            # Draws the arrowhead polygon at the (e_x, e_y) end of the line
            # and returns the y coordinate at which to place the label.
            size = 8
            ratio = 2
            fullness1 = 2
            fullness2 = 3
            bx = e_x
            ax = s_x
            by = e_y
            ay = s_y
            abx = bx - ax
            aby = by - ay
            ab = np.sqrt(abx * abx + aby * aby)
            cx = bx - size * abx / ab
            cy = by - size * aby / ab
            dx = cx + (by - cy) / ratio
            dy = cy + (cx - bx) / ratio
            ex = cx - (by - cy) / ratio
            ey = cy - (cx - bx) / ratio
            fx = (fullness1 * cx + bx) / fullness2
            fy = (fullness1 * cy + by) / fullness2
            text_place_y = s_y-(abs(s_y-e_y)/2)
            line = dwg.add(dwg.polyline(
                [
                    (bx, by),
                    (dx, dy),
                    (fx, fy),
                    (ex, ey),
                    (bx, by)
                ],
                stroke=color, stroke_width = "2", fill='none',))
            return text_place_y
        # Nudge the end x so the arrowhead does not overlap the chunk box
        if s_x > e_x:
            #s_x -= 5
            e_x += 10
        else:
            #s_x += 5
            e_x -= 2
        if s_y == e_y:
            # Same text line: draw a curved (Bezier) arc above the line
            s_y -= 20
            e_y = s_y-4#55
            text_place_y = s_y-45
            pth = evaluate_bezier(np.array([[s_x, s_y],
                                  [(s_x+e_x)/2.0, s_y-40],
                                  [e_x,e_y]]), 50)
            dwg.add(dwg.polyline(pth,
                    stroke=color, stroke_width = "2", fill='none',))
            draw_pointer(dwg, (s_x+e_x)/2.0, s_y-50, e_x, e_y)
        elif s_y >= e_y:
            # Target is on an earlier (higher) line: straight line upward
            e_y +=15
            s_y-=20
            dwg.add(dwg.polyline([(s_x,s_y), (e_x, e_y)],
                    stroke=color, stroke_width = "2", fill='none',))
            text_place_y = draw_pointer(dwg, s_x, s_y, e_x, e_y)
        else:
            # Target is on a later (lower) line: straight line downward
            s_y-=5
            e_y -= 40
            dwg.add(dwg.polyline([(s_x,s_y), (e_x, e_y)],
                    stroke=color, stroke_width = "2", fill='none',))
            text_place_y = draw_pointer(dwg, s_x, s_y, e_x, e_y)
        if show_relations:
            # Center the relation-type label on the arrow
            dwg.add(dwg.text(d_type, insert=(((s_x+e_x)/2)-(self.__size(d_type)/2.75), text_place_y),
                    fill=color, font_size='12', font_family='courier'))
    def __gen_graph(self, rdf, selected_text, show_relations):
        """Lay out `selected_text` with entity boxes and relation arrows; return the SVG markup.

        NOTE(review): each element of `rdf` is expected to expose `.result`
        (the relation label) and `.metadata` with entity1/entity2 begin/end
        offsets, chunks and types — confirm against the upstream annotator.
        """
        done_ent1 = {}
        done_ent2 = {}
        all_done = {}
        start_y = 75
        x_limit = 920
        y_offset = 100
        dwg = svgwrite.Drawing("temp.svg",profile='tiny', size = (x_limit, len(selected_text) * 1.1 + len(rdf)*20))
        begin_index = 0
        start_x = 10
        this_line = 0
        all_entities_index = set()
        all_entities_1_index = []
        basic_dict = {}
        relation_dict = {}
        # Collect every entity span (keyed by begin offset) from all relations
        for t in rdf:
            if t.result.lower().strip() != 'o':
                all_entities_index.add(int(t.metadata['entity1_begin']))
                all_entities_index.add(int(t.metadata['entity2_begin']))
                basic_dict[int(t.metadata['entity1_begin'])] = [t.metadata['entity1_begin'],
                                                                t.metadata['entity1_end'],
                                                                t.metadata['chunk1'],
                                                                t.metadata['entity1']]
                basic_dict[int(t.metadata['entity2_begin'])] = [t.metadata['entity2_begin'],
                                                                t.metadata['entity2_end'],
                                                                t.metadata['chunk2'],
                                                                t.metadata['entity2']]
                #all_entities_1_index.append(t[4]['entity1_begin'])
        # Process entities in document order
        all_entities_index = np.asarray(list(all_entities_index))
        all_entities_index = all_entities_index[np.argsort(all_entities_index)]
        for ent_start_ind in all_entities_index:
            e_start_now, e_end_now, e_chunk_now, e_entity_now = basic_dict[ent_start_ind]
            prev_text = selected_text[begin_index:int(e_start_now)]
            begin_index = int(e_end_now)+1
            # Emit the plain words preceding this entity, wrapping at x_limit
            for word_ in prev_text.split(' '):
                this_size = self.__size(word_)
                if (start_x + this_size + 10) >= x_limit:
                    start_y += y_offset
                    start_x = 10
                    this_line = 0
                dwg.add(dwg.text(word_, insert=(start_x, start_y ), fill='gray', font_size='16', font_family='courier'))
                start_x += this_size + 5
            this_size = self.__size(e_chunk_now)
            if (start_x + this_size + 10)>= x_limit:# or this_line >= 2:
                start_y += y_offset
                start_x = 10
                this_line = 0
            #chunk1
            dwg.add(dwg.text(e_chunk_now, insert=(start_x, start_y ), fill='gray', font_size='16', font_family='courier'))
            #rectange chunk 1
            dwg.add(dwg.rect(insert=(start_x-3, start_y-18), size=(this_size,25),
                             rx=2, ry=2, stroke='orange',
                             stroke_width='2', fill='none'))
            #entity 1
            central_point_x = start_x+(this_size/2)
            dwg.add(dwg.text(e_entity_now,
                             insert=(central_point_x-(self.__size(e_entity_now)/2.75), start_y+20),
                             fill='slateblue', font_size='12', font_family='courier'))
            # Remember where this entity was drawn so arrows can attach later
            all_done[int(e_start_now)] = [central_point_x-(self.__size(e_entity_now)/2.75), start_y]
            start_x += this_size + 10
            this_line += 1
            #all_done[ent_start_ind] =
        # Emit the remaining text after the last entity
        prev_text = selected_text[begin_index:]
        for word_ in prev_text.split(' '):
            this_size = self.__size(word_)
            if (start_x + this_size)>= x_limit:
                start_y += y_offset
                start_x = 10
            dwg.add(dwg.text(word_, insert=(start_x, start_y ), fill='gray', font_size='16', font_family='courier'))
            start_x += this_size
        # Draw an arrow (entity2 -> entity1) for every non-'O' relation
        for row in rdf:
            if row.result.lower().strip() != 'o':
                if row.result.lower().strip() not in self.color_dict:
                    self.color_dict[row.result.lower().strip()] = self.__get_color(row.result.lower().strip())
                d_key2 = all_done[int(row.metadata['entity2_begin'])]
                d_key1 = all_done[int(row.metadata['entity1_begin'])]
                self.__draw_line(dwg, d_key2[0] , d_key2[1], d_key1[0], d_key1[1],
                                 row.result,self.color_dict[row.result.lower().strip()], show_relations)
        return dwg.tostring()
    def display(self, result, relation_col, document_col='document', show_relations=True):
        """Render the relation graph inline in a Jupyter notebook.

        Expects result[document_col][0] to expose `.result` (the original
        text) and result[relation_col] to be the relation annotations.
        NOTE(review): this method name shadows IPython.display.display
        within the class namespace only; module-level callers are unaffected.
        """
        original_text = result[document_col][0].result
        res = result[relation_col]
        return display(HTML(self.__gen_graph(res, original_text, show_relations)))
|
# Copyright 2012 by <NAME>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for BLAST+ XML output formats."""
# for more info: http://www.ncbi.nlm.nih.gov/dtd/NCBI_BlastOutput.mod.dtd
import sys
import re
import warnings
from itertools import chain
from xml.sax.saxutils import XMLGenerator, escape
from Bio import BiopythonParserWarning
# For speed try to use cElementTree rather than ElementTree
try:
if (3, 0) <= sys.version_info[:2] <= (3, 1):
# Workaround for bug in python 3.0 and 3.1,
# see http://bugs.python.org/issue9257
from xml.etree import ElementTree as ElementTree
else:
from xml.etree import cElementTree as ElementTree
except ImportError:
from xml.etree import ElementTree as ElementTree
from Bio._py3k import _as_bytes, _bytes_to_string, unicode
# Reusable empty byte string (b"") for joining byte blocks in the indexer
_empty_bytes_string = _as_bytes("")
from Bio.Alphabet import generic_dna, generic_protein
from Bio.SearchIO._index import SearchIndexer
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
# Public names exported by this module ('BlastXmlWriter' is defined later in the file)
__all__ = ['BlastXmlParser', 'BlastXmlIndexer', 'BlastXmlWriter']
__docformat__ = "restructuredtext en"  # docstrings use reStructuredText markup
# element - optional qresult attribute name mapping
# All _ELEM_* tables map an XML tag name to an (attribute_name, caster)
# pair; the caster is applied to the element's text unless it is str.
_ELEM_QRESULT_OPT = {
    'Statistics_db-num': ('stat_db_num', int),
    'Statistics_db-len': ('stat_db_len', int),
    'Statistics_eff-space': ('stat_eff_space', float),
    'Statistics_hsp-len': ('stat_hsp_len', int),
    'Statistics_kappa': ('stat_kappa', float),
    'Statistics_lambda': ('stat_lambda', float),
    'Statistics_entropy': ('stat_entropy', float),
}
# element - hit attribute name mapping
_ELEM_HIT = {
    # 'Hit_def': ('description', str),   # not set by this dict
    'Hit_accession': ('accession', str),
    'Hit_len': ('seq_len', int),
}
# element - hsp attribute name mapping
_ELEM_HSP = {
    'Hsp_bit-score': ('bitscore', float),
    'Hsp_score': ('bitscore_raw', int),
    'Hsp_evalue': ('evalue', float),
    'Hsp_identity': ('ident_num', int),
    'Hsp_positive': ('pos_num', int),
    'Hsp_gaps': ('gap_num', int),
    'Hsp_density': ('density', float),
}
# element - fragment attribute name mapping
_ELEM_FRAG = {
    'Hsp_query-from': ('query_start', int),
    'Hsp_query-to': ('query_end', int),
    'Hsp_hit-from': ('hit_start', int),
    'Hsp_hit-to': ('hit_end', int),
    'Hsp_query-frame': ('query_frame', int),
    'Hsp_hit-frame': ('hit_frame', int),
    'Hsp_align-len': ('aln_span', int),
    'Hsp_pattern-from': ('pattern_start', int),
    'Hsp_pattern-to': ('pattern_end', int),
    'Hsp_hseq': ('hit', str),
    'Hsp_qseq': ('query', str),
}
# dictionary for mapping tag name and meta key name
_ELEM_META = {
    'BlastOutput_db': ('target', str),
    'BlastOutput_program': ('program', str),
    'BlastOutput_version': ('version', str),
    'BlastOutput_reference': ('reference', str),
    'Parameters_expect': ('param_evalue_threshold', float),
    'Parameters_entrez-query': ('param_entrez_query', str),
    'Parameters_filter': ('param_filter', str),
    'Parameters_gap-extend': ('param_gap_extend', int),
    'Parameters_gap-open': ('param_gap_open', int),
    'Parameters_include': ('param_include', str),
    'Parameters_matrix': ('param_matrix', str),
    'Parameters_pattern': ('param_pattern', str),
    'Parameters_sc-match': ('param_score_match', int),
    'Parameters_sc-mismatch': ('param_score_mismatch', int),
}
# these are fallback tags that store information on the first query
# outside the <Iteration> tag
# only used if query_{ID,def,len} is not found in <Iteration>
# (seen in legacy Blast <2.2.14)
_ELEM_QRESULT_FALLBACK = {
    'BlastOutput_query-ID': ('id', str),
    'BlastOutput_query-def': ('description', str),
    'BlastOutput_query-len': ('len', str),
}
# element-attribute maps, for writing
# Each entry maps an XML element suffix to the attribute name it is read
# from when serializing; None means the writer handles it specially.
_WRITE_MAPS = {
    'preamble': (
        ('program', 'program'),
        ('version', 'version'),
        ('reference', 'reference'),
        ('db', 'target'),
        ('query-ID', 'id'),
        ('query-def', 'description'),
        ('query-len', 'seq_len'),
        ('param', None),
    ),
    'param': (
        ('matrix', 'param_matrix'),
        ('expect', 'param_evalue_threshold'),
        ('sc-match', 'param_score_match'),
        ('sc-mismatch', 'param_score_mismatch'),
        ('gap-open', 'param_gap_open'),
        ('gap-extend', 'param_gap_extend'),
        ('filter', 'param_filter'),
        ('pattern', 'param_pattern'),
        ('entrez-query', 'param_entrez_query'),
    ),
    'qresult': (
        ('query-ID', 'id'),
        ('query-def', 'description'),
        ('query-len', 'seq_len'),
    ),
    'stat': (
        ('db-num', 'stat_db_num'),
        ('db-len', 'stat_db_len'),
        ('hsp-len', 'stat_hsp_len'),
        ('eff-space', 'stat_eff_space'),
        ('kappa', 'stat_kappa'),
        ('lambda', 'stat_lambda'),
        ('entropy', 'stat_entropy'),
    ),
    'hit': (
        ('id', 'id'),
        ('def', 'description'),
        ('accession', 'accession'),
        ('len', 'seq_len'),
    ),
    'hsp': (
        ('bit-score', 'bitscore'),
        ('score', 'bitscore_raw'),
        ('evalue', 'evalue'),
        ('query-from', 'query_start'),
        ('query-to', 'query_end'),
        ('hit-from', 'hit_start'),
        ('hit-to', 'hit_end'),
        ('pattern-from', 'pattern_start'),
        ('pattern-to', 'pattern_end'),
        ('query-frame', 'query_frame'),
        ('hit-frame', 'hit_frame'),
        ('identity', 'ident_num'),
        ('positive', 'pos_num'),
        ('gaps', 'gap_num'),
        ('align-len', 'aln_span'),
        ('density', 'density'),
        ('qseq', 'query'),
        ('hseq', 'hit'),
        ('midline', None),
    ),
}
# optional elements, based on the DTD
_DTD_OPT = (
    'BlastOutput_query-seq', 'BlastOutput_mbstat', 'Iteration_query-def',
    'Iteration_query-len', 'Iteration-hits', 'Iteration_stat',
    'Iteration_message', 'Parameters_matrix', 'Parameters_include',
    'Parameters_sc-match', 'Parameters_sc-mismatch', 'Parameters_filter',
    'Parameters_pattern', 'Parameters_entrez-query', 'Hit_hsps',
    'Hsp_pattern-from', 'Hsp_pattern-to', 'Hsp_query-frame', 'Hsp_hit-frame',
    'Hsp_identity', 'Hsp_positive', 'Hsp_gaps', 'Hsp_align-len', 'Hsp_density',
    'Hsp_midline',
)
# compile RE patterns
# for capturing BLAST version
_RE_VERSION = re.compile(r'\d+\.\d+\.\d+\+?')
# for splitting ID-description pairs
_RE_ID_DESC_PAIRS_PATTERN = re.compile(" +>")
# for splitting ID and description (must be used with maxsplit = 1)
_RE_ID_DESC_PATTERN = re.compile(" +")
def _extract_ids_and_descs(concat_str):
# Given a string space-separate string of IDs and descriptions,
# return a list of tuples, each tuple containing an ID and
# a description string (which may be empty)
# create a list of lists, each list containing an ID and description
# or just an ID, if description is not present
id_desc_pairs = [re.split(_RE_ID_DESC_PATTERN, x, 1) \
for x in re.split(_RE_ID_DESC_PAIRS_PATTERN, concat_str)]
# make sure empty descriptions are added as empty strings
# also, we return lists for compatibility reasons between Py2 and Py3
add_descs = lambda x: x if len(x) == 2 else x + [""]
return [pair for pair in map(add_descs, id_desc_pairs)]
class BlastXmlParser(object):
    """Parser for the BLAST XML format.

    Streams ``QueryResult`` objects from a BLAST+ (or legacy) XML handle
    using incremental ElementTree parsing, clearing elements as they are
    consumed to keep memory bounded.
    """
    def __init__(self, handle):
        # Incremental parse: we need both 'start' events (to detect the
        # first <Iteration>) and 'end' events (to read complete elements).
        self.xml_iter = iter(ElementTree.iterparse(handle, events=('start', 'end')))
        self._meta, self._fallback = self._parse_preamble()
    def __iter__(self):
        # Yield one QueryResult per <Iteration> element.
        for qresult in self._parse_qresult():
            yield qresult
    def _parse_preamble(self):
        """Parses all tag data prior to the first query result."""
        # dictionary for containing all information prior to the first query
        meta = {}
        # dictionary for fallback information
        fallback = {}
        # parse the preamble part (anything prior to the first result)
        for event, elem in self.xml_iter:
            # get the tag values, cast appropriately, store into meta
            if event == 'end' and elem.tag in _ELEM_META:
                attr_name, caster = _ELEM_META[elem.tag]
                if caster is not str:
                    meta[attr_name] = caster(elem.text)
                else:
                    meta[attr_name] = elem.text
                # delete element after we finish parsing it
                elem.clear()
                continue
            # capture fallback values
            # these are used only if the first <Iteration> does not have any
            # ID, ref, or len.
            elif event == 'end' and elem.tag in _ELEM_QRESULT_FALLBACK:
                attr_name, caster = _ELEM_QRESULT_FALLBACK[elem.tag]
                if caster is not str:
                    fallback[attr_name] = caster(elem.text)
                else:
                    fallback[attr_name] = elem.text
                elem.clear()
                continue
            if event == 'start' and elem.tag == 'Iteration':
                break
        # we only want the version number, sans the program name or date
        if meta.get('version') is not None:
            meta['version'] = re.search(_RE_VERSION,
                    meta['version']).group(0)
        return meta, fallback
    def _parse_qresult(self):
        """Parses query results."""
        # parse the queries
        for event, qresult_elem in self.xml_iter:
            # </Iteration> marks the end of a single query
            # which means we can process it
            if event == 'end' and qresult_elem.tag == 'Iteration':
                # we'll use the following schema
                # <!ELEMENT Iteration (
                #        Iteration_iter-num,
                #        Iteration_query-ID?,
                #        Iteration_query-def?,
                #        Iteration_query-len?,
                #        Iteration_hits?,
                #        Iteration_stat?,
                #        Iteration_message?)>
                # assign query attributes with fallbacks
                query_id = qresult_elem.findtext('Iteration_query-ID')
                if query_id is None:
                    query_id = self._fallback['id']
                query_desc = qresult_elem.findtext('Iteration_query-def')
                if query_desc is None:
                    query_desc = self._fallback['description']
                query_len = qresult_elem.findtext('Iteration_query-len')
                if query_len is None:
                    query_len = self._fallback['len']
                # handle blast searches against databases with Blast's IDs
                # 'Query_' marks the beginning of a BLAST+-generated ID,
                # 'lcl|' marks the beginning of a BLAST legacy-generated ID
                if query_id.startswith('Query_') or query_id.startswith('lcl|'):
                    # store the Blast-generated query ID
                    blast_query_id = query_id
                    id_desc = query_desc.split(' ', 1)
                    query_id = id_desc[0]
                    try:
                        query_desc = id_desc[1]
                    except IndexError:
                        query_desc = ''
                else:
                    blast_query_id = ''
                hit_list, key_list = [], []
                for hit in self._parse_hit(qresult_elem.find('Iteration_hits'),
                        query_id):
                    if hit:
                        # need to keep track of hit IDs, since there could be duplicates,
                        if hit.id in key_list:
                            warnings.warn("Adding hit with BLAST-generated ID "
                                    "%r since hit ID %r is already present "
                                    "in query %r. Your BLAST database may contain "
                                    "duplicate entries." %
                                    (hit._blast_id, hit.id, query_id), BiopythonParserWarning)
                            # fallback to Blast-generated IDs, if the ID is already present
                            # and restore the desc, too
                            hit.description = '%s %s' % (hit.id, hit.description)
                            hit.id = hit._blast_id
                            # and change the hit_id of the HSPs contained
                            for hsp in hit:
                                hsp.hit_id = hit._blast_id
                        else:
                            key_list.append(hit.id)
                        hit_list.append(hit)
                # create qresult and assign its attributes
                qresult = QueryResult(hit_list, query_id)
                qresult.description = query_desc
                qresult.seq_len = int(query_len)
                qresult._blast_id = blast_query_id
                for key, value in self._meta.items():
                    setattr(qresult, key, value)
                # statistics are stored in Iteration_stat's 'grandchildren' with the
                # following DTD
                # <!ELEMENT Statistics (
                #        Statistics_db-num,
                #        Statistics_db-len,
                #        Statistics_hsp-len,
                #        Statistics_eff-space,
                #        Statistics_kappa,
                #        Statistics_lambda,
                #        Statistics_entropy)>
                stat_iter_elem = qresult_elem.find('Iteration_stat')
                if stat_iter_elem is not None:
                    stat_elem = stat_iter_elem.find('Statistics')
                    for key, val_info in _ELEM_QRESULT_OPT.items():
                        value = stat_elem.findtext(key)
                        if value is not None:
                            caster = val_info[1]
                            # recast only if value is not intended to be str
                            if value is not None and caster is not str:
                                value = caster(value)
                            setattr(qresult, val_info[0], value)
                # delete element after we finish parsing it
                qresult_elem.clear()
                yield qresult
    def _parse_hit(self, root_hit_elem, query_id):
        """Generator that transforms Iteration_hits XML elements into Hit objects.

        :param root_hit_elem: root element of the Iteration_hits tag.
        :type root_hit_elem: XML element tag
        :param query_id: QueryResult ID of this Hit
        :type query_id: string

        """
        # Hit level processing
        # Hits are stored in the Iteration_hits tag, with the following
        # DTD
        # <!ELEMENT Hit (
        #        Hit_num,
        #        Hit_id,
        #        Hit_def,
        #        Hit_accession,
        #        Hit_len,
        #        Hit_hsps?)>
        # feed the loop below an empty list so iteration still works
        if root_hit_elem is None:
            root_hit_elem = []
        for hit_elem in root_hit_elem:
            # create empty hit object
            hit_id = hit_elem.findtext('Hit_id')
            hit_desc = hit_elem.findtext('Hit_def')
            # handle blast searches against databases with Blast's IDs
            if hit_id.startswith('gnl|BL_ORD_ID|'):
                blast_hit_id = hit_id
                id_desc = hit_desc.split(' ', 1)
                hit_id = id_desc[0]
                try:
                    hit_desc = id_desc[1]
                except IndexError:
                    hit_desc = ''
            else:
                blast_hit_id = ''
            # combine primary ID and defline first before splitting
            full_id_desc = hit_id + ' ' + hit_desc
            id_descs = _extract_ids_and_descs(full_id_desc)
            hit_id, hit_desc = id_descs[0]
            hsps = [hsp for hsp in
                    self._parse_hsp(hit_elem.find('Hit_hsps'),
                        query_id, hit_id)]
            hit = Hit(hsps)
            hit.description = hit_desc
            hit._id_alt = [x[0] for x in id_descs[1:]]
            hit._description_alt = [x[1] for x in id_descs[1:]]
            # blast_hit_id is only set if the hit ID is Blast-generated
            hit._blast_id = blast_hit_id
            for key, val_info in _ELEM_HIT.items():
                value = hit_elem.findtext(key)
                if value is not None:
                    caster = val_info[1]
                    # recast only if value is not intended to be str
                    if value is not None and caster is not str:
                        value = caster(value)
                    setattr(hit, val_info[0], value)
            # delete element after we finish parsing it
            hit_elem.clear()
            yield hit
    def _parse_hsp(self, root_hsp_frag_elem, query_id, hit_id):
        """Iterator that transforms Hit_hsps XML elements into HSP objects.

        :param root_hsp_frag_elem: the ``Hit_hsps`` tag
        :type root_hsp_frag_elem: XML element tag
        :param query_id: query ID
        :type query_id: string
        :param hit_id: hit ID
        :type hit_id: string

        """
        # Hit_hsps DTD:
        # <!ELEMENT Hsp (
        #        Hsp_num,
        #        Hsp_bit-score,
        #        Hsp_score,
        #        Hsp_evalue,
        #        Hsp_query-from,
        #        Hsp_query-to,
        #        Hsp_hit-from,
        #        Hsp_hit-to,
        #        Hsp_pattern-from?,
        #        Hsp_pattern-to?,
        #        Hsp_query-frame?,
        #        Hsp_hit-frame?,
        #        Hsp_identity?,
        #        Hsp_positive?,
        #        Hsp_gaps?,
        #        Hsp_align-len?,
        #        Hsp_density?,
        #        Hsp_qseq,
        #        Hsp_hseq,
        #        Hsp_midline?)>
        # if value is None, feed the loop below an empty list
        if root_hsp_frag_elem is None:
            root_hsp_frag_elem = []
        for hsp_frag_elem in root_hsp_frag_elem:
            coords = {} # temporary container for coordinates
            frag = HSPFragment(hit_id, query_id)
            for key, val_info in _ELEM_FRAG.items():
                value = hsp_frag_elem.findtext(key)
                caster = val_info[1]
                # adjust 'from' and 'to' coordinates to 0-based ones
                if value is not None:
                    if key.endswith('-from') or key.endswith('-to'):
                        # store coordinates for further processing
                        coords[val_info[0]] = caster(value)
                        continue
                    # recast only if value is not intended to be str
                    elif caster is not str:
                        value = caster(value)
                    setattr(frag, val_info[0], value)
            # set the similarity characters into aln_annotation dict
            frag.aln_annotation['similarity'] = \
                    hsp_frag_elem.findtext('Hsp_midline')
            # process coordinates
            # since 'x-from' could be bigger than 'x-to', we need to figure
            # out which one is smaller/bigger since 'x_start' is always smaller
            # than 'x_end'
            for coord_type in ('query', 'hit', 'pattern'):
                start_type = coord_type + '_start'
                end_type = coord_type + '_end'
                try:
                    start = coords[start_type]
                    end = coords[end_type]
                except KeyError:
                    continue
                else:
                    # convert to python range and setattr
                    setattr(frag, start_type, min(start, end) - 1)
                    setattr(frag, end_type, max(start, end))
            # set alphabet, based on program
            prog = self._meta.get('program')
            if prog == 'blastn':
                frag.alphabet = generic_dna
            elif prog in ['blastp', 'blastx', 'tblastn', 'tblastx']:
                frag.alphabet = generic_protein
            hsp = HSP([frag])
            for key, val_info in _ELEM_HSP.items():
                value = hsp_frag_elem.findtext(key)
                caster = val_info[1]
                if value is not None:
                    if caster is not str:
                        value = caster(value)
                    setattr(hsp, val_info[0], value)
            # delete element after we finish parsing it
            hsp_frag_elem.clear()
            yield hsp
class BlastXmlIndexer(SearchIndexer):
    """Indexer class for BLAST XML output.

    Scans the raw XML byte stream for <Iteration>...</Iteration> blocks,
    yielding (query_id, file_offset, block_length) triples so individual
    query results can be fetched without re-parsing the whole file.
    """
    _parser = BlastXmlParser
    qstart_mark = _as_bytes('<Iteration>')
    qend_mark = _as_bytes('</Iteration>')
    block_size = 16384
    def __init__(self, filename):
        SearchIndexer.__init__(self, filename)
        # TODO: better way to do this?
        # Run the parser once over the preamble so _parse() can later seed a
        # fresh parser with the same metadata and fallback query values.
        iter_obj = self._parser(self._handle)
        self._meta, self._fallback = iter_obj._meta, iter_obj._fallback
    def __iter__(self):
        # Bind frequently-used attributes to locals for the scan loop
        qstart_mark = self.qstart_mark
        qend_mark = self.qend_mark
        blast_id_mark = _as_bytes('Query_')
        block_size = self.block_size
        handle = self._handle
        handle.seek(0)
        # NOTE(review): the two continuation strings below are not raw
        # literals, so '\s' relies on Python passing unknown escapes
        # through — confirm this still parses cleanly on newer Pythons.
        re_desc = re.compile(_as_bytes(r'<Iteration_query-ID>(.*?)'
                '</Iteration_query-ID>\s+?<Iteration_query-def>'
                '(.*?)</Iteration_query-def>'))
        re_desc_end = re.compile(_as_bytes(r'</Iteration_query-def>'))
        # counter is incremented but otherwise unused; kept as-is
        counter = 0
        while True:
            start_offset = handle.tell()
            line = handle.readline()
            if not line:
                break
            if qstart_mark not in line:
                continue
            # The following requirements are to make supporting BGZF compressed
            # BLAST XML files simpler (avoids complex offset manipulations):
            assert line.count(qstart_mark) == 1, "XML without line breaks?"
            assert line.lstrip().startswith(qstart_mark), line
            if qend_mark in line:
                # Should cope with <Iteration>...</Iteration> on one long line
                block = line
            else:
                # Load the rest of this block up to and including </Iteration>
                block = [line]
                while line and qend_mark not in line:
                    line = handle.readline()
                    assert qstart_mark not in line, line
                    block.append(line)
                assert line.rstrip().endswith(qend_mark), line
                block = _empty_bytes_string.join(block)
            assert block.count(qstart_mark) == 1, "XML without line breaks? %r" % block
            assert block.count(qend_mark) == 1, "XML without line breaks? %r" % block
            # Now we have a full <Iteration>...</Iteration> block, find the ID
            regx = re.search(re_desc, block)
            try:
                qstart_desc = regx.group(2)
                qstart_id = regx.group(1)
            except AttributeError:
                # use the fallback values
                assert re.search(re_desc_end, block)
                qstart_desc = _as_bytes(self._fallback['description'])
                qstart_id = _as_bytes(self._fallback['id'])
            if qstart_id.startswith(blast_id_mark):
                # BLAST+-generated ID: the real ID is the defline's first word
                qstart_id = qstart_desc.split(_as_bytes(' '), 1)[0]
            yield _bytes_to_string(qstart_id), start_offset, len(block)
            counter += 1
    def _parse(self, handle):
        # overwrites SearchIndexer._parse, since we need to set the meta and
        # fallback dictionaries to the parser
        generator = self._parser(handle, **self._kwargs)
        generator._meta = self._meta
        generator._fallback = self._fallback
        return next(iter(generator))
    def get_raw(self, offset):
        """Return the raw bytes of the <Iteration> block starting at *offset*."""
        qend_mark = self.qend_mark
        handle = self._handle
        handle.seek(offset)
        qresult_raw = handle.readline()
        assert qresult_raw.lstrip().startswith(self.qstart_mark)
        while qend_mark not in qresult_raw:
            qresult_raw += handle.readline()
        assert qresult_raw.rstrip().endswith(qend_mark)
        assert qresult_raw.count(qend_mark) == 1
        # Note this will include any leading and trailing whitespace, in
        # general expecting "    <Iteration>\n...\n    </Iteration>\n"
        return qresult_raw
class _BlastXmlGenerator(XMLGenerator):
    """Event-based XML Generator.

    Extends the SAX ``XMLGenerator`` with indentation handling and
    convenience helpers for writing nested ("parent") elements.
    """
    def __init__(self, out, encoding='utf-8', indent=" ", increment=2):
        XMLGenerator.__init__(self, out, encoding)
        # the indentation character
        self._indent = indent
        # nest level
        self._level = 0
        # how many indentation character should we increment per level
        self._increment = increment
        # container for names of tags with children
        self._parent_stack = []
        # determine writer method
        try:
            # this should work for all platforms except Jython
            self.write = self._write
        except AttributeError:
            # Jython uses self._out.write
            self.write = self._out.write
    def startDocument(self):
        """Starts the XML document, writing the declaration and DOCTYPE."""
        self.write(u'<?xml version="1.0"?>\n'
                   '<!DOCTYPE BlastOutput PUBLIC "-//NCBI//NCBI BlastOutput/EN" '
                   '"http://www.ncbi.nlm.nih.gov/dtd/NCBI_BlastOutput.dtd">\n')
    def startElement(self, name, attrs={}, children=False):
        """Starts an XML element.

        :param name: element name
        :type name: string
        :param attrs: element attributes
        :type attrs: dictionary {string: object}
        :param children: whether the element has children or not
        :type children: bool

        """
        # indent according to the current nesting level before delegating
        self.ignorableWhitespace(self._indent * self._level)
        XMLGenerator.startElement(self, name, attrs)
    def endElement(self, name):
        """Ends an XML element of the given name."""
        XMLGenerator.endElement(self, name)
        self.write(u'\n')
    def startParent(self, name, attrs={}):
        """Starts an XML element which has children.

        :param name: element name
        :type name: string
        :param attrs: element attributes
        :type attrs: dictionary {string: object}

        """
        self.startElement(name, attrs, children=True)
        self._level += self._increment
        self.write(u'\n')
        # append the element name, so we can end it later
        self._parent_stack.append(name)
    def endParent(self):
        """Ends an XML element with children."""
        # the element to end is the one on top of the stack
        name = self._parent_stack.pop()
        self._level -= self._increment
        self.ignorableWhitespace(self._indent * self._level)
        self.endElement(name)
    def startParents(self, *names):
        """Starts several nested XML elements that will have children."""
        for name in names:
            self.startParent(name)
    def endParents(self, num):
        """Ends the given number of open parent XML elements."""
        for i in range(num):
            self.endParent()
    def simpleElement(self, name, content=None):
        """Creates an XML element without children with the given content."""
        self.startElement(name, attrs={})
        if content:
            self.characters(content)
        self.endElement(name)
    def characters(self, content):
        # Escape <, >, & via sax's escape(), then normalize quote characters
        # to numeric entities to match native BLAST+ XML output.
        # NOTE(review): `unicode` is presumably supplied by this module's
        # Py2/3 compat imports (str on Python 3) — confirm at the file top.
        content = escape(unicode(content))
        for a, b in ((u'"', u'&quot;'), (u"'", u'&apos;')):
            content = content.replace(a, b)
        self.write(content)
class BlastXmlWriter(object):
    """Stream-based BLAST+ XML Writer.

    Serializes QueryResult objects into BLAST+ XML using the event-based
    ``_BlastXmlGenerator``, while counting how many qresults, hits, HSPs
    and fragments were written.
    """

    def __init__(self, handle):
        self.xml = _BlastXmlGenerator(handle, 'utf-8')

    def write_file(self, qresults):
        """Writes the XML contents to the output handle.

        :param qresults: iterator of QueryResult objects to serialize
        :returns: tuple of (qresult, hit, hsp, fragment) counts written
        """
        xml = self.xml
        self.qresult_counter, self.hit_counter, self.hsp_counter, \
            self.frag_counter = 0, 0, 0, 0
        # get the first qresult, since the preamble requires its attr values
        first_qresult = next(qresults)
        # start the XML document, set the root element, and create the preamble
        xml.startDocument()
        xml.startParent('BlastOutput')
        self._write_preamble(first_qresult)
        # and write the qresults (re-chaining the first one we consumed)
        xml.startParent('BlastOutput_iterations')
        self._write_qresults(chain([first_qresult], qresults))
        xml.endParents(2)
        xml.endDocument()
        return self.qresult_counter, self.hit_counter, self.hsp_counter, \
            self.frag_counter

    def _write_elem_block(self, block_name, map_name, obj, opt_dict=None):
        """Writes sibling XML elements.

        :param block_name: common element name prefix
        :type block_name: string
        :param map_name: name of mapping between element and attribute names
        :type map_name: string
        :param obj: object whose attribute value will be used
        :type obj: object
        :param opt_dict: custom element-attribute mapping
        :type opt_dict: dictionary {string: string}
        """
        # avoid the shared mutable default-argument pitfall
        if opt_dict is None:
            opt_dict = {}
        for elem, attr in _WRITE_MAPS[map_name]:
            elem = block_name + elem
            try:
                content = str(getattr(obj, attr))
            except AttributeError:
                # ensure attrs that is not present is optional
                assert elem in _DTD_OPT, "Element %r (attribute %r) not " \
                    "found" % (elem, attr)
            else:
                # custom element-attribute mapping, for fallback values
                if elem in opt_dict:
                    content = opt_dict[elem]
                self.xml.simpleElement(elem, content)

    def _write_preamble(self, qresult):
        """Writes the XML file preamble using *qresult*'s attributes."""
        xml = self.xml
        for elem, attr in _WRITE_MAPS['preamble']:
            elem = 'BlastOutput_' + elem
            if elem == 'BlastOutput_param':
                # the parameter block is nested, handled separately
                xml.startParent(elem)
                self._write_param(qresult)
                xml.endParent()
                continue
            try:
                content = str(getattr(qresult, attr))
            except AttributeError:
                # missing attributes are fine only for optional elements
                assert elem in _DTD_OPT, "Element %s (attribute %s) not " \
                    "found" % (elem, attr)
            else:
                if elem == 'BlastOutput_version':
                    content = '%s %s' % (qresult.program.upper(),
                                         qresult.version)
                elif qresult._blast_id:
                    # restore BLAST-generated IDs in the preamble
                    if elem == 'BlastOutput_query-ID':
                        content = qresult._blast_id
                    elif elem == 'BlastOutput_query-def':
                        content = ' '.join([qresult.id,
                                            qresult.description]).strip()
                xml.simpleElement(elem, content)

    def _write_param(self, qresult):
        """Writes the parameter block of the preamble."""
        xml = self.xml
        xml.startParent('Parameters')
        self._write_elem_block('Parameters_', 'param', qresult)
        xml.endParent()

    def _write_qresults(self, qresults):
        """Writes QueryResult objects into iteration elements."""
        xml = self.xml
        for num, qresult in enumerate(qresults):
            xml.startParent('Iteration')
            xml.simpleElement('Iteration_iter-num', str(num + 1))
            opt_dict = {}
            # use custom Iteration_query-ID and Iteration_query-def mapping
            # if the query has a BLAST-generated ID
            if qresult._blast_id:
                opt_dict = {
                    'Iteration_query-ID': qresult._blast_id,
                    'Iteration_query-def': ' '.join([qresult.id,
                                                     qresult.description]).strip(),
                }
            self._write_elem_block('Iteration_', 'qresult', qresult, opt_dict)
            # the Iteration_hits tag only has children if there are hits
            if qresult:
                xml.startParent('Iteration_hits')
                self._write_hits(qresult.hits)
                xml.endParent()
            # otherwise it's a simple element without any contents
            else:
                xml.simpleElement('Iteration_hits', '')
            xml.startParents('Iteration_stat', 'Statistics')
            self._write_elem_block('Statistics_', 'stat', qresult)
            xml.endParents(2)
            # there's a message if no hits is present
            if not qresult:
                xml.simpleElement('Iteration_message', 'No hits found')
            self.qresult_counter += 1
            xml.endParent()

    def _write_hits(self, hits):
        """Writes Hit objects."""
        xml = self.xml
        for num, hit in enumerate(hits):
            xml.startParent('Hit')
            xml.simpleElement('Hit_num', str(num + 1))
            # use custom hit_id and hit_def mapping if the hit has a
            # BLAST-generated ID
            opt_dict = {}
            if hit._blast_id:
                opt_dict = {
                    'Hit_id': hit._blast_id,
                    'Hit_def': ' '.join([hit.id, hit.description]).strip(),
                }
            self._write_elem_block('Hit_', 'hit', hit, opt_dict)
            xml.startParent('Hit_hsps')
            self._write_hsps(hit.hsps)
            self.hit_counter += 1
            # closes both Hit_hsps and Hit
            xml.endParents(2)

    def _write_hsps(self, hsps):
        """Writes HSP objects."""
        xml = self.xml
        for num, hsp in enumerate(hsps):
            xml.startParent('Hsp')
            xml.simpleElement('Hsp_num', str(num + 1))
            for elem, attr in _WRITE_MAPS['hsp']:
                elem = 'Hsp_' + elem
                try:
                    content = self._adjust_output(hsp, elem, attr)
                # make sure any elements that is not present is optional
                # in the DTD
                except AttributeError:
                    assert elem in _DTD_OPT, "Element %s (attribute %s) not found" \
                        % (elem, attr)
                else:
                    xml.simpleElement(elem, str(content))
            self.hsp_counter += 1
            self.frag_counter += len(hsp.fragments)
            xml.endParent()

    def _adjust_output(self, hsp, elem, attr):
        """Adjusts output to mimic native BLAST+ XML as much as possible.

        :param hsp: the HSP object the value is read from
        :param elem: XML element name being written
        :param attr: HSP attribute name supplying the value
        :returns: the value to serialize for *elem*
        """
        # adjust coordinates: BLAST+ XML is one-based on start coordinates,
        # ours are zero-based, so only '*_start' attributes get the +1 shift
        # (BUGFIX: removed a dead assignment that unconditionally added 1
        # and was immediately overwritten by both branches below)
        if attr in ('query_start', 'query_end', 'hit_start', 'hit_end',
                    'pattern_start', 'pattern_end'):
            content = getattr(hsp, attr)
            if '_start' in attr:
                content += 1
            # adjust for 'from' <--> 'to' flip if it's not a translated search
            # and frames are different
            # adapted from /src/algo/blast/format/blastxml_format.cpp#L216
            if hsp.query_frame != 0 and hsp.hit_frame < 0:
                if attr == 'hit_start':
                    content = getattr(hsp, 'hit_end')
                elif attr == 'hit_end':
                    content = getattr(hsp, 'hit_start') + 1
        # for seqrecord objects, we only need the sequence string
        elif elem in ('Hsp_hseq', 'Hsp_qseq'):
            content = str(getattr(hsp, attr).seq)
        elif elem == 'Hsp_midline':
            content = hsp.aln_annotation['similarity']
        elif elem in ('Hsp_evalue', 'Hsp_bit-score'):
            # adapted from src/algo/blast/format/blastxml_format.cpp#L138-140
            content = '%.*g' % (6, getattr(hsp, attr))
        else:
            content = getattr(hsp, attr)
        return content
# When executed as a script rather than imported as a module,
# run this module's doctests via Biopython's helper.
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest()
# ==== next file: mining/temp-processing/data-deal-and-draw-graph.py (gh_stars: 1-10) ====
from pyecharts import options as opts
from pyecharts.globals import GeoType
from pyecharts.charts import Line3D, Scatter3D, Geo
import csv
import time
def read_csv(filename):
    """Read a CSV file and return its data rows, skipping the header row."""
    with open(filename, newline='') as csvfile:
        rows = csv.reader(csvfile)
        next(rows)  # discard the header line
        return list(rows)
def write_result(filename, result):
    """Write cleaned records to *filename* as UTF-8 CSV.

    :param filename: path of the output CSV file
    :param result: sequence of records with at least six fields:
        (timestamp, imsi, lac_id, cell_id, longitude, latitude)
    """
    # 'with' guarantees the handle is closed even if a write fails
    # (the original leaked the handle on any exception before close())
    with open(filename, 'w', newline='', encoding='UTF-8') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(('timestamp', 'imsi', 'lac_id', 'cell_id', 'longitude', 'latitude'))
        for record in result:
            # only the first six fields belong to the output schema
            writer.writerow(record[:6])
def clean_data():
    """Clean the raw cell records: keep the first four columns, drop bad
    IMSIs, convert timestamps, keep 2018-10-03 records only, join with
    base-station coordinates, sort, and save to newData.csv."""
    # load the raw records
    raw = read_csv('E:/demon/data/服创大赛-原始数据.csv')
    print(len(raw))
    # keep only the first four columns (timestamp, imsi, lac_id, cell_id)
    first_four = []
    for row in raw:
        entry = [row[0], row[1], row[2], row[3]]
        first_four.append(entry)
        print(entry)
    # drop rows with empty fields or an imsi containing '#', '*' or '^'
    filtered = []
    for entry in first_four:
        imsi, lac, cell = entry[1], entry[2], entry[3]
        if imsi and lac and cell and not any(mark in imsi for mark in '#*^'):
            filtered.append(entry)
    print(len(filtered))
    # convert millisecond epoch timestamps to YYYYMMDDHHMMSS strings
    for entry in filtered:
        seconds = int(entry[0]) / 1000
        entry[0] = time.strftime("%Y%m%d%H%M%S", time.localtime(seconds))
    # keep only records from 2018-10-03
    day_rows = [entry for entry in filtered if "20181003" in entry[0]]
    print(len(day_rows))
    # join with the base-station table to attach longitude/latitude;
    # records without a matching station are dropped
    stations = read_csv('E:/demon/data/服创大赛-基站经纬度数据.csv')
    joined = []
    for entry in day_rows:
        key = entry[2] + "-" + entry[3]
        for station in stations:
            if key in station[2]:
                joined.append((entry[0], entry[1], entry[2], entry[3],
                               station[0], station[1]))
                break
    print(len(joined))
    # sort (lexicographically: timestamp first) and show the result
    result = sorted(joined)
    for index, record in enumerate(result):
        print(index, record[0], record[1], record[2], record[3],
              record[4], record[5])
    # dump to file
    write_result('E:/demon/data/newdata/newData.csv', result)
# clean_data()
def count_everyhour_num():
    """Count how many records fall in each hour of the day and save the
    24-bucket distribution to everyHour.csv."""
    records = read_csv('E:/demon/data/newdata/newData.csv')
    print(len(records))
    hour = [0] * 24
    print(len(hour))
    for row in records:
        # characters 8-9 of the YYYYMMDDHHMMSS timestamp are the hour;
        # the guard mirrors the original's silent skip of malformed rows
        if row[0][8] in ('0', '1', '2'):
            hour[int(row[0][8:10])] += 1
    print(hour)
    # write the hourly distribution; 'with' closes the file even on error
    # (the original leaked the handle on any exception before close())
    with open('E:/demon/data/newdata/everyHour.csv', 'w', newline='', encoding='UTF-8') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(('hour', 'num'))
        for i, num in enumerate(hour):
            writer.writerow(('{}--{}'.format(i, i + 1), num))
def every_everypeople_num():
    """Count the records per person and save both the per-person counts and
    the per-person record list (grouped by person, in file order) to CSV.

    The original implementation used ``list.count`` per person (O(n^2)) and
    rescanned all records for each person when grouping (O(n*m)); this
    version does a single grouping pass. Output order is preserved:
    first-seen order of each imsi (requires dict insertion order, 3.7+).
    """
    records = read_csv('E:/demon/data/newdata/newData.csv')
    # group records by imsi in one pass; dict preserves first-seen order
    grouped = {}
    for row in records:
        grouped.setdefault(row[1], []).append((row[1], row[0], row[4], row[5]))
    res_data = [len(recs) for recs in grouped.values()]
    print(len(res_data))
    # (imsi, count) pairs in first-seen order, mirroring the original output
    people = [(imsi, len(recs)) for imsi, recs in grouped.items()]
    for i, pair in enumerate(people):
        print(i, pair)
    print(len(people))
    # all records grouped by person, keeping the original file order inside
    # each person's group
    people_item = [rec for recs in grouped.values() for rec in recs]
    print(len(people_item))
    for i, rec in enumerate(people_item):
        print(i, rec)
    # write the per-person counts; 'with' closes the file even on error
    with open('E:/demon/data/newdata/people_num.csv', 'w', newline='', encoding='UTF-8') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(('people_id', 'num'))
        writer.writerows(people)
    # write the per-person record list (grouped by person, time-ordered
    # within a person as in the source file)
    with open('E:/demon/data/newdata/people_item.csv', 'w', newline='', encoding='UTF-8') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(('people_id', 'timestamp', 'longitude', 'latitude'))
        writer.writerows(people_item)
def geo(base_location, staticdata, people_item):
    """Build a Geo chart of the city with base stations, transit stops and
    per-person records plotted as effect-scatter points.

    :param base_location: station rows (lng, lat, name, ...)
    :param staticdata: transit rows (lng, lat, kind, name, line, ...)
    :param people_item: person rows (imsi, timestamp, lng, lat)
    :returns: the configured Geo chart
    """
    city = '沈阳'
    chart = Geo()
    chart.add_schema(maptype=city)
    # register every point with the coordinate library:
    # add_coordinate(name, lng, lat)
    for station in base_location:
        chart.add_coordinate(station[2], station[0], station[1])
    for stop in staticdata:
        chart.add_coordinate(stop[3], stop[0], stop[1])
    for record in people_item:
        chart.add_coordinate(record[1], record[2], record[3])
    # build the (name, category) pairs: stations, transit stops, people
    data_pair = [(station[2], '基站') for station in base_location]
    for stop in staticdata:
        if stop[2] == '地铁':
            # subway: label carries the line number
            data_pair.append((stop[3], stop[2] + stop[4] + '号线'))
        elif stop[2] == '公交':
            data_pair.append((stop[3], '公交'))
    data_pair.extend((record[1], '人') for record in people_item)
    # plot everything as effect-scatter points
    chart.add('', data_pair, type_=GeoType.EFFECT_SCATTER, symbol_size=6)
    chart.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
    # custom legend segments; colors picked with a color picker
    pieces = [
        {'min': '基站', 'max': '基站', 'label': '基站', 'color': '#D94E5D'},
        {'min': '地铁1号线', 'max': '地铁1号线', 'label': '地铁1号线', 'color': '#87CEFA'},
        {'min': '地铁2号线', 'max': '地铁2号线', 'label': '地铁2号线', 'color': '#DA70D6'},
        {'min': '地铁9号线', 'max': '地铁9号线', 'label': '地铁9号线', 'color': '#32CD32'},
        {'min': '公交', 'max': '公交', 'label': '公交', 'color': '#6495ED'},
        {'min': '人', 'max': '人', 'label': '人', 'color': '#000000'}
    ]
    # is_piecewise must be True for the custom segments to take effect
    chart.set_global_opts(
        visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
        title_opts=opts.TitleOpts(title="{}-站点分布".format(city)),
    )
    return chart
def get_people_item():
    """Load the per-person record list written by every_everypeople_num()."""
    return read_csv('E:/demon/data/newdata/people_item.csv')
def get_user_id_list(people_item):
    """Return (index, imsi) pairs for each distinct person.

    People are numbered in order of first appearance in *people_item*,
    where each row's first field is the imsi.
    """
    # dict.fromkeys dedupes while preserving first-seen order
    unique_ids = dict.fromkeys(record[0] for record in people_item)
    return [(index, imsi) for index, imsi in enumerate(unique_ids)]
def drow_effectscatter():
    """Load all data sources, build the geo chart via geo() and render it."""
    # base-station coordinates
    base_location = read_csv('E:/demon/data/服创大赛-基站经纬度数据.csv')
    print(len(base_location))
    # static transit data (subway / bus)
    static_data = read_csv('E:/demon/data/服创大赛-出行方式静态数据.csv')
    print(len(static_data))
    # per-person records
    people_item = get_people_item()
    print(len(people_item))
    # render to an HTML file that can be opened directly in a browser
    geo(base_location, static_data, people_item).render('ShenYang.html')
def get_color():
    """Return 114 six-digit hex color strings.

    Colors start at '#000000' and step through the RGB cube in increments
    of 0x243f6, which spreads them evenly up to just under '#ffffff'.
    """
    return ['#{:06x}'.format(i * 0x243f6) for i in range(114)]
def get_pieces(color):
    """Build visual-map pieces for the 3D charts: piece i maps value i to
    color[i]."""
    return [{'min': index, 'max': index, 'label': index, 'color': value}
            for index, value in enumerate(color)]
def get_data():
    """Build [x, y, z, user_id] samples for the 3D charts.

    x/y are longitude/latitude, z is seconds since midnight extracted from
    the YYYYMMDDHHMMSS timestamp, user_id is the person's numeric index.

    The original scanned the whole user_id_list per record (O(n*m)); this
    uses an O(1) dict lookup instead. A record whose imsi is somehow absent
    now raises KeyError explicitly instead of silently reusing a stale id.
    """
    people_item = get_people_item()
    user_id_list = get_user_id_list(people_item)
    # O(1) imsi -> numeric id lookup
    index_of = {imsi: idx for idx, imsi in user_id_list}
    data = []
    for record in people_item:
        x = float(record[2])
        y = float(record[3])
        # characters 8.. of the timestamp are HHMMSS; convert to seconds
        hh, mm, ss = record[1][8:10], record[1][10:12], record[1][12:14]
        z = int(hh) * 3600 + int(mm) * 60 + int(ss)
        data.append([x, y, z, index_of[record[0]]])
    return data
def get_people(user_id_list, data):
    """Group *data* samples into one bucket per user.

    :param user_id_list: list of (index, imsi) pairs; only its length is used
    :param data: samples of the form [x, y, z, user_index]
    :returns: list of buckets; bucket i holds the samples whose user_index
        is i, in their original order

    The original rescanned all of *data* once per user (O(n*m)); this is a
    single pass. Samples whose index falls outside the bucket range are
    dropped, as before.
    """
    buckets = [[] for _ in user_id_list]
    for sample in data:
        idx = sample[3]
        if 0 <= idx < len(buckets):
            buckets[idx].append(sample)
    return buckets
def add(i):
"""得到114个add函数对应的的字符串"""
return ".add(user_id_list[" + str(i) + "][0], \n" \
"\tpeople[" + str(i) + "], \n" \
"\txaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'), \n" \
"\tyaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'), \n" \
"\tzaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'), \n" \
"\tgrid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100)," \
"\n)"
def print_add():
    """Print the generated ``.add`` call text for every known user."""
    user_ids = get_user_id_list(get_people_item())
    for index, _ in enumerate(user_ids):
        print(add(index))
def line3d() -> Line3D:
people_item = get_people_item()
user_id_list = get_user_id_list(people_item)
data = get_data()
people = get_people(user_id_list, data)
color = get_color()
pieces = get_pieces(color)
c = (
Line3D()
.add(user_id_list[0][0],
people[0],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[1][0],
people[1],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[2][0],
people[2],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[3][0],
people[3],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[4][0],
people[4],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[5][0],
people[5],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[6][0],
people[6],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[7][0],
people[7],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[8][0],
people[8],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[9][0],
people[9],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[10][0],
people[10],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[11][0],
people[11],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[12][0],
people[12],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[13][0],
people[13],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[14][0],
people[14],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[15][0],
people[15],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[16][0],
people[16],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[17][0],
people[17],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[18][0],
people[18],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[19][0],
people[19],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[20][0],
people[20],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[21][0],
people[21],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[22][0],
people[22],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[23][0],
people[23],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[24][0],
people[24],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[25][0],
people[25],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[26][0],
people[26],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[27][0],
people[27],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[28][0],
people[28],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[29][0],
people[29],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[30][0],
people[30],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[31][0],
people[31],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[32][0],
people[32],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[33][0],
people[33],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[34][0],
people[34],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[35][0],
people[35],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[36][0],
people[36],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[37][0],
people[37],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[38][0],
people[38],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[39][0],
people[39],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[40][0],
people[40],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[41][0],
people[41],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[42][0],
people[42],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[43][0],
people[43],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[44][0],
people[44],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[45][0],
people[45],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[46][0],
people[46],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[47][0],
people[47],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[48][0],
people[48],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[49][0],
people[49],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[50][0],
people[50],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[51][0],
people[51],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[52][0],
people[52],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[53][0],
people[53],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[54][0],
people[54],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[55][0],
people[55],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[56][0],
people[56],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[57][0],
people[57],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[58][0],
people[58],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[59][0],
people[59],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[60][0],
people[60],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[61][0],
people[61],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[62][0],
people[62],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[63][0],
people[63],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[64][0],
people[64],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[65][0],
people[65],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[66][0],
people[66],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[67][0],
people[67],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[68][0],
people[68],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[69][0],
people[69],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[70][0],
people[70],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[71][0],
people[71],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[72][0],
people[72],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[73][0],
people[73],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[74][0],
people[74],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[75][0],
people[75],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[76][0],
people[76],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[77][0],
people[77],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[78][0],
people[78],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[79][0],
people[79],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[80][0],
people[80],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[81][0],
people[81],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[82][0],
people[82],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[83][0],
people[83],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[84][0],
people[84],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[85][0],
people[85],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[86][0],
people[86],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[87][0],
people[87],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[88][0],
people[88],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[89][0],
people[89],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[90][0],
people[90],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[91][0],
people[91],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[92][0],
people[92],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[93][0],
people[93],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[94][0],
people[94],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[95][0],
people[95],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[96][0],
people[96],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[97][0],
people[97],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[98][0],
people[98],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[99][0],
people[99],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[100][0],
people[100],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[101][0],
people[101],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[102][0],
people[102],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[103][0],
people[103],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[104][0],
people[104],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[105][0],
people[105],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[106][0],
people[106],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[107][0],
people[107],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[108][0],
people[108],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[109][0],
people[109],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[110][0],
people[110],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[111][0],
people[111],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[112][0],
people[112],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.add(user_id_list[113][0],
people[113],
xaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
yaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
zaxis3d_opts=opts.Axis3DOpts(type_='value', min_='dataMin', max_='dataMax'),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
title_opts=opts.TitleOpts(title="3D折线图"),
)
)
return c
def drow_line3d():
    """Render the 3D line chart produced by line3d() to a standalone HTML file."""
    chart = line3d()
    # The generated HTML can be opened directly in a browser.
    chart.render('line3D.html')
def scatter3d() -> Scatter3D:
    """Build the pyecharts 3D scatter chart from the shared data helpers.

    Returns the configured Scatter3D instance; the caller is responsible
    for rendering it.
    """
    points = get_data()
    palette = get_color()
    pieces = get_pieces(palette)

    chart = Scatter3D()
    chart.add(
        "",
        points,
        xaxis3d_opts=opts.Axis3DOpts(type_="value", min_='dataMin', max_='dataMax'),
        yaxis3d_opts=opts.Axis3DOpts(type_="value", min_='dataMin', max_='dataMax'),
        zaxis3d_opts=opts.Axis3DOpts(type_="value", min_='dataMin', max_='dataMax'),
        grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
    )
    # Piecewise visual map colors the points by the ranges built in get_pieces().
    chart.set_global_opts(
        visualmap_opts=opts.VisualMapOpts(is_piecewise=True, pieces=pieces),
        title_opts=opts.TitleOpts("3D散点图"),
    )
    return chart
def drow_scatter3d():
    """Render the 3D scatter chart produced by scatter3d() to a standalone HTML file."""
    chart = scatter3d()
    # The generated HTML can be opened directly in a browser.
    chart.render('scatter3D.html')
if __name__=="__main__":
    # Render each demo chart to its own standalone HTML file.
    drow_effectscatter()
    drow_line3d()
    drow_scatter3d()
|
from django.shortcuts import render,redirect,HttpResponseRedirect,reverse
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from apps.student.models import Student,StudentList
from django.conf import settings
from django.views.generic.list import ListView
from ..classes.models import ClassLevels,Classes,ClassNames
from ..home.models import Session,Period
from .models import LessonPeriods,DailyAttendance
import datetime
from apps.classes.classes_for_sidebar import all_class_levels
# Module-level querysets shared by every view below.
# NOTE(review): a QuerySet caches its results after first evaluation, so these
# may serve stale Session/Period rows for the lifetime of the process —
# confirm this caching is acceptable.
sessions = Session.objects.all()
periods = Period.objects.all()
# NOTE(review): this rebinding shadows the imported all_class_levels() helper,
# so the sidebar data is computed once at import time.
all_class_levels = all_class_levels()
# Create your views here.
def sessionUpdate(request):
    """Switch the active Session and/or Period from GET parameters.

    If ``sessionID`` is present in the query string, every currently active
    Session is deactivated and the named one activated; likewise for
    ``periodID`` and Period. Raises Session.DoesNotExist /
    Period.DoesNotExist when the requested row is missing (unchanged from
    the original behavior).
    """
    session_name = request.GET.get("sessionID")
    if session_name:
        # filter().update() copes with zero or (inconsistent) multiple
        # active rows, replacing the previous bare `except: pass`.
        Session.objects.filter(active=True).update(active=False)
        newSession = Session.objects.get(session=session_name)
        newSession.active = True
        newSession.save()
    period_name = request.GET.get("periodID")
    if period_name:
        Period.objects.filter(active=True).update(active=False)
        newPeriod = Period.objects.get(period=period_name)
        newPeriod.active = True
        newPeriod.save()
@login_required(login_url="/login/")
def stdAttUpdate(request,pk=None):
    """Show the attendance-update page for the student with primary key ``pk``.

    Falls back to the attendance-list template when the student or their
    class-list entry for the active session/period cannot be resolved.
    """
    sessionUpdate(request)
    session = Session.objects.get(active=True)
    period = Period.objects.get(active=True)
    try:
        student = Student.objects.get(id=pk)
        className = StudentList.objects.get(students=student, session=session, periods=period)
        # Method check instead of truthiness of request.POST, so a POST with
        # an empty body is still treated as a POST.
        if request.method == "POST":
            # NOTE(review): no submitted data is applied before saving —
            # presumably form handling is still to be added; confirm intent.
            student.save()
    except (Student.DoesNotExist, StudentList.DoesNotExist,
            StudentList.MultipleObjectsReturned):
        # Narrowed from a bare except: only lookup failures fall back here;
        # programming errors now surface instead of being swallowed.
        context = {
            'sessions': sessions, "periods": periods,
            'all_class_levels': all_class_levels,
        }
        return render(request, "student/att-list.html", context)
    context = {
        'student': student,
        'className': className,
        'media_url': settings.MEDIA_URL,
        'sessions': sessions, "periods": periods,
        'all_class_levels': all_class_levels,
    }
    return render(request, "student/std-Update.html", context)
@login_required(login_url="/login/")
def stdAttView(request,pk=None):
    """Show the profile page for the student with primary key ``pk``.

    Falls back to the student-list template when the student or their
    class-list entry for the active session/period cannot be resolved.
    """
    sessionUpdate(request)
    session = Session.objects.get(active=True)
    period = Period.objects.get(active=True)
    try:
        student = Student.objects.get(id=pk)
        className = StudentList.objects.get(students=student, session=session, periods=period)
    except (Student.DoesNotExist, StudentList.DoesNotExist,
            StudentList.MultipleObjectsReturned):
        # Narrowed from a bare except: only lookup failures fall back here.
        context = {
            'sessions': sessions, "periods": periods,
        }
        return render(request, "student/std-list.html", context)
    context = {
        'student': student,
        'className': className,
        'media_url': settings.MEDIA_URL,
        'sessions': sessions, "periods": periods,
        'all_class_levels': all_class_levels,
    }
    return render(request, "student/std-profile.html", context)
@login_required(login_url="/login/")
def stdAttDelete(request):
    """Render the student profile page.

    NOTE(review): despite the name, nothing is deleted here — presumably
    the deletion logic is still to be implemented; confirm intent.
    """
    ctx = {
        'sessions': sessions,
        "periods": periods,
        'all_class_levels': all_class_levels,
    }
    return render(request, "student/std-profile.html", ctx)
@login_required(login_url="/login/")
def stdAttIndex(request):
    """Render the daily-attendance landing page with the shared sidebar data."""
    ctx = {
        'sessions': sessions,
        "periods": periods,
        'all_class_levels': all_class_levels,
    }
    return render(request, "attendance/std-dailyAttendance.html", ctx)
@login_required(login_url="/login/")
def stdAttShowList(request):
    """Daily-attendance sheet: lesson periods x students for one class.

    GET parameters:
      * ``className`` / ``classLevel`` — class filter (defaults "A" / "9").
      * ``cb-1`` (multi-valued) — checkbox values "lessonID-studentID-status"
        used to create or delete today's DailyAttendance rows.
    Renders attendance/std-dailyAttendance.html; ``newlist`` is a zip of
    (lesson period, zip of (student, has-attendance-row flag)).
    """
    day =datetime.date.today()
    classNames = ClassNames.objects.all()
    classLevels = ClassLevels.objects.all()
    session=Session.objects.get(active=True)
    period=Period.objects.get(active=True)
    lesPeriods=LessonPeriods.objects.filter(session=session,periods=period)
    # NOTE(review): absentStudentList is computed but never used below.
    absentStudentList = DailyAttendance.objects.filter(day=day)
    #sessions = Session.objects.all()
    #periods=Period.objects.all()
    #studentList = StudentList.objects.all()
    newlist=[]
    className = ""
    classLevel = ""
    if request.GET:
        # Apply any session/period switch requested in the query string first.
        sessionUpdate(request)
        className = request.GET.get("className")
        classLevel = request.GET.get("classLevel")
        attandanceList = request.GET.getlist("cb-1")
        if className==None or classLevel==None:
            # Default class when no filter was submitted.
            className="A"
            classLevel="9"
        # Students enrolled in the selected class for the active term.
        studentsList = Student.objects.filter(
            studentlist__className__className__name__contains=className,
            studentlist__className__level__level__contains=classLevel,
            studentlist__session__session__contains=session,
            studentlist__periods__period__contains=period)
        if attandanceList:
            # Each checkbox value encodes "lessonID-studentID-status".
            for item in attandanceList:
                newAttandance = DailyAttendance()
                lessonID,studentID,status = item.split("-")
                print(lessonID,studentID,status)
                try:
                    # A row already exists for this lesson/student/day.
                    oldAttandance = DailyAttendance.objects.get(lesPeriod=lessonID,student=studentID,day=day)
                    # newAttandance.update(lesPeriod=lessonID,student=studentID,day=day)
                    if status=="0":
                        # Unchecked: remove the existing attendance record.
                        oldAttandance.delete()
                    print("kayıt var ?")
                    continue
                except:
                    # No existing row — create one for this lesson/student.
                    # NOTE(review): bare except also hides real errors; should
                    # be DailyAttendance.DoesNotExist. Also 'day' is never set
                    # on the new row — presumably the model fills it in
                    # (auto_now_add?); confirm against the model definition.
                    newAttandance.lesPeriod=LessonPeriods.objects.get(id=lessonID)
                    newAttandance.student=Student.objects.get(id=studentID)
                    newAttandance.periods=period
                    newAttandance.session=session
                    newAttandance.save()
        # Build, per lesson period, a parallel True/False list marking which
        # students already have a DailyAttendance row today.
        studentsx=[]
        for x in lesPeriods:
            statusList=[]
            for student in studentsList:
                studentss = DailyAttendance.objects.filter(lesPeriod=x.pk,day=day,student=student)
                if studentss :
                    statusList.append(True)
                else:
                    statusList.append(False)
            studentsx.append(zip(studentsList,statusList))
        newlist = zip(lesPeriods,studentsx)
    else:
        # NOTE(review): this loop only rebinds a local that is never used
        # afterwards (the context entry below is commented out).
        for listem in StudentList.objects.filter():
            studentList= listem.students.all()
    context = {
        #'students' : students,
        #'studentList':studentList,
        'sessions':sessions,
        'periods':periods,
        'lesPeriods':lesPeriods,
        'classNames':classNames,
        'classLevels':classLevels,
        'currentClassLevel':classLevel,
        'currentClassName':className,
        'newlist':newlist,
        'media_url':settings.MEDIA_URL,
        'all_class_levels':all_class_levels
    }
    return render(request, "attendance/std-dailyAttendance.html", context)
@login_required(login_url="/login/")
def lessonPeriodList(request):
    """Admin page for the lesson-period timetable of the active term.

    GET actions (each redirects back to "lesperiod-edit"):
      * ``add``    — append a new period starting 10 minutes after the last
                     one ends and lasting 40 minutes (or create "1.Ders"
                     when no periods exist yet).
      * ``delete`` — remove the period with the highest lessCount.
      * ``change`` — rewrite every period's "start-end" time from the
                     submitted "<n>-start" / "<n>-end" fields.
    Without an action, renders the periods as [name, start, end, count] rows.
    """
    sessionUpdate(request)
    session=Session.objects.get(active=True)
    session_id = session.id
    period=Period.objects.get(active=True)
    period_id = period.id
    lesPeriods = LessonPeriods.objects.filter(session_id=session_id,periods_id=period_id)
    if request.GET:
        if request.GET.get("add"):
            if len(lesPeriods) > 0:
                # str(LessonPeriods) is "<name>-<start>-<end>"; take the last
                # period's end time and start the new one 10 minutes later.
                Period_time_list = str(lesPeriods[len(lesPeriods) - 1]).split("-")
                hour_value = int(Period_time_list[2].split(":")[0])
                minute_value = int(Period_time_list[2].split(":")[1])+10
                if minute_value > 59:
                    hour_value += 1
                    minute_value = minute_value - 60
                # New period lasts 40 minutes.
                hour_value_end = hour_value
                minute_value_end = minute_value + 40
                # NOTE(review): only the exact value 0 is padded to "00";
                # single-digit values like 5 render as "5" — confirm intent.
                if hour_value == 0:
                    hour_value = "00"
                if minute_value == 0:
                    minute_value = "00"
                if minute_value_end > 59:
                    hour_value_end += 1
                    minute_value_end = minute_value_end - 60
                if hour_value_end == 0:
                    hour_value_end = "00"
                if minute_value_end == 0:
                    minute_value_end = "00"
                # NOTE(review): 'id' shadows the builtin id() inside this branch.
                id = str(len(lesPeriods)+1)
                #str(hour_value)+":"+str(minute_value)+"-"+str(hour_value_end)+":"+str(minute_value_end)
                new_lesPeriod = LessonPeriods(lessCount=id,periods=period,session=session,lessName = id+".Ders",lesPeriod=str(hour_value)+":"+str(minute_value)+"-"+str(hour_value_end)+":"+str(minute_value_end))
                new_lesPeriod.save()
            else:
                # First period of the term; no time string is set yet.
                new_lesPeriod = LessonPeriods(lessName = "1.Ders",periods=period,session=session,lessCount="1")
                new_lesPeriod.save()
            return redirect("lesperiod-edit")
        if request.GET.get("delete"):
            # Delete the last period (lessCount equals the current count).
            id = str(len(lesPeriods))
            old_lesPeriod = LessonPeriods.objects.filter(lessCount=id,periods=period,session=session)
            old_lesPeriod.delete()
            return redirect("lesperiod-edit")
        if request.GET.get("change"):
            # Rewrite each period's time span from the submitted form fields.
            for i in range(1,len(lesPeriods)+1):
                i = str(i)
                start = request.GET.get(i+"-start")
                end = request.GET.get(i+"-end")
                start_end = start + "-" + end
                old_lesPeriod_object = LessonPeriods.objects.get(lessCount=i,session_id=session_id,periods_id=period_id)
                old_lesPeriod_object.lesPeriod = start_end
                old_lesPeriod_object.save()
            return redirect("lesperiod-edit")
    # Re-query so a non-action GET sees current data, then split each
    # "<name>-<start>-<end>" string into display columns.
    lesPeriods=LessonPeriods.objects.filter(session_id=session_id,periods_id=period_id)
    all_lesPeriods = []
    lessonCount = 1
    for i in lesPeriods:
        Period_list = str(i).split("-")
        all_lesPeriods.append([Period_list[0],Period_list[1],Period_list[2],str(lessonCount)])
        lessonCount += 1
    context={
        'lesPeriods':all_lesPeriods,
        'sessions':sessions,
        'periods':periods,
        'media_url':settings.MEDIA_URL,
        'all_class_levels':all_class_levels
    }
    return render(request, "attendance/lessonPeriods.html",context)
# Create your views here.
|
<reponame>ystk/debian-python3.1
# asciixmas
# December 1989 <NAME> Indianapolis, IN
#
# $Id: xmas.py 46754 2006-06-08 15:35:45Z thomas.wouters $
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# Just like the ones I used to know!
# Via a full duplex communications channel,
# At 9600 bits per second,
# Even though it's kinda slow.
#
# I'm dreaming of an ascii character-based monochrome Christmas,
# With ev'ry C program I write!
# May your screen be merry and bright!
# And may all your Christmases be amber or green,
# (for reduced eyestrain and improved visibility)!
#
#
# Notes on the Python version:
# I used a couple of `try...except curses.error' to get around some functions
# returning ERR. The errors come from using wrapping functions to fill
# windows to the last character cell. The C version doesn't have this problem,
# it simply ignores any return values.
#
import curses
import sys
FROMWHO = "<NAME> <<EMAIL>>"
def set_color(win, color):
    """Switch *win* to the color pair for *color* over the my_bg background.

    No-op on monochrome terminals.
    """
    if not curses.has_colors():
        return
    pair = color + 1  # pair 0 is reserved, so offset the color by one
    curses.init_pair(pair, color, my_bg)
    win.attroff(curses.A_COLOR)
    win.attron(curses.color_pair(pair))
def unset_color(win):
    """Reset *win* back to the default color pair (no-op if monochrome)."""
    if not curses.has_colors():
        return
    win.attrset(curses.color_pair(0))
def look_out(msecs):
    """Sleep for *msecs* milliseconds, then exit the program if any key
    has been pressed (stdscr is in nodelay mode, so getch() returns -1
    when no key is pending)."""
    curses.napms(msecs)
    key = stdscr.getch()
    if key == -1:
        return
    curses.beep()
    sys.exit(0)
def boxit():
    """Draw the frame: the left wall and the two horizontal rules."""
    for row in range(20):
        stdscr.addch(row, 7, ord('|'))
    for col in range(8, 80):
        stdscr.addch(19, col, ord('_'))
    for col in range(80):
        stdscr.addch(22, col, ord('_'))
def seas():
    """Spell "SEASON'S" down the left margin, one letter every other row."""
    for row, letter in zip(range(4, 19, 2), "SEASON'S"):
        stdscr.addch(row, 1, ord(letter))
def greet():
    """Spell "GREETINGS" down column 5, one letter every other row."""
    for row, letter in zip(range(3, 20, 2), "GREETINGS"):
        stdscr.addch(row, 5, ord(letter))
def fromwho():
    """Print the sender line (FROMWHO) under the greeting box."""
    stdscr.addstr(21, 13, FROMWHO)
def tree():
    """Draw the green tree outline onto treescrn and repaint it."""
    set_color(treescrn, curses.COLOR_GREEN)
    # Left and right edges of the tree, top to bottom.
    left_edge = [(1, 11), (2, 11), (3, 10), (4, 9), (5, 9), (6, 8),
                 (7, 7), (8, 6), (9, 6), (10, 5), (11, 3), (12, 2)]
    right_edge = [(1, 13), (2, 13), (3, 14), (4, 15), (5, 15), (6, 16),
                  (7, 17), (8, 18), (9, 18), (10, 19), (11, 21), (12, 22)]
    for y, x in left_edge:
        treescrn.addch(y, x, ord('/'))
    for y, x in right_edge:
        treescrn.addch(y, x, ord('\\'))
    # Branch tiers.
    for y, x in [(4, 10), (4, 14), (8, 7), (8, 17)]:
        treescrn.addch(y, x, ord('_'))
    # Skirt and trunk.
    treescrn.addstr(13, 0, "//////////// \\\\\\\\\\\\\\\\\\\\\\\\")
    treescrn.addstr(14, 11, "| |")
    treescrn.addstr(15, 11, "|_|")
    unset_color(treescrn)
    treescrn.refresh()
    w_del_msg.refresh()
def balls():
    """Hang the blue '@' ornaments on the tree (composited on treescrn2)."""
    treescrn.overlay(treescrn2)
    set_color(treescrn2, curses.COLOR_BLUE)
    ornaments = [(3, 9), (3, 15), (4, 8), (4, 16), (5, 7), (5, 17),
                 (7, 6), (7, 18), (8, 5), (8, 19), (10, 4), (10, 20),
                 (11, 2), (11, 22), (12, 1), (12, 23)]
    for y, x in ornaments:
        treescrn2.addch(y, x, ord('@'))
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def star():
    """Put the bold, blinking yellow star on top of the tree."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_YELLOW)
    treescrn2.addch(0, 12, ord('*'))
    treescrn2.standend()  # drop the bold/blink attributes again
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng1():
    """Draw the first (topmost) string of lights on treescrn2."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    for y, x, ch in [(3, 13, "'"), (3, 12, ":"), (3, 11, ".")]:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng2():
    """Draw the second string of lights on treescrn2."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    lights = [(5, 14, "'"), (5, 13, ":"), (5, 12, "."), (5, 11, ","),
              (6, 10, "'"), (6, 9, ":")]
    for y, x, ch in lights:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng3():
    """Draw the third string of lights on treescrn2."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    lights = [(7, 16, "'"), (7, 15, ":"), (7, 14, "."), (7, 13, ","),
              (8, 12, "'"), (8, 11, ":"), (8, 10, "."), (8, 9, ",")]
    for y, x, ch in lights:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng4():
    """Draw the fourth string of lights on treescrn2."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    lights = [(9, 17, "'"), (9, 16, ":"), (9, 15, "."), (9, 14, ","),
              (10, 13, "'"), (10, 12, ":"), (10, 11, "."), (10, 10, ","),
              (11, 9, "'"), (11, 8, ":"), (11, 7, "."), (11, 6, ","),
              (12, 5, "'")]
    for y, x, ch in lights:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    treescrn2.refresh()
    w_del_msg.refresh()
def strng5():
    """Draw the fifth (bottom) string of lights, then save the fully lit
    tree back onto treescrn."""
    treescrn2.attrset(curses.A_BOLD | curses.A_BLINK)
    set_color(treescrn2, curses.COLOR_WHITE)
    lights = [(11, 19, "'"), (11, 18, ":"), (11, 17, "."), (11, 16, ","),
              (12, 15, "'"), (12, 14, ":"), (12, 13, "."), (12, 12, ",")]
    for y, x, ch in lights:
        treescrn2.addch(y, x, ord(ch))
    treescrn2.attroff(curses.A_BOLD | curses.A_BLINK)
    unset_color(treescrn2)
    # save a fully lit tree
    treescrn2.overlay(treescrn)
    treescrn2.refresh()
    w_del_msg.refresh()
def blinkit():
    """Blink the tree lights by compositing a blink frame onto treescrn8,
    then restoring the fully lit tree.

    NOTE(review): every branch of the loop below ends in `break`, so only
    the cycle == 0 frame (treescrn3) is ever shown per call; treescrn4..7
    are unreachable here.  This looks like a leftover from translating the
    C demo's switch/break -- preserved as-is.
    """
    treescrn8.touchwin()
    for cycle in range(5):
        if cycle == 0:
            treescrn3.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 1:
            treescrn4.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 2:
            treescrn5.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 3:
            treescrn6.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
        elif cycle == 4:
            treescrn7.overlay(treescrn8)
            treescrn8.refresh()
            w_del_msg.refresh()
            break
    treescrn8.touchwin()
    # ALL ON
    treescrn.overlay(treescrn8)
    treescrn8.refresh()
    w_del_msg.refresh()
    return
def deer_step(win, y, x):
    """Move the reindeer window *win* to (y, x), repaint it, and pause
    5 ms (look_out() exits the program if a key was pressed)."""
    win.mvwin(y, x)
    win.refresh()
    w_del_msg.refresh()
    look_out(5)
def reindeer():
    """Animate the reindeer approach: a distant dot, then a star, then the
    small, medium and big deer sprites flying in toward the tree."""
    # Phase 1: a far-away dot descending from the top right.
    y_pos = 0
    for x_pos in range(70, 62, -1):
        if x_pos < 66: y_pos = 1
        for looper in range(0, 4):
            dotdeer0.addch(y_pos, x_pos, ord('.'))
            dotdeer0.refresh()
            w_del_msg.refresh()
            dotdeer0.erase()
            dotdeer0.refresh()
            w_del_msg.refresh()
            look_out(50)
    # Phase 2: the dot becomes a twinkling star.
    # NOTE(review): this range deliberately reuses x_pos leaked from the
    # loop above -- do not "clean it up".
    y_pos = 2
    for x_pos in range(x_pos - 1, 50, -1):
        for looper in range(0, 4):
            if x_pos < 56:
                y_pos = 3
                # addch at the last cell of the window can raise.
                try:
                    stardeer0.addch(y_pos, x_pos, ord('*'))
                except curses.error:
                    pass
                stardeer0.refresh()
                w_del_msg.refresh()
                stardeer0.erase()
                stardeer0.refresh()
                w_del_msg.refresh()
            else:
                dotdeer0.addch(y_pos, x_pos, ord('*'))
                dotdeer0.refresh()
                w_del_msg.refresh()
                dotdeer0.erase()
                dotdeer0.refresh()
                w_del_msg.refresh()
    # Phase 3: the small deer gallops in (alternating leg sprites).
    x_pos = 58
    for y_pos in range(2, 5):
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        for looper in range(0, 4):
            deer_step(lildeer3, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer1, y_pos, x_pos)
            deer_step(lildeer2, y_pos, x_pos)
            deer_step(lildeer3, y_pos, x_pos)
        lildeer0.touchwin()
        lildeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 2
    # Phase 4: the medium deer, closer and slower.
    x_pos = 35
    for y_pos in range(5, 10):
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        for looper in range(2):
            deer_step(middeer3, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer1, y_pos, x_pos)
            deer_step(middeer2, y_pos, x_pos)
            deer_step(middeer3, y_pos, x_pos)
        middeer0.touchwin()
        middeer0.refresh()
        w_del_msg.refresh()
        x_pos -= 3
    look_out(300)
    # Phase 5: the big deer crosses, pauses to look around, then descends.
    y_pos = 1
    for x_pos in range(8, 16):
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer1, y_pos, x_pos)
        deer_step(bigdeer2, y_pos, x_pos)
        deer_step(bigdeer3, y_pos, x_pos)
        deer_step(bigdeer4, y_pos, x_pos)
        deer_step(bigdeer0, y_pos, x_pos)
    x_pos -= 1
    for looper in range(0, 6):
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer1, y_pos, x_pos)
        deer_step(lookdeer2, y_pos, x_pos)
        deer_step(lookdeer3, y_pos, x_pos)
        deer_step(lookdeer4, y_pos, x_pos)
        deer_step(lookdeer0, y_pos, x_pos)
    for y_pos in range(y_pos, 10):
        for looper in range(0, 2):
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer1, y_pos, x_pos)
            deer_step(bigdeer2, y_pos, x_pos)
            deer_step(bigdeer3, y_pos, x_pos)
            deer_step(bigdeer4, y_pos, x_pos)
            deer_step(bigdeer0, y_pos, x_pos)
    y_pos -= 1
    deer_step(lookdeer3, y_pos, x_pos)
    return
def main(win):
    """Set up all the curses windows and run the animation loop forever.

    look_out() calls sys.exit() on any keypress, which is how the loop
    terminates; curses.wrapper restores the terminal on the way out.
    """
    global stdscr
    stdscr = win

    global my_bg, y_pos, x_pos
    global treescrn, treescrn2, treescrn3, treescrn4
    global treescrn5, treescrn6, treescrn7, treescrn8
    global dotdeer0, stardeer0
    global lildeer0, lildeer1, lildeer2, lildeer3
    global middeer0, middeer1, middeer2, middeer3
    global bigdeer0, bigdeer1, bigdeer2, bigdeer3, bigdeer4
    global lookdeer0, lookdeer1, lookdeer2, lookdeer3, lookdeer4
    global w_holiday, w_del_msg

    my_bg = curses.COLOR_BLACK
    # curses.curs_set(0)

    # Stacked tree windows: treescrn holds the fully lit tree, treescrn2
    # accumulates ornaments, treescrn3..7 are blink frames, treescrn8 is
    # the compositing surface.
    treescrn = curses.newwin(16, 27, 3, 53)
    treescrn2 = curses.newwin(16, 27, 3, 53)
    treescrn3 = curses.newwin(16, 27, 3, 53)
    treescrn4 = curses.newwin(16, 27, 3, 53)
    treescrn5 = curses.newwin(16, 27, 3, 53)
    treescrn6 = curses.newwin(16, 27, 3, 53)
    treescrn7 = curses.newwin(16, 27, 3, 53)
    treescrn8 = curses.newwin(16, 27, 3, 53)

    dotdeer0 = curses.newwin(3, 71, 0, 8)
    stardeer0 = curses.newwin(4, 56, 0, 8)

    lildeer0 = curses.newwin(7, 53, 0, 8)
    lildeer1 = curses.newwin(2, 4, 0, 0)
    lildeer2 = curses.newwin(2, 4, 0, 0)
    lildeer3 = curses.newwin(2, 4, 0, 0)

    middeer0 = curses.newwin(15, 42, 0, 8)
    middeer1 = curses.newwin(3, 7, 0, 0)
    middeer2 = curses.newwin(3, 7, 0, 0)
    middeer3 = curses.newwin(3, 7, 0, 0)

    bigdeer0 = curses.newwin(10, 23, 0, 0)
    bigdeer1 = curses.newwin(10, 23, 0, 0)
    bigdeer2 = curses.newwin(10, 23, 0, 0)
    bigdeer3 = curses.newwin(10, 23, 0, 0)
    bigdeer4 = curses.newwin(10, 23, 0, 0)

    lookdeer0 = curses.newwin(10, 25, 0, 0)
    lookdeer1 = curses.newwin(10, 25, 0, 0)
    lookdeer2 = curses.newwin(10, 25, 0, 0)
    lookdeer3 = curses.newwin(10, 25, 0, 0)
    lookdeer4 = curses.newwin(10, 25, 0, 0)

    w_holiday = curses.newwin(1, 27, 3, 27)
    w_del_msg = curses.newwin(1, 20, 23, 60)

    # The try/except curses.error pairs guard writes that touch the last
    # cell of a window, which addstr/addch report as an error.
    try:
        w_del_msg.addstr(0, 0, "Hit any key to quit")
    except curses.error:
        pass

    try:
        w_holiday.addstr(0, 0, "H A P P Y H O L I D A Y S")
    except curses.error:
        pass

    # set up the windows for our various reindeer
    lildeer1.addch(0, 0, ord('V'))
    lildeer1.addch(1, 0, ord('@'))
    lildeer1.addch(1, 1, ord('<'))
    lildeer1.addch(1, 2, ord('>'))
    try:
        lildeer1.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer2.addch(0, 0, ord('V'))
    lildeer2.addch(1, 0, ord('@'))
    lildeer2.addch(1, 1, ord('|'))
    lildeer2.addch(1, 2, ord('|'))
    try:
        lildeer2.addch(1, 3, ord('~'))
    except curses.error:
        pass
    lildeer3.addch(0, 0, ord('V'))
    lildeer3.addch(1, 0, ord('@'))
    lildeer3.addch(1, 1, ord('>'))
    lildeer3.addch(1, 2, ord('<'))
    try:
        # XXX writes to lildeer2 -- lildeer3 was presumably intended;
        # preserved as in the original demo.
        lildeer2.addch(1, 3, ord('~'))
    except curses.error:
        pass
    middeer1.addch(0, 2, ord('y'))
    middeer1.addch(0, 3, ord('y'))
    middeer1.addch(1, 2, ord('0'))
    middeer1.addch(1, 3, ord('('))
    middeer1.addch(1, 4, ord('='))
    middeer1.addch(1, 5, ord(')'))
    middeer1.addch(1, 6, ord('~'))
    middeer1.addch(2, 3, ord('\\'))
    middeer1.addch(2, 5, ord('/'))
    middeer2.addch(0, 2, ord('y'))
    middeer2.addch(0, 3, ord('y'))
    middeer2.addch(1, 2, ord('0'))
    middeer2.addch(1, 3, ord('('))
    middeer2.addch(1, 4, ord('='))
    middeer2.addch(1, 5, ord(')'))
    middeer2.addch(1, 6, ord('~'))
    middeer2.addch(2, 3, ord('|'))
    middeer2.addch(2, 5, ord('|'))
    middeer3.addch(0, 2, ord('y'))
    middeer3.addch(0, 3, ord('y'))
    middeer3.addch(1, 2, ord('0'))
    middeer3.addch(1, 3, ord('('))
    middeer3.addch(1, 4, ord('='))
    middeer3.addch(1, 5, ord(')'))
    middeer3.addch(1, 6, ord('~'))
    middeer3.addch(2, 3, ord('/'))
    middeer3.addch(2, 5, ord('\\'))
    bigdeer1.addch(0, 17, ord('\\'))
    bigdeer1.addch(0, 18, ord('/'))
    bigdeer1.addch(0, 19, ord('\\'))
    bigdeer1.addch(0, 20, ord('/'))
    bigdeer1.addch(1, 18, ord('\\'))
    bigdeer1.addch(1, 20, ord('/'))
    bigdeer1.addch(2, 19, ord('|'))
    bigdeer1.addch(2, 20, ord('_'))
    bigdeer1.addch(3, 18, ord('/'))
    bigdeer1.addch(3, 19, ord('^'))
    bigdeer1.addch(3, 20, ord('0'))
    bigdeer1.addch(3, 21, ord('\\'))
    bigdeer1.addch(4, 17, ord('/'))
    bigdeer1.addch(4, 18, ord('/'))
    bigdeer1.addch(4, 19, ord('\\'))
    bigdeer1.addch(4, 22, ord('\\'))
    bigdeer1.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer1.addstr(6, 7, "( \\_____( /") # ))
    bigdeer1.addstr(7, 8, "( ) /")
    bigdeer1.addstr(8, 9, "\\\\ /")
    bigdeer1.addstr(9, 11, "\\>/>")
    bigdeer2.addch(0, 17, ord('\\'))
    bigdeer2.addch(0, 18, ord('/'))
    bigdeer2.addch(0, 19, ord('\\'))
    bigdeer2.addch(0, 20, ord('/'))
    bigdeer2.addch(1, 18, ord('\\'))
    bigdeer2.addch(1, 20, ord('/'))
    bigdeer2.addch(2, 19, ord('|'))
    bigdeer2.addch(2, 20, ord('_'))
    bigdeer2.addch(3, 18, ord('/'))
    bigdeer2.addch(3, 19, ord('^'))
    bigdeer2.addch(3, 20, ord('0'))
    bigdeer2.addch(3, 21, ord('\\'))
    bigdeer2.addch(4, 17, ord('/'))
    bigdeer2.addch(4, 18, ord('/'))
    bigdeer2.addch(4, 19, ord('\\'))
    bigdeer2.addch(4, 22, ord('\\'))
    bigdeer2.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer2.addstr(6, 7, "(( )____( /") # ))
    bigdeer2.addstr(7, 7, "( / |")
    bigdeer2.addstr(8, 8, "\\/ |")
    bigdeer2.addstr(9, 9, "|> |>")
    bigdeer3.addch(0, 17, ord('\\'))
    bigdeer3.addch(0, 18, ord('/'))
    bigdeer3.addch(0, 19, ord('\\'))
    bigdeer3.addch(0, 20, ord('/'))
    bigdeer3.addch(1, 18, ord('\\'))
    bigdeer3.addch(1, 20, ord('/'))
    bigdeer3.addch(2, 19, ord('|'))
    bigdeer3.addch(2, 20, ord('_'))
    bigdeer3.addch(3, 18, ord('/'))
    bigdeer3.addch(3, 19, ord('^'))
    bigdeer3.addch(3, 20, ord('0'))
    bigdeer3.addch(3, 21, ord('\\'))
    bigdeer3.addch(4, 17, ord('/'))
    bigdeer3.addch(4, 18, ord('/'))
    bigdeer3.addch(4, 19, ord('\\'))
    bigdeer3.addch(4, 22, ord('\\'))
    bigdeer3.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer3.addstr(6, 6, "( ()_____( /") # ))
    bigdeer3.addstr(7, 6, "/ / /")
    bigdeer3.addstr(8, 5, "|/ \\")
    bigdeer3.addstr(9, 5, "/> \\>")
    bigdeer4.addch(0, 17, ord('\\'))
    bigdeer4.addch(0, 18, ord('/'))
    bigdeer4.addch(0, 19, ord('\\'))
    bigdeer4.addch(0, 20, ord('/'))
    bigdeer4.addch(1, 18, ord('\\'))
    bigdeer4.addch(1, 20, ord('/'))
    bigdeer4.addch(2, 19, ord('|'))
    bigdeer4.addch(2, 20, ord('_'))
    bigdeer4.addch(3, 18, ord('/'))
    bigdeer4.addch(3, 19, ord('^'))
    bigdeer4.addch(3, 20, ord('0'))
    bigdeer4.addch(3, 21, ord('\\'))
    bigdeer4.addch(4, 17, ord('/'))
    bigdeer4.addch(4, 18, ord('/'))
    bigdeer4.addch(4, 19, ord('\\'))
    bigdeer4.addch(4, 22, ord('\\'))
    bigdeer4.addstr(5, 7, "^~~~~~~~~// ~~U")
    bigdeer4.addstr(6, 6, "( )______( /") # )
    bigdeer4.addstr(7, 5, "(/ \\") # )
    bigdeer4.addstr(8, 0, "v___= ----^")
    lookdeer1.addstr(0, 16, "\\/ \\/")
    lookdeer1.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer1.addstr(2, 19, "\\=/")
    lookdeer1.addstr(3, 17, "^\\o o/^")
    lookdeer1.addstr(4, 17, "//( )")
    lookdeer1.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer1.addstr(6, 7, "( \\_____( /") # ))
    lookdeer1.addstr(7, 8, "( ) /")
    lookdeer1.addstr(8, 9, "\\\\ /")
    lookdeer1.addstr(9, 11, "\\>/>")
    lookdeer2.addstr(0, 16, "\\/ \\/")
    lookdeer2.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer2.addstr(2, 19, "\\=/")
    lookdeer2.addstr(3, 17, "^\\o o/^")
    lookdeer2.addstr(4, 17, "//( )")
    lookdeer2.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer2.addstr(6, 7, "(( )____( /") # ))
    lookdeer2.addstr(7, 7, "( / |")
    lookdeer2.addstr(8, 8, "\\/ |")
    lookdeer2.addstr(9, 9, "|> |>")
    lookdeer3.addstr(0, 16, "\\/ \\/")
    lookdeer3.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer3.addstr(2, 19, "\\=/")
    lookdeer3.addstr(3, 17, "^\\o o/^")
    lookdeer3.addstr(4, 17, "//( )")
    lookdeer3.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer3.addstr(6, 6, "( ()_____( /") # ))
    lookdeer3.addstr(7, 6, "/ / /")
    lookdeer3.addstr(8, 5, "|/ \\")
    lookdeer3.addstr(9, 5, "/> \\>")
    lookdeer4.addstr(0, 16, "\\/ \\/")
    lookdeer4.addstr(1, 17, "\\Y/ \\Y/")
    lookdeer4.addstr(2, 19, "\\=/")
    lookdeer4.addstr(3, 17, "^\\o o/^")
    lookdeer4.addstr(4, 17, "//( )")
    lookdeer4.addstr(5, 7, "^~~~~~~~~// \\O/")
    lookdeer4.addstr(6, 6, "( )______( /") # )
    lookdeer4.addstr(7, 5, "(/ \\") # )
    lookdeer4.addstr(8, 0, "v___= ----^")
    ###############################################
    curses.cbreak()
    # Non-blocking input so look_out()'s getch() polls instead of waiting.
    stdscr.nodelay(1)
    while 1:
        # Reset the screen and compositing windows for this pass.
        stdscr.clear()
        treescrn.erase()
        w_del_msg.touchwin()
        treescrn.touchwin()
        treescrn2.erase()
        treescrn2.touchwin()
        treescrn8.erase()
        treescrn8.touchwin()
        stdscr.refresh()
        look_out(150)
        # Frame, greeting and tree.
        boxit()
        stdscr.refresh()
        look_out(150)
        seas()
        stdscr.refresh()
        greet()
        stdscr.refresh()
        look_out(150)
        fromwho()
        stdscr.refresh()
        look_out(150)
        tree()
        look_out(150)
        balls()
        look_out(150)
        star()
        look_out(150)
        strng1()
        strng2()
        strng3()
        strng4()
        strng5()
        # set up the windows for our blinking trees
        #
        # treescrn3
        treescrn.overlay(treescrn3)
        # balls
        treescrn3.addch(4, 18, ord(' '))
        treescrn3.addch(7, 6, ord(' '))
        treescrn3.addch(8, 19, ord(' '))
        treescrn3.addch(11, 22, ord(' '))
        # star
        treescrn3.addch(0, 12, ord('*'))
        # strng1
        treescrn3.addch(3, 11, ord(' '))
        # strng2
        treescrn3.addch(5, 13, ord(' '))
        treescrn3.addch(6, 10, ord(' '))
        # strng3
        treescrn3.addch(7, 16, ord(' '))
        treescrn3.addch(7, 14, ord(' '))
        # strng4
        treescrn3.addch(10, 13, ord(' '))
        treescrn3.addch(10, 10, ord(' '))
        treescrn3.addch(11, 8, ord(' '))
        # strng5
        treescrn3.addch(11, 18, ord(' '))
        treescrn3.addch(12, 13, ord(' '))
        # treescrn4
        treescrn.overlay(treescrn4)
        # balls
        treescrn4.addch(3, 9, ord(' '))
        treescrn4.addch(4, 16, ord(' '))
        treescrn4.addch(7, 6, ord(' '))
        treescrn4.addch(8, 19, ord(' '))
        treescrn4.addch(11, 2, ord(' '))
        treescrn4.addch(12, 23, ord(' '))
        # star
        treescrn4.standout()
        treescrn4.addch(0, 12, ord('*'))
        treescrn4.standend()
        # strng1
        treescrn4.addch(3, 13, ord(' '))
        # strng2
        # strng3
        treescrn4.addch(7, 15, ord(' '))
        treescrn4.addch(8, 11, ord(' '))
        # strng4
        treescrn4.addch(9, 16, ord(' '))
        treescrn4.addch(10, 12, ord(' '))
        treescrn4.addch(11, 8, ord(' '))
        # strng5
        treescrn4.addch(11, 18, ord(' '))
        treescrn4.addch(12, 14, ord(' '))
        # treescrn5
        treescrn.overlay(treescrn5)
        # balls
        treescrn5.addch(3, 15, ord(' '))
        treescrn5.addch(10, 20, ord(' '))
        treescrn5.addch(12, 1, ord(' '))
        # star
        treescrn5.addch(0, 12, ord(' '))
        # strng1
        treescrn5.addch(3, 11, ord(' '))
        # strng2
        treescrn5.addch(5, 12, ord(' '))
        # strng3
        treescrn5.addch(7, 14, ord(' '))
        treescrn5.addch(8, 10, ord(' '))
        # strng4
        treescrn5.addch(9, 15, ord(' '))
        treescrn5.addch(10, 11, ord(' '))
        treescrn5.addch(11, 7, ord(' '))
        # strng5
        treescrn5.addch(11, 17, ord(' '))
        treescrn5.addch(12, 13, ord(' '))
        # treescrn6
        treescrn.overlay(treescrn6)
        # balls
        treescrn6.addch(6, 7, ord(' '))
        treescrn6.addch(7, 18, ord(' '))
        treescrn6.addch(10, 4, ord(' '))
        treescrn6.addch(11, 23, ord(' '))
        # star
        treescrn6.standout()
        treescrn6.addch(0, 12, ord('*'))
        treescrn6.standend()
        # strng1
        # strng2
        treescrn6.addch(5, 11, ord(' '))
        # strng3
        treescrn6.addch(7, 13, ord(' '))
        treescrn6.addch(8, 9, ord(' '))
        # strng4
        treescrn6.addch(9, 14, ord(' '))
        treescrn6.addch(10, 10, ord(' '))
        treescrn6.addch(11, 6, ord(' '))
        # strng5
        treescrn6.addch(11, 16, ord(' '))
        treescrn6.addch(12, 12, ord(' '))
        # treescrn7
        treescrn.overlay(treescrn7)
        # balls
        treescrn7.addch(3, 15, ord(' '))
        treescrn7.addch(6, 7, ord(' '))
        treescrn7.addch(7, 18, ord(' '))
        treescrn7.addch(10, 4, ord(' '))
        treescrn7.addch(11, 22, ord(' '))
        # star
        treescrn7.addch(0, 12, ord('*'))
        # strng1
        treescrn7.addch(3, 12, ord(' '))
        # strng2
        treescrn7.addch(5, 13, ord(' '))
        treescrn7.addch(6, 9, ord(' '))
        # strng3
        treescrn7.addch(7, 15, ord(' '))
        treescrn7.addch(8, 11, ord(' '))
        # strng4
        treescrn7.addch(9, 16, ord(' '))
        treescrn7.addch(10, 12, ord(' '))
        treescrn7.addch(11, 8, ord(' '))
        # strng5
        treescrn7.addch(11, 18, ord(' '))
        treescrn7.addch(12, 14, ord(' '))
        look_out(150)
        reindeer()
        w_holiday.touchwin()
        w_holiday.refresh()
        w_del_msg.refresh()
        look_out(500)
        for i in range(0, 20):
            blinkit()
curses.wrapper(main)
|
<gh_stars>1-10
#!/usr/bin/env python3
import optparse
import os
import sys
from distutils.spawn import find_executable
import chpl_platform, chpl_locale_model, overrides
from utils import error, memoize, warning
#
# If we can't find a file $CHPL_HOME/make/Makefile.<compiler_val>,
# that suggests that this is a compiler that we're not familiar with.
# In practice, this will cause our Makefiles to use defaults like CC
# and CXX to compile things, for better or worse.
#
@memoize
def validate_compiler(compiler_val):
    """Warn when no make/compiler/Makefile.<compiler_val> exists under
    $CHPL_HOME -- i.e. a compiler family our Makefiles don't know about.
    'llvm' is exempt (it has no such Makefile)."""
    if compiler_val == 'llvm':
        return
    import chpl_home_utils
    makefile_path = os.path.join(chpl_home_utils.get_chpl_home(),
                                 'make', 'compiler',
                                 'Makefile.{0}'.format(compiler_val))
    if not os.path.isfile(makefile_path):
        warning('Unknown compiler: "{0}"'.format(compiler_val))
@memoize
def get_prgenv_compiler():
    """Return 'cray-prgenv-<env>' when a PrgEnv module is loaded on a
    Cray-like target platform, otherwise 'none' (warning if the platform
    is Cray-like but no PrgEnv is loaded)."""
    platform_val = chpl_platform.get('target')
    cray_like = (platform_val.startswith('cray-x')
                 or platform_val == 'hpe-cray-ex')
    if cray_like:
        pe_env = os.environ.get('PE_ENV', 'none')
        if pe_env != 'none':
            return "cray-prgenv-{0}".format(pe_env.lower())
        warning("Compiling on {0} without a PrgEnv loaded".format(platform_val))
    return 'none'
# Don't use CC / CXX to set other variables if any of
# CHPL_HOST_COMPILER
# CHPL_HOST_CC
# CHPL_HOST_CXX
# CHPL_TARGET_COMPILER
# CHPL_TARGET_CC
# CHPL_TARGET_CXX
# are overridden by the user (in config file or env vars).
#
# Additionally, for the target compiler, don't use CC / CXX
# if we would like to default to LLVM.
@memoize
def should_consider_cc_cxx(flag):
    """Return True when it is OK to infer the compiler from CC/CXX.

    CC/CXX are ignored when LLVM is the default for *flag*, when any
    CHPL_{HOST,TARGET}_{COMPILER,CC,CXX} override is set, or when a
    PrgEnv compiler governs the target.
    """
    if default_to_llvm(flag):
        return False
    override_names = ('CHPL_HOST_COMPILER', 'CHPL_HOST_CC', 'CHPL_HOST_CXX',
                      'CHPL_TARGET_COMPILER', 'CHPL_TARGET_CC',
                      'CHPL_TARGET_CXX')
    if any(overrides.get(name) is not None for name in override_names):
        # A compilation configuration setting was adjusted,
        # so require CHPL_HOST_CC etc rather than using CC
        return False
    if flag == 'target' and get_prgenv_compiler() != 'none':
        # On an XC etc with a PrgEnv compiler,
        # setting CC/CXX should only impact the host compiler.
        return False
    return True
# Figures out the compiler family (e.g. gnu) from the CC/CXX enviro vars
# Returns '' if CC / CXX are not set and 'unknown' if they are set
# to something too complex.
@memoize
def get_compiler_from_cc_cxx():
    """Figure out the compiler family (e.g. 'gnu') from the CC/CXX env vars.

    Returns '' if neither CC nor CXX is set, the common family if they
    agree, and calls error() (returning 'unknown') on conflicts or
    unclassifiable commands.
    """
    cc_compiler = 'unknown'
    cxx_compiler = 'unknown'
    warn = False
    compiler_val = 'unknown'
    cc_val = overrides.get('CC', '')
    cxx_val = overrides.get('CXX', '')
    if cc_val == '' and cxx_val == '':
        return ''
    if cc_val:
        cc_compiler = get_compiler_from_command(cc_val)
    if cxx_val:
        cxx_compiler = get_compiler_from_command(cxx_val)
    if cc_val and cxx_val:
        # Both set: they must map to the same family.
        if cc_compiler == cxx_compiler:
            compiler_val = cc_compiler
        else:
            error("Conflicting compiler families for CC and CXX settings\n"
                  " {0} -> {1}\n"
                  " {2} -> {3}\n"
                  "Set CHPL_HOST_COMPILER and CHPL_TARGET_COMPILER to the "
                  "desired compiler family".format(cc_val, cc_compiler,
                                                  cxx_val, cxx_compiler))
            compiler_val = 'unknown'
    else:
        # if we get here, CC or CXX is provided, but not both.
        # Usually we warn in that case.
        # Check to see if the command name matches the default
        # for the compiler family.
        # In that event, omit the warning.
        if cc_val:
            compiler_val = cc_compiler
            warn = (get_compiler_name_c(compiler_val) != cc_val)
        if cxx_val:
            compiler_val = cxx_compiler
            warn = (get_compiler_name_cxx(compiler_val) != cxx_val)
        if compiler_val == 'unknown':
            error("Could not infer CHPL_TARGET_COMPILER from "
                  "CC={0} CXX={1}".format(cc_val, cxx_val));
        else:
            if warn and cc_val:
                error('CC is set but not CXX -- please set both\n')
            if warn and cxx_val:
                error('CXX is set but not CC -- please set both\n')
    return compiler_val
# Returns True if the compiler defaults to LLVM
def default_to_llvm(flag):
ret = False
if flag == 'target':
import chpl_llvm
has_llvm = chpl_llvm.get()
if has_llvm == 'bundled' or has_llvm == 'system':
# Default to CHPL_TARGET_COMPILER=llvm when CHPL_LLVM!=none
ret = True
return ret
@memoize
def get(flag='host'):
    """Return the compiler family (e.g. 'gnu', 'clang', 'llvm') for
    flag='host' or flag='target'.

    Resolution order: explicit CHPL_*_COMPILER override; CC/CXX (when
    should_consider_cc_cxx allows); the LLVM default; a loaded PrgEnv
    compiler; finally a platform/locale-model based default.
    """
    if flag == 'host':
        compiler_val = overrides.get('CHPL_HOST_COMPILER', '')
    elif flag == 'target':
        compiler_val = overrides.get('CHPL_TARGET_COMPILER', '')
    else:
        error("Invalid flag: '{0}'".format(flag), ValueError)
    default_llvm = False
    if not compiler_val:
        default_llvm = default_to_llvm(flag)
        # If allowable, look at CC/CXX
        if should_consider_cc_cxx(flag):
            compiler_val = get_compiler_from_cc_cxx()
            if compiler_val:
                validate_compiler(compiler_val)
                return compiler_val
        prgenv_compiler = get_prgenv_compiler()
        if default_llvm:
            compiler_val = 'llvm'
        elif prgenv_compiler != 'none':
            # The cray platforms are a special case in that we want to
            # "cross-compile" by default. (the compiler is different between host
            # and target, but the platform is the same).
            if flag == 'host':
                compiler_val = 'gnu'
            else:
                compiler_val = prgenv_compiler
        else:
            platform_val = chpl_platform.get(flag)
            locale_model_val = chpl_locale_model.get()
            # Normal compilation (not "cross-compiling")
            # inherit the host compiler if the target compiler is not set and
            # the host and target platforms are the same
            if flag == 'target':
                if chpl_platform.get('host') == platform_val:
                    compiler_val = get('host')
            elif platform_val.startswith('pwr'):
                compiler_val = 'ibm'
            elif platform_val == 'darwin' or platform_val == 'freebsd':
                if find_executable('clang'):
                    compiler_val = 'clang'
                else:
                    compiler_val = 'gnu'
            elif locale_model_val == 'gpu':
                if find_executable('clang'):
                    compiler_val = 'clang'
                else:
                    error("clang not found. The 'gpu' locale model is supported "
                          "with clang only.")
            else:
                compiler_val = 'gnu'
    validate_compiler(compiler_val)
    return compiler_val
@memoize
def get_path_component(flag='host'):
    """Return the compiler name to use in build-directory path components.

    Same as get(flag), except that the bundled clang is reported as
    'llvm' so its build artifacts are kept separate.
    """
    val = get(flag=flag)
    if val == 'clang' and flag == 'target':
        import chpl_llvm
        has_llvm = chpl_llvm.get()
        if has_llvm == 'bundled':
            # selecting the included clang - distinguish that
            # with 'llvm' in the path component.
            val = 'llvm'
    # Bug fix: return the adjusted value. The previous `return get(flag)`
    # discarded the clang->'llvm' adjustment above, making it dead code.
    return val
# Table of (family-name, C command, C++ command) tuples, where family-name
# corresponds to CHPL_TARGET_COMPILER etc settings (e.g. gnu).  Only cases
# where the family is reasonably inferable from the command name appear here.
COMPILERS = [ ('gnu', 'gcc', 'g++'),
              ('clang', 'clang', 'clang++'),
              ('ibm', 'xlc', 'xlC'),
              ('intel', 'icc', 'icpc'),
              ('pgi', 'pgicc', 'pgc++') ]
def get_compiler_from_command(command):
    """Infer the compiler family from a command string.

    Handles commands like "/path/to/gcc-10 --some-option": only the first
    word's basename, up to the first '-', is matched against COMPILERS.
    Returns 'unknown' for ambiguous names (cc/CC/mpicc, ...).
    """
    executable = os.path.basename(command.split()[0])
    family_key = executable.split('-')[0].strip()
    for family, c_command, cxx_command in COMPILERS:
        if family_key in (c_command, cxx_command):
            return family
    # Not in the table: we can't map the command name to a family
    # (e.g. cc/CC/mpicc could be many compilers).
    # We could consider trying to run it to figure it out.
    return 'unknown'
def get_compiler_name_c(compiler):
    """Map a compiler family name to its C compiler command."""
    for family, c_command, _cxx_command in COMPILERS:
        if compiler == family:
            return c_command
    # Special cases not covered by the COMPILERS table.
    if compiler_is_prgenv(compiler):
        return 'cc'
    if compiler in ['llvm', 'allinea']:
        return 'clang'
    if compiler == 'mpi-gnu':
        return 'mpicc'
    if 'gnu' in compiler:
        return 'gcc'
    return 'unknown-c-compiler'
def get_compiler_name_cxx(compiler):
    """Map a compiler family name to its C++ compiler command."""
    for family, _c_command, cxx_command in COMPILERS:
        if compiler == family:
            return cxx_command
    # Special cases not covered by the COMPILERS table.
    if compiler_is_prgenv(compiler):
        return 'CC'
    if compiler in ['llvm', 'allinea']:
        return 'clang++'
    if compiler == 'mpi-gnu':
        return 'mpicxx'
    if 'gnu' in compiler:
        return 'g++'
    return 'unknown-c++-compiler'
def compiler_is_prgenv(compiler_val):
    """Return True when *compiler_val* names a Cray PrgEnv compiler."""
    prefix = 'cray-prgenv'
    return compiler_val.startswith(prefix)
# flag should be host or target
# lang should be c or cxx (aka c++)
# this function returns an array of arguments
# e.g. ['clang', '--gcc-toolchain=/usr']
@memoize
def get_compiler_command(flag, lang):
    """Return the compiler invocation (argv list) for *flag* ('host' or
    'target') and *lang* ('c' or 'cxx'/'c++').

    Precedence: CHPL_<FLAG>_<LANG> override, then CC/CXX (when allowed),
    then the family's default command name, possibly redirected to the
    selected llvm/clang.
    """
    flag_upper = flag.upper()
    lang_upper = lang.upper()
    if lang_upper == 'C++':
        lang_upper = 'CXX'
    elif lang_upper == 'C':
        lang_upper = 'CC'
    if flag_upper == 'HOST' or flag_upper == 'TARGET':
        pass
    else:
        error('unknown flag {0}'.format(flag))
    if lang_upper == 'CC' or lang_upper == 'CXX':
        pass
    else:
        error('unknown lang {0}'.format(lang))
    # construct CHPL_HOST_CC / CHPL_TARGET_CXX etc
    varname = 'CHPL_' + flag_upper + '_' + lang_upper
    command = overrides.get(varname, '');
    if command:
        return command.split()
    compiler_val = get(flag=flag)
    # If other settings allow it, look also at CC/CXX.
    if should_consider_cc_cxx(flag):
        cc_cxx_val = overrides.get(lang_upper, '')
        if cc_cxx_val:
            return cc_cxx_val.split()
    if lang_upper == 'CC':
        command = [get_compiler_name_c(compiler_val)]
    elif lang_upper == 'CXX':
        command = [get_compiler_name_cxx(compiler_val)]
    # Adjust the path in two situations:
    # CHPL_TARGET_COMPILER=llvm -- means use the selected llvm/clang
    # CHPL_TARGET_COMPILER=clang with CHPL_LLVM=bundled -- use bundled clang
    if compiler_val == 'clang' or compiler_val == 'llvm':
        import chpl_llvm
        llvm_val = chpl_llvm.get()
        if llvm_val == 'none' and compiler_val == 'llvm':
            error("Cannot use CHPL_TARGET_COMPILER=llvm when CHPL_LLVM=none")
        if llvm_val == 'bundled' or compiler_val == 'llvm':
            if (flag == 'host' and
                    llvm_val == 'bundled' and
                    compiler_val == 'clang'):
                # don't change the prefix in this setting
                # (bundled LLVM might not be built yet)
                pass
            else:
                command = chpl_llvm.get_llvm_clang(lang_upper)
    return command
# Returns any -I options needed to find bundled headers
#
# Can include other compiler args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -I flags
@memoize
def get_bundled_compile_args(flag):
    """Return -I flags needed to find bundled headers (currently none)."""
    # TODO - port over third-party arg gathering
    return []
# Returns any -I options needed for this compiler / system
# to find headers
#
# Can include other compiler args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -I flags
@memoize
def get_system_compile_args(flag):
    """Return system-specific -I compile args for the given flag."""
    args = []
    platform_val = chpl_platform.get(flag)
    compiler_val = get(flag)
    # For PrgEnv compilation with LLVM, gather arguments from PrgEnv driver
    if flag == 'target' and compiler_val == 'llvm':
        import chpl_llvm
        comp_args, _link_args = chpl_llvm.get_clang_prgenv_args()
        args += comp_args
    # FreeBSD uses /usr/local but compilers don't search there by default
    if platform_val == 'freebsd':
        args += ['-I/usr/local/include']
    # Add Homebrew include directory if Homebrew is installed
    homebrew_prefix = chpl_platform.get_homebrew_prefix()
    if homebrew_prefix:
        args += ['-I' + homebrew_prefix + '/include']
    return args
# Returns any -L options needed to find bundled libraries
#
# Can include other link args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -L flags
@memoize
def get_bundled_link_args(flag):
    """Return -L args for bundled third-party libraries (none yet)."""
    # TODO - port over third-party arg gathering
    args = []
    return args
# Returns any -L options needed for this compiler / system
# to find libraries
#
# Can include other link args but *needs to work both
# for C and C++ compilation*.
#
# flag should be host or target.
# returns a Python list of -L flags
@memoize
def get_system_link_args(flag):
    """Return system-specific -L link args for the given flag."""
    args = []
    platform_val = chpl_platform.get(flag)
    compiler_val = get(flag)
    # For PrgEnv compilation with LLVM, gather arguments from PrgEnv driver
    if flag == 'target' and compiler_val == 'llvm':
        import chpl_llvm
        _comp_args, link_args = chpl_llvm.get_clang_prgenv_args()
        args += link_args
    # FreeBSD uses /usr/local but compilers don't search there by default
    if platform_val == 'freebsd':
        args += ['-L/usr/local/lib']
    # Add Homebrew lib directory if Homebrew is installed
    homebrew_prefix = chpl_platform.get_homebrew_prefix()
    if homebrew_prefix:
        args += ['-L' + homebrew_prefix + '/lib']
    return args
def validate_inference_matches(flag, lang):
    """Check that the resolved C/C++ command agrees with the compiler family.

    flag: 'host' or 'target'
    lang: 'c', 'cc', 'c++' or 'cxx' (case-insensitive)

    Infers the compiler family from the resolved command's executable and
    errors out (via error()) on a mismatch, except that 'clang' is accepted
    when the configured family is 'llvm'. Returns True when consistent,
    False when a conflict was reported.
    """
    flag_upper = flag.upper()
    lang_upper = lang.upper()
    # Normalize language spellings to the CC/CXX variable suffixes.
    if lang_upper == 'C++':
        lang_upper = 'CXX'
    elif lang_upper == 'C':
        lang_upper = 'CC'
    compiler = get(flag)
    cmd = get_compiler_command(flag, lang)
    # Infer the family from the executable itself (cmd[0]), not the full argv.
    inferred = get_compiler_from_command(cmd[0])
    if (inferred != 'unknown' and
        inferred != compiler and
        not (compiler == 'llvm' and inferred == 'clang')):
        # Bug fix: report the executable (cmd[0]) rather than the whole argv
        # list, and drop the garbled "but ... but" wording.
        error("Conflicting compiler families: "
              "CHPL_{0}_COMPILER={1} but CHPL_{0}_{2}={3} which has family {4}"
              .format(flag_upper, compiler, lang_upper, cmd[0], inferred))
        return False
    return True
# Issue an error if, after all the various inferences are done,
# CHPL_HOST_CC / CXX is inconsistent with CHPL_HOST_COMPILER
# and similarly for TARGET variants.
@memoize
def validate_compiler_settings():
    """Validate inferred C/C++ commands against the configured families."""
    # Same order as before: host/c, host/c++, target/c, target/c++.
    for flag in ('host', 'target'):
        for lang in ('c', 'c++'):
            validate_inference_matches(flag, lang)
def _main():
    """Print the selected host or target compiler value to stdout."""
    parser = optparse.OptionParser(usage='usage: %prog [--host|target])')
    parser.add_option('--host', dest='flag', action='store_const',
                      const='host', default='host')
    parser.add_option('--target', dest='flag', action='store_const',
                      const='target')
    options, _args = parser.parse_args()
    sys.stdout.write("{0}\n".format(get(options.flag)))
# Script entry point: print the selected compiler when run directly.
if __name__ == '__main__':
    _main()
|
import numpy as np
import numpy.ma as ma
import logging
#Own imports
from . import inout
class MicroscopeData:
    """Retrieves and stores the tile set and corner coordinates of each
    tile from raw coordinate input.

    Parameters:
    -----------
    y_flip: bool
        It is assumed that the microscope
        coordinates have their origin in right top of the image.
        During the stitching we use the left, top as the origin.
        Therefore the x-coordinates will be inverted by the
        normalization. But not the y-coords.
        The y_flip variable is designed for the cases where the
        microscope sequence is inverted in the y-direction. When
        set to True the y-coordinates will also be inverted
        before determining the tile set.
    x_coords: np.array
        Array of x coordinates, will be loaded
        as raw microscope coordinates in um by init() and
        converted to pixels and normalized to start at zero by
        normalize_coords().
    y_coords: np.array
        Array of y coordinates, will be loaded
        as raw microscope coordinates in um by init() and
        converted to pixels and normalized to start at zero by
        normalize_coords().
    z_coords: np.array
        Array of z coordinates, will be loaded
        as raw microscope coordinates in um by init() and
        converted to pixels and normalized to start at zero by
        normalize_coords().
    tile_nr: np.array
        The numbers of the tiles as found in coord_data.
    tile_set: np.array
        Array with the same shape as the
        tile set, values indicate the index at which the
        corresponding tile is found in tiles and tile_nr.
    running_av: float
        The estimated overlap between two
        neighbouring tiles in pixels, adapted for the tiles we
        already know the placement and overlap of.
    logger: logger
        logger instance
    """

    def __init__(self, coord_data, y_flip, nr_dim):
        """Create logger, read-in the coordinates and tile numbers,
        create empty tile set.

        The tile numbers refer to the number in the file name of the
        image, they identify which image file belongs to which tile.

        Parameters:
        -----------
        coord_data: dict
            Dictionary with for each tile a list with
            x, y and z coordinate, where the key is the tile
            number.
            utils.experimental_metadata_parser() outputs such a
            dictionary when reading a file with microscope
            coordinates.
        y_flip: bool
            Designed for the cases where the
            microscope sequence is inverted in the
            y-direction. When set to True the y-coordinates will
            be inverted before determining the tile set. The
            x-direction is assumed to be always inverted.
        nr_dim: int
            Valid values: 2 and 3. The number of
            dimension the loaded tiles and stitched image will have.
            NOTE(review): nr_dim is documented but not stored or used
            in this constructor -- confirm this is intentional.
        """
        #Create logger
        self.logger = logging.getLogger(__name__)
        #Set flag for y flip:
        self.y_flip = y_flip
        #Extract coordinates
        self.x_coords = []
        self.y_coords = []
        self.z_coords = []
        self.tile_nr = []
        # Do not subtract 1 anymore to compensate for difference in
        # filenames and image numbers in coordinates file
        self.logger.debug('Reading coord_data:')
        for key, row in coord_data.items():
            self.logger.debug('key: {} row: {}'.format(key, row))
            self.x_coords.append(row[0])
            self.y_coords.append(row[1])
            self.z_coords.append(row[2])
            self.tile_nr.append(int(key))
        #Make sure everything is np arrays
        # NOTE(review): in-place '/=' in normalize_coords() requires these
        # to be float arrays; assumes coord_data values are floats -- confirm.
        self.x_coords = np.array(self.x_coords)
        self.y_coords = np.array(self.y_coords)
        self.z_coords = np.array(self.z_coords)
        self.tile_nr = np.array(self.tile_nr)
        self.logger.debug(("Parsed microscope data:\n tile_nr: {} \n "
                           + "x: {} \n y: {}")
                          .format(self.tile_nr, self.x_coords,
                                  self.y_coords))
        #self.logger.debug("x, y coord and tile nr: {} {} {}"
        #                  .format(self.x_coords, self.y_coords, self.tile_nr))
        #Init tile set
        self.tile_set = []

    def __getstate__(self):
        """Causes objects to be pickled without the logger attribute"""
        class_dict = dict(self.__dict__)
        del class_dict['logger']
        return class_dict

    def __setstate__(self, state):
        """Restores the logger attribute when unpickling the object"""
        # Restore instance attributes (i.e., filename and lineno).
        self.__dict__.update(state)
        # Recreate the logger file, that has not been pickled
        self.logger = logging.getLogger(__name__)

    def normalize_coords(self, pixel_size):
        """Normalize the coordinates and put the origin in upper left
        corner.

        Takes list with absolute index, x, y values from microscope
        and normalizes, converts and inverts them to get the coordinates
        in pixels with the origin in left upper corner.
        NB: In the new microscope setup the origin is in the bottom right
        corner.

        Parameters:
        -----------
        pixel_size: float
            The size of one pixel in the microscope
            image, used to convert the microscope coordinates to pixels.
        """
        self.logger.info("Normalizing coordinates")
        # Convert to pixels
        self.x_coords /= pixel_size
        self.y_coords /= pixel_size
        # Commented out because in the new system the origin is in the bottom right
        # Flip the coordinate system to get tiles in right order (0,0 is left top)
        # OLD SYSTEM
        # self.x_coords *= -1
        # if self.y_flip:
        #     self.y_coords *= -1
        # # Normalization to get positive coordinates
        # x_min = np.amin(self.x_coords)
        # self.x_coords -= x_min
        # y_min = np.amin(self.y_coords)
        # self.y_coords -= y_min
        # self.x_coords *=1
        # self.y_coords *=1
        # if self.y_flip:
        #     self.y_coords *= -1
        # # NEW SYSTEM
        # Normalization to get positive coordinates
        x_min = np.amin(self.x_coords)
        self.x_coords -= x_min
        y_min = np.amin(self.y_coords)
        self.y_coords -= y_min
        # Mirror both axes: subtract the max and take the absolute value,
        # so coordinates are inverted and remain positive.
        # NOTE(review): self.y_flip is not consulted here (only in the
        # commented-out OLD SYSTEM path) -- confirm this is intended.
        x_max=np.amax(self.x_coords)
        self.x_coords -= x_max
        self.x_coords=np.abs(self.x_coords)
        y_max=np.amax(self.y_coords)
        self.y_coords -= y_max
        self.y_coords=np.abs(self.y_coords)
        self.logger.debug(('Normalized microscope data:\n tile_nr: {} '
                           + '\n x: {} \n y: {}')
                          .format(self.tile_nr, self.x_coords,
                                  self.y_coords))

    def make_tile_set(self, est_x_tol, nr_pixels, row_tol = None):
        """Based on the coordinates find a tile set.

        Use the coordinates to produce a tile set that has the shape
        of the grid the tiles should be placed on and for each tile in
        this grid gives the index for the tile_nr, x_coords and
        y_coords.
        Plots the coordinates when plot_avaible == True in inout.

        Parameters:
        -----------
        est_x_tol: int
            The estimated difference in the x
            direction between the corners of two
            neighbouring tiles in pixels
        nr_pixels: int
            Needed to estimate the distance between two separate tiles
        row_tol: int
            Row tolerance in pixels: The distance
            between y coordinates above which a tile is
            considered to belong to a new row. Default is None,
            which leads to 0.5 * nr_pixels.
        """
        self.logger.info("Making tile set")
        # If row_tolerance is not passed; set the row tolerance
        # according to number of pixels:
        if row_tol is None:
            row_tol = 0.5 * nr_pixels
        self.logger.debug("Row tolereance: {}".format(row_tol))
        # Pre-sort y-coords
        coord_inds = np.arange(0,len(self.tile_nr))
        sorting_inds = np.argsort(self.y_coords)
        sorted_coord_inds = coord_inds[sorting_inds]
        sorted_y = self.y_coords[sorting_inds]
        cur_row = [sorted_coord_inds[0]]
        # Sort into rows, according to y-coordinates:
        self.logger.debug("Finding rows...")
        for i in range(1,len(sorted_y)):
            # Check the difference with the neighbour
            if abs(sorted_y[i - 1] - sorted_y[i]) > row_tol:
                self.tile_set.append(list(cur_row))
                self.logger.debug("Added row: {} ".format(cur_row))
                cur_row = [sorted_coord_inds[i]]
            else:
                cur_row.append(sorted_coord_inds[i])
        # Dump leftover in last row of tile_set
        self.tile_set.append(list(cur_row))
        self.logger.debug('Added last row: {}'.format(cur_row))
        # Sort each row according to x-coordinates
        # Initialize
        self.logger.debug("Sorting within rows...")
        self.running_av = est_x_tol
        x_max = max(self.x_coords) + nr_pixels
        nr_col = int(np.around(x_max / est_x_tol))
        self.logger.debug(("Estimated x-size of final picture: {}, "
                           + "Number of expected collumns in tile "
                           + "set: {}")
                          .format(x_max, nr_col))
        av_counter = 1
        # Sort each row in tile_set
        for i in range(len(self.tile_set)):
            # Convert to np-array
            self.tile_set[i] = np.array(self.tile_set[i])
            # Sort row
            sorting_inds = np.argsort(self.x_coords[self.tile_set[i]])
            self.logger.debug(("Row {}. "
                               + "Sorted x-coordinates:\n {}")
                              .format(i, self.x_coords[self.tile_set[
                                  i]][sorting_inds]))
            # Local variable for easy adjustment of sorted tiles
            sorted_tiles = self.tile_set[i][sorting_inds]
            #self.logger.debug("Current sorted tiles {}".format(
            #    sorted_tiles))
            # Make local copy with 0 appended, for easy adjustment of sorted x coordinates
            sorted_x = np.concatenate(([0.0], self.x_coords[self.tile_set[i]][sorting_inds]))
            # Check for missing tiles:
            # Get distance between the tiles
            diff_x = abs(sorted_x[1:] - sorted_x[:-1])
            self.logger.debug("Row {}. Distance between tiles:\n {}"
                              .format(i, diff_x))
            # Update the average distance between directly adjacent tiles
            # NOTE(review): argwhere() returns indices, so .any() is False
            # when the only neighbouring pair is at index 0 -- verify that
            # this edge case is intended.
            neighbouring_tiles = np.argwhere(diff_x[1:] < nr_pixels)
            if neighbouring_tiles.any():
                #self.logger.debug("running av calculated with {}".format(diff_x[1:][neighbouring_tiles]))
                #self.logger.debug("running av calculated with {}".format(np.mean(diff_x[1:][neighbouring_tiles])))
                # NOTE(review): av_counter is always (re)set to 2 here, so
                # running_av is a pairwise average with the previous value,
                # not a true running mean over all rows -- confirm intended.
                av_counter = 2
                self.running_av = (self.running_av
                                   + np.mean(diff_x[1:][neighbouring_tiles])) \
                                   / av_counter
            self.logger.debug(("Row {}. Running average of estimated "
                               + "overlap: {}")
                              .format(i, self.running_av))
            # Find missing tiles (ignore the first tile, this one is checked later)
            missing_tiles = np.argwhere(diff_x[1:] > nr_pixels)
            missing_tiles = [tile[0] + 1 for tile in missing_tiles]
            self.logger.debug("Row {}. Missing tiles: {}".format(i,
                              missing_tiles))
            # Check how many tiles are missing and insert substituting value: -1
            for diff_ind in missing_tiles:
                #self.logger.debug("Difference {}".format(diff_x[diff_ind]))
                nr_missing_tiles = int(np.around(diff_x[diff_ind] / self.running_av) - 1)
                ins_value = np.full(nr_missing_tiles, -1, dtype = int)
                #self.logger.debug("inserting missing tile at index {}".format(diff_ind))
                sorted_tiles = np.insert(sorted_tiles, diff_ind, ins_value)
                #self.logger.debug("inserting missing tile, tiles: {}".format(sorted_tiles))
            # Check for missing tiles at the start insert substituting value: -1
            if diff_x[0] > row_tol:
                nr_missing_tiles = int(np.around(diff_x[0] / self.running_av))
                ins_value = np.full(nr_missing_tiles, -1, dtype = int)
                sorted_tiles = np.insert(sorted_tiles, 0, ins_value)
            # Check if this row is as long as the rest, if it not, assume
            # that tiles are missing at the end
            # and insert substituting value: -1
            if len(sorted_tiles) < nr_col:
                nr_missing_tiles = nr_col - len(sorted_tiles)
                ins_value = np.full(nr_missing_tiles, -1, dtype = int)
                sorted_tiles = np.append(sorted_tiles, ins_value)
            # Mask the substituting value
            sorted_tiles = ma.masked_equal(sorted_tiles, -1)
            self.logger.debug("Row {}. Current sorted tiles {}".format(
                i, sorted_tiles))
            # Add to tile set:
            self.tile_set[i] = sorted_tiles
        # Mask the tile set
        self.tile_set = ma.array(self.tile_set)
        #self.tile_set = np.array(self.tile_set)
        # Logging and plot to check
        self.logger.info("Tile set:\n {}".format(self.tile_set))
        self.logger.info("Tile set shape: {}".format(
            self.tile_set.shape))
        self.logger.info("Tile numbers:\n {}".format( self.tile_nr))
        inout.plot_coordinates(self, invert_yaxis=False)

    def check_tile_set(self, est_x_tol, nr_pixels, row_tol = None):
        """Check if the estimated overlap between tiles is close enough
        to the running average.

        Rebuilds the tile set with the running average when the initial
        estimate is off by more than 1000 pixels or the column count no
        longer matches.

        Parameters:
        -----------
        est_x_tol: int
            The distance between two tiles in the
            x-direction as estimated before reading all coordinates.
        nr_pixels: int
            The the width of the tile in pixels.
        row_tol: int
            The distance between y coordinates
            above which a tile is considered to belong to a
            new row, this value is passed directly to
            make_tile_set. (default = None)
        """
        x_max = max(self.x_coords)
        nr_col = int(np.around(x_max / est_x_tol))
        if ((abs(est_x_tol - self.running_av) > 1000)
            or not(self.tile_set.shape[1] == nr_col)):
            self.make_tile_set(self.running_av, nr_pixels,
                               row_tol = row_tol)
|
<gh_stars>10-100
# Distributed under MIT License
# Copyright (c) 2021 <NAME>
# historically based on :
# https://github.com/robert-hh/FTP-Server-for-ESP8266-ESP32-and-PYBD/blob/master/ftp.py
# but I have modified a lot, there must still be some original functions.
""" Ftp server implementation core class """
import socket
import os
import time
import uos
from server import stream
from server.server import Server
from server.user import User
from wifi.accesspoint import AccessPoint
from wifi.station import Station
from tools import useful, fnmatch
# Three-letter month abbreviations (bytes) used to format Unix-style
# directory listings (see FtpServerCore.get_file_description).
MONTHS = [b"Jan", b"Feb", b"Mar", b"Apr", b"May", b"Jun", b"Jul", b"Aug", b"Sep", b"Oct", b"Nov", b"Dec"]
class FtpServerCore:
    """ Ftp implementation server core """
    # Class-level (shared) counter: each new instance takes the next data port.
    portbase = [12345]

    def __init__(self):
        """ Ftp constructor method: allocate a data port and open the
        listening data socket used for passive-mode transfers. """
        self.portbase[0] += 1
        self.dataport = self.portbase[0]
        self.pasvsocket = None
        self.addr = b""
        self.user = b""
        self.password = b""
        self.path = b""
        self.cwd = b"/"
        self.fromname = None
        if useful.ismicropython():
            self.root = b""
            self.path_length = 64
        else:
            self.root = useful.tobytes(os.getcwd() + "/")
            # NOTE(review): the computed root is immediately overwritten by a
            # hard-coded developer path -- confirm this debug override should
            # remain in shipped code.
            self.root = b"/Users/remi/Downloads/ftp/"
            self.path_length = 256
        self.command = b""
        self.payload = b""
        # Listening socket for the data connection (PASV mode).
        self.datasocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.datasocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.datasocket.bind(socket.getaddrinfo("0.0.0.0", self.dataport)[0][4])
        self.datasocket.listen(1)
        self.datasocket.settimeout(None)
        self.data_addr = None
        self.quit = None
        self.received = None
        self.remoteaddr = None
        self.client = None
        useful.syslog(b"[FTP] Open data %d"%self.dataport)

    def get_ip(self):
        """ Get the ip address of the board (station or access point,
        depending on which interface the client reached us on) """
        if Station.is_ip_on_interface(self.remoteaddr):
            result = useful.tobytes(Station.get_info()[0])
        else:
            result = useful.tobytes(AccessPoint.get_info()[0])
        return result

    def close(self):
        """ Close all ftp connections """
        self.close_pasv()
        if self.datasocket:
            self.datasocket.close()
            self.datasocket = None

    def __del__(self):
        """ Destroy ftp instance """
        self.close()

    def get_file_description(self, filename, typ, size, date, now, full):
        """ Build one line of a directory listing.

        full=True produces a Unix 'ls -l' style line; otherwise just the
        file name. typ is the stat mode word (0x4000 bit = directory). """
        if full:
            file_permissions = b"drwxr-xr-x" if (typ & 0xF000 == 0x4000) else b"-rw-r--r--"
            d = time.localtime(date)
            year,month,day,hour,minute,_,_,_ = d[:8]
            # NOTE(review): 'and' here means the year is only shown when BOTH
            # year and month differ from now -- classic ls uses an age cutoff;
            # confirm 'and' vs 'or' is intended.
            if year != now[0] and month != now[1]:
                file_date = b"%s %2d %4d"%(MONTHS[month-1], day, year)
            else:
                file_date = b"%s %2d %02d:%02d"%(MONTHS[month-1], day, hour, minute)
            description = b"%s 1 owner group %10d %s %s\r\n"%(file_permissions, size, file_date, useful.tobytes(filename))
        else:
            description = useful.tobytes(filename) + b"\r\n"
        return description

    def send_file_list_with_pattern(self, path, stream_, full, now, pattern=None):
        """ Send the list of file with pattern.

        Streams listing lines in batches of 20 to limit memory use; after
        100 entries file dates are no longer stat'ed (date=0) for speed. """
        description = b""
        quantity = 0
        counter = 0
        for fileinfo in uos.ilistdir(useful.tostrings(path)):
            filename = fileinfo[0]
            typ = fileinfo[1]
            if len(fileinfo) > 3:
                size = fileinfo[3]
            else:
                size = 0
            if pattern is None:
                accepted = True
            else:
                accepted = fnmatch(useful.tostrings(filename), useful.tostrings(pattern))
            if accepted:
                if quantity > 100:
                    date = 0
                else:
                    sta = (0,0,0,0,0,0,0,0,0)
                    try:
                        # If it is a file
                        if not (typ & 0xF000 == 0x4000):
                            sta = useful.fileinfo(useful.tostrings(useful.abspathbytes(path,useful.tobytes(filename))))
                    except Exception:
                        pass
                    date = sta[8]
                description += self.get_file_description(filename, typ, size, date, now, full)
                counter += 1
                # Flush every 20 entries to keep the buffer small.
                if counter == 20:
                    counter = 0
                    stream_.write(description)
                    description = b""
                quantity += 1
        if description != b"":
            stream_.write(description)

    def send_file_list(self, path, stream_, full):
        """ Send the list of file.

        On failure, retries by treating the last path component as a
        wildcard pattern (e.g. LIST *.txt). """
        now = useful.now()
        try:
            self.send_file_list_with_pattern(path, stream_, full, now)
        except Exception as err:
            useful.syslog(err)
            pattern = path.split(b"/")[-1]
            path = path[:-(len(pattern) + 1)]
            if path == b"":
                path = b"/"
            self.send_file_list_with_pattern(path, stream_, full, now, pattern)

    async def send_ok(self):
        """ Send ok to ftp client """
        await self.send_response(250,b"OK")

    async def send_response(self, code, message):
        """ Send response to ftp client """
        useful.syslog(b"[FTP] %d %s"%(code, message))
        await self.client.write(b'%d %s\r\n'%(code,message))

    async def send_error(self, err):
        """ Send error to ftp client; log details only for unexpected
        error types (expected file-not-found style errors stay quiet). """
        showError = False
        if type(err) != type(b""):
            if useful.ismicropython():
                if type(err) != type(OSError):
                    showError = True
            else:
                if isinstance(err,FileNotFoundError) or isinstance(err,NotADirectoryError):
                    showError = False
                else:
                    showError = True
        if showError:
            useful.syslog(err, msg=b"[FTP] cmd='%s' cwd='%s' root='%s' path='%s' payload='%s'"%(self.command, self.cwd, self.root, self.path, self.payload))
        await self.send_response(550, b"Failed")

    async def USER(self):
        """ Ftp command USER: no configured user means auto-login. """
        if User.get_user() == b"":
            await self.send_response(230, b"User Logged In.")
        else:
            self.user = self.path[1:]
            await self.send_response(331, b"User known, enter password")

    async def PASS(self):
        """ Ftp command PASS """
        self.password = self.path[1:]
        if User.check(self.user, self.password, False):
            await self.send_response(230, b"Logged in.")
        else:
            await self.send_response(430, b"Invalid username or password")

    async def SYST(self):
        """ Ftp command SYST """
        await self.send_response(215, b"UNIX Type: L8")

    async def NOOP(self):
        """ Ftp command NOOP """
        await self.send_response(200, b"OK")

    async def FEAT(self):
        """ Ftp command FEAT """
        await self.send_response(211, b"no-features")

    async def XPWD(self):
        """ Ftp command XPWD """
        await self.PWD()

    async def PWD(self):
        """ Ftp command PWD """
        await self.send_response(257,b'"%s" is current directory.'%self.cwd)

    async def XCWD(self):
        """ Ftp command XCWD """
        await self.CWD()

    async def CWD(self):
        """ Ftp command CWD: change directory after verifying it exists
        (listdir raises when the path is invalid). """
        if len(self.path) <= self.path_length:
            try:
                dd = os.listdir(useful.tostrings(self.root + self.path))
                self.cwd = self.path
                await self.send_response(250,b"CWD command successful.")
            except Exception as err:
                useful.syslog(err)
                await self.send_error(b"Path not existing")
        else:
            await self.send_error(b"Path too long")

    async def CDUP(self):
        """ Ftp command CDUP """
        self.cwd = useful.abspathbytes(self.cwd, b"..")
        await self.send_ok()

    async def TYPE(self):
        """ Ftp command TYPE (always treated as binary) """
        await self.send_response(200, b"Binary transfer mode active.")

    async def SIZE(self):
        """ Ftp command SIZE """
        size = useful.filesize(useful.tostrings(self.root + self.path))
        await self.send_response(213, b"%d"%(size))

    async def PASV(self):
        """ Ftp command PASV: announce our data port then block in
        accept() until the client connects to it. """
        await self.send_response(227, b"Entering Passive Mode (%s,%d,%d)"%(self.addr.replace(b'.',b','), self.dataport>>8, self.dataport%256))
        self.close_pasv()
        self.pasvsocket, self.data_addr = self.datasocket.accept()
        useful.syslog(b"[FTP] PASV Accepted")

    async def PORT(self):
        """ Ftp command PORT: active mode, we connect out to the
        address/port encoded as h1,h2,h3,h4,p1,p2 in the payload. """
        items = self.payload.split(b",")
        if len(items) >= 6:
            self.data_addr = b'.'.join(items[:4])
            # Fake ip address replaced by the real remote address.
            if self.data_addr == b"127.0.1.1":
                self.data_addr = self.remoteaddr
            self.dataport = int(items[4]) * 256 + int(items[5])
            self.close_pasv()
            self.pasvsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.pasvsocket.settimeout(1000)
            self.pasvsocket.connect((self.data_addr, self.dataport))
            useful.syslog("[FTP] Data connection with: %s"%useful.tostrings(self.data_addr))
            await self.send_response(200, b"OK")
        else:
            await self.send_response(504, b"Fail")

    async def NLST(self):
        """ Ftp command NLST """
        await self.LIST()

    async def LIST(self):
        """ Ftp command LIST: stream a directory listing over the data
        connection. A payload starting with '-' is an option (e.g. -l),
        so the current directory is listed instead of the payload path. """
        if not self.payload.startswith(b"-"):
            place = self.path
        else:
            place = self.cwd
        await self.send_response(150, b"Connection accepted.") # Start list files
        listsocket = stream.Socket(self.pasvsocket)
        useful.syslog("[FTP] List '%s'"%(useful.tostrings(self.root+place)))
        self.send_file_list(self.root + place, listsocket, self.command == b"LIST" or self.payload == b"-l")
        listsocket.close()
        await self.send_response(226, b"Transfert complete.") # End list files
        self.close_pasv()

    async def STAT(self):
        """ Ftp command STAT: server status without payload, otherwise a
        directory listing over the control connection. """
        if self.payload == b"":
            await self.send_response(211, b"Connected to (%s)"%self.remoteaddr[0])
            await self.send_response(211, b"Data address (%s)"%self.addr)
            await self.send_response(211, b"TYPE: Binary STRU: File MODE: Stream")
        else:
            await self.send_response(213,b"Directory listing:")
            useful.syslog("[FTP] List '%s'"%useful.tostrings(self.root+self.path))
            self.send_file_list(self.root + self.path, self.client, True)
            await self.send_response(213, b"Stat end")

    async def RETR(self):
        """ Ftp command RETR: send a file over the data connection. """
        await self.send_response(150, b"Start send file")
        useful.syslog("[FTP] Send file '%s'"%useful.tostrings(self.root+self.path))
        filename = self.root + self.path
        if useful.ismicropython():
            # 1440 matches the usual TCP payload size on these boards.
            buffer_size = 1440
            chunk = bytearray(buffer_size)
            # NOTE(review): opened in text mode "r" but read with readinto()
            # -- works on MicroPython; confirm binary mode is not required.
            with open(useful.tostrings(filename), "r") as file:
                length = file.readinto(chunk)
                while length > 0:
                    # pylint: disable=no-member
                    sent = self.pasvsocket.write(chunk[:length])
                    length = file.readinto(chunk)
        else:
            with open(useful.tostrings(filename), "rb") as file:
                self.pasvsocket.sendall(file.read())
        await self.send_response(226, b"End send file")
        self.close_pasv()

    def close_pasv(self):
        """ Close PASV connection """
        if self.pasvsocket is not None:
            useful.syslog(b"[FTP] Close PASV")
            self.pasvsocket.close()
            self.pasvsocket = None

    def write_file(self, path, dataclient):
        """ Write ftp received data from the data connection to a file. """
        chunk = bytearray(1440)
        with open(useful.tostrings(path), "wb") as file:
            length = dataclient.readinto(chunk)
            while length > 0:
                # NOTE(review): two-argument write(buf, length) is a
                # MicroPython extension -- confirm it is never reached on
                # CPython (guarded by ismicropython() in STOR).
                file.write(chunk, length)
                length = dataclient.readinto(chunk)

    async def STOR(self):
        """ Ftp command STOR: receive a file over the data connection.
        On MicroPython a failed write retries once after creating the
        missing parent directory. """
        await self.send_response(150, b"Start receive file")
        useful.syslog("[FTP] Receive file '%s'"%useful.tostrings(self.root + self.path))
        filename = self.root + self.path
        if useful.ismicropython():
            try:
                self.write_file(filename, self.pasvsocket)
            except Exception as err:
                useful.syslog(err)
                directory, file = useful.split(useful.tostrings(filename))
                useful.makedir(directory, True)
                self.write_file(filename, self.pasvsocket)
        else:
            with open(filename, "wb") as file:
                data = b" "
                while len(data) > 0:
                    data = self.pasvsocket.recv(1440)
                    file.write(data)
                data = b""
        await self.send_response(226, b"End receive file")
        self.close_pasv()

    async def DELE(self):
        """ Ftp command DELE """
        useful.syslog("[FTP] Delete '%s'"%useful.tostrings(self.root + self.path))
        os.remove(useful.tostrings(self.root + self.path))
        await self.send_ok()

    async def XRMD(self):
        """ Ftp command XRMD """
        await self.RMD()

    async def RMD(self):
        """ Ftp command RMD """
        os.rmdir(useful.tostrings(self.root + self.path))
        await self.send_ok()

    async def XMKD(self):
        """ Ftp command XMKD """
        await self.MKD()

    async def MKD(self):
        """ Ftp command MKD """
        os.mkdir(useful.tostrings(self.root + self.path))
        await self.send_ok()

    async def RNFR(self):
        """ Ftp command RNFR: remember the source path for RNTO. """
        self.fromname = self.path
        await self.send_response(350, b"Rename from")

    async def RNTO(self):
        """ Ftp command RNTO: complete a rename started by RNFR. """
        if self.fromname is not None:
            useful.syslog("[FTP] Rename '%s' to '%s'"%(useful.tostrings(self.root + self.fromname), useful.tostrings(self.root + self.path)))
            os.rename(useful.tostrings(self.root + self.fromname), useful.tostrings(self.root + self.path))
            await self.send_ok()
        else:
            await self.send_error(self.fromname)
        self.fromname = None

    async def QUIT(self):
        """ Ftp command QUIT """
        self.quit = True
        await self.send_response(221, b"Bye.")

    async def unsupported_command(self):
        """ Ftp unknown command """
        await self.send_response(502, b"Unsupported command")

    async def receive_command(self):
        """ Ftp command reception: read one control line and split it
        into self.command / self.payload / self.path. """
        Server.slow_down()
        try:
            self.received = await self.client.readline()
        except Exception as err:
            useful.syslog(err)
            useful.syslog(b"[FTP] Reset connection")
            self.quit = True
        # NOTE(review): if readline() raised on the very first command,
        # self.received may still be None here and len() would raise --
        # confirm the caller tolerates that.
        if len(self.received) <= 0:
            self.quit = True
        else:
            self.received = self.received.rstrip(b"\r\n")
            # Never log the clear-text password.
            if useful.tobytes(self.received[:4]) == b"PASS":
                message = b"PASS ????"
            else:
                message = self.received
            self.command = self.received.split(b" ")[0].upper()
            self.payload = self.received[len(self.command):].lstrip()
            self.path = useful.abspathbytes(self.cwd, self.payload)
            useful.syslog(b"[FTP] '%s' id=%08X cwd='%s' payload='%s' path='%s'"%(message, id(self), self.cwd, self.payload, self.path))

    async def treat_command(self):
        """ Treat ftp command: dispatch to the method named after the
        command; everything except USER/PASS requires authentication. """
        Server.slow_down()
        if self.quit is False:
            try:
                command = useful.tostrings(self.command)
                if hasattr(self, command):
                    callback = getattr(self, command)
                    if self.command not in [b"USER",b"PASS"]:
                        if User.check(self.user, self.password):
                            await callback()
                        else:
                            await self.send_response(430, b"Invalid username or password")
                    else:
                        await callback()
                else:
                    await self.unsupported_command()
            except Exception as err:
                useful.syslog(err)
                await self.send_error(err)

    async def on_connection(self, reader, writer):
        """ Asyncio on ftp connection method: greet the client then loop
        receive/treat until QUIT or a connection error. """
        Server.slow_down()
        self.remoteaddr = useful.tobytes(writer.get_extra_info('peername')[0])
        self.addr = self.get_ip()
        useful.syslog("[FTP] Connected from %s"%useful.tostrings(self.remoteaddr))
        self.client = stream.Stream(reader, writer)
        try:
            await self.send_response(220, b"Ftp " + useful.tobytes(os.uname()[4]) + b".")
            self.quit = False
            while self.quit is False:
                await self.receive_command()
                await self.treat_command()
        except Exception as err:
            useful.syslog(err)
            await self.send_error(err)
        finally:
            self.close_pasv()
            await self.client.close()
            useful.syslog("[FTP] Disconnected")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import (
Any,
Dict,
Optional,
TYPE_CHECKING
)
from superset.db_engine_specs.base import BaseEngineSpec
from superset.utils import core as utils
from superset.sql_parse import Table
if TYPE_CHECKING:
# prevent circular imports
from superset.models.core import Database
class AthenaEngineSpec(BaseEngineSpec):
    """Superset engine spec for Amazon Athena (via the awsathena dialect)."""

    engine = "awsathena"
    engine_name = "Amazon Athena"

    # ISO 8601 duration -> Presto/Athena date_trunc expression templates.
    # The two week variants anchor weeks on Saturday / Sunday respectively.
    _time_grain_expressions = {
        None: "{col}",
        "PT1S": "date_trunc('second', CAST({col} AS TIMESTAMP))",
        "PT1M": "date_trunc('minute', CAST({col} AS TIMESTAMP))",
        "PT1H": "date_trunc('hour', CAST({col} AS TIMESTAMP))",
        "P1D": "date_trunc('day', CAST({col} AS TIMESTAMP))",
        "P1W": "date_trunc('week', CAST({col} AS TIMESTAMP))",
        "P1M": "date_trunc('month', CAST({col} AS TIMESTAMP))",
        "P0.25Y": "date_trunc('quarter', CAST({col} AS TIMESTAMP))",
        "P1Y": "date_trunc('year', CAST({col} AS TIMESTAMP))",
        "P1W/1970-01-03T00:00:00Z": "date_add('day', 5, date_trunc('week', \
                                    date_add('day', 1, CAST({col} AS TIMESTAMP))))",
        "1969-12-28T00:00:00Z/P1W": "date_add('day', -1, date_trunc('week', \
                                    date_add('day', 1, CAST({col} AS TIMESTAMP))))",
    }

    @classmethod
    def convert_dttm(cls, target_type: str, dttm: datetime) -> Optional[str]:
        """Return an Athena SQL literal for *dttm* at *target_type*,
        or None when the type has no known conversion."""
        tt = target_type.upper()
        if tt == utils.TemporalType.DATE:
            return f"from_iso8601_date('{dttm.date().isoformat()}')"
        if tt == utils.TemporalType.TIMESTAMP:
            datetime_formatted = dttm.isoformat(timespec="microseconds")
            return f"""from_iso8601_timestamp('{datetime_formatted}')"""
        return None

    @classmethod
    def epoch_to_dttm(cls) -> str:
        """Return the SQL template converting epoch seconds to a timestamp."""
        return "from_unixtime({col})"

    @staticmethod
    def _mutate_label(label: str) -> str:
        """
        Athena only supports lowercase column names and aliases.

        :param label: Expected expression label
        :return: Conditionally mutated label
        """
        return label.lower()

    @classmethod
    def create_table_from_csv(  # pylint: disable=too-many-arguments
        cls,
        filename: str,
        table: Table,
        database: "Database",
        csv_to_df_kwargs: Dict[str, Any],
        df_to_sql_kwargs: Dict[str, Any],
    ) -> None:
        """
        Create table from contents of a csv. Note: this method does not create
        metadata for the table.
        """
        df = cls.csv_to_df(filepath_or_buffer=filename, **csv_to_df_kwargs)
        engine = cls.get_engine(database)
        if table.schema:
            # only add schema when it is preset and non empty
            df_to_sql_kwargs["schema"] = table.schema
        if engine.dialect.dbapi.__name__ == "pyathena":
            # pyathena's own to_sql handles the S3 staging upload; reach
            # through SQLAlchemy's wrappers to the raw pyathena connection.
            from pyathena.pandas.util import to_sql

            with engine.connect() as conn:
                pyathena_conn = conn.connection.connection
                to_sql(
                    df,
                    conn=pyathena_conn,
                    location=pyathena_conn.s3_staging_dir.rstrip("/")
                    + "/{}/{}/".format(table.schema, table.table),
                    **df_to_sql_kwargs,
                )
        else:
            if engine.dialect.supports_multivalues_insert:
                df_to_sql_kwargs["method"] = "multi"
            cls.df_to_sql(df=df, con=engine, **df_to_sql_kwargs)
|
from lib.datasets import MNIST as Data
from lib.model import Model as BaseModel, generate_placeholders, train
from lib.segmentation import extract_features_fixed
# from lib.segmentation import slic_fixed
from lib.segmentation import quickshift_fixed
from lib.pipeline import preprocess_pipeline_fixed
from lib.layer import EmbeddedGCNN as Conv, MaxPool, AveragePool, FC
# Indices of the per-superpixel form features kept from the extractor.
# SLIC_FEATURES = [4, 5, 6, 7, 8, 18, 20, 21, 22]
QUICKSHIFT_FEATURES = [4, 6, 7, 8, 24, 28, 29, 31, 37]

DATA_DIR = 'data/mnist'
# Directory where preprocessed graphs are cached before training.
# PREPROCESS_FIRST = 'data/mnist/slic'
PREPROCESS_FIRST = 'data/mnist/quickshift'

# Graph-hierarchy construction parameters.
LEVELS = 4
CONNECTIVITY = 8
SCALE_INVARIANCE = False
STDDEV = 1

# Training hyperparameters.
LEARNING_RATE = 0.001
TRAIN_DIR = None
# LOG_DIR = 'data/summaries/mnist_slic_graph'
LOG_DIR = 'data/summaries/mnist_quickshift_graph'
AUGMENT_TRAIN_EXAMPLES = False
DROPOUT = 0.5
BATCH_SIZE = 64
MAX_STEPS = 15000
DISPLAY_STEP = 10

# FORM_FEATURES = SLIC_FEATURES
FORM_FEATURES = QUICKSHIFT_FEATURES
# +1 for one extra feature beyond FORM_FEATURES — presumably appended by
# the extraction pipeline; TODO confirm against extract_features_fixed.
NUM_FEATURES = len(FORM_FEATURES) + 1

data = Data(DATA_DIR)

# Superpixel segmentation used to turn images into graphs.
# segmentation_algorithm = slic_fixed(
#     num_segments=100, compactness=5, max_iterations=10, sigma=0)
segmentation_algorithm = quickshift_fixed(
    ratio=1, kernel_size=2, max_dist=2, sigma=0)

feature_extraction_algorithm = extract_features_fixed(FORM_FEATURES)

# Full image -> multi-level graph preprocessing pipeline.
preprocess_algorithm = preprocess_pipeline_fixed(
    segmentation_algorithm, feature_extraction_algorithm, LEVELS, CONNECTIVITY,
    SCALE_INVARIANCE, STDDEV)
class Model(BaseModel):
    """Embedded GCNN for MNIST superpixel graphs.

    Two graph-convolution blocks (each: two EmbeddedGCNN convs followed
    by a max-pool), then a global average pool and a bias-free linear
    layer producing the class logits.
    """

    def _build(self):
        # Block 1 operates on the level-1 adjacency matrices.
        dist_1 = self.placeholders['adj_dist_1']
        rad_1 = self.placeholders['adj_rad_1']
        block_1 = [
            Conv(NUM_FEATURES, 64, adjs_dist=dist_1, adjs_rad=rad_1,
                 logging=self.logging),
            Conv(64, 64, adjs_dist=dist_1, adjs_rad=rad_1,
                 logging=self.logging),
            MaxPool(size=4),
        ]

        # Block 2 operates on the level-3 adjacency matrices.
        dist_3 = self.placeholders['adj_dist_3']
        rad_3 = self.placeholders['adj_rad_3']
        block_2 = [
            Conv(64, 128, adjs_dist=dist_3, adjs_rad=rad_3,
                 logging=self.logging),
            Conv(128, 128, adjs_dist=dist_3, adjs_rad=rad_3,
                 logging=self.logging),
            MaxPool(size=4),
        ]

        # Global average pooling then a linear (identity-activation)
        # classifier with dropout and no bias.
        head = [
            AveragePool(),
            FC(128, data.num_classes, act=lambda x: x, bias=False,
               dropout=self.placeholders['dropout'],
               logging=self.logging),
        ]

        self.layers = block_1 + block_2 + head
# Build graph placeholders for a fixed batch size and level count.
placeholders = generate_placeholders(BATCH_SIZE, LEVELS, NUM_FEATURES,
                                     data.num_classes)

model = Model(
    placeholders=placeholders,
    learning_rate=LEARNING_RATE,
    train_dir=TRAIN_DIR,
    log_dir=LOG_DIR)

# Runs the training loop, preprocessing (or loading cached) graphs from
# PREPROCESS_FIRST.
train(model, data, preprocess_algorithm, BATCH_SIZE, DROPOUT,
      AUGMENT_TRAIN_EXAMPLES, MAX_STEPS, PREPROCESS_FIRST, DISPLAY_STEP)
|
from yacs.config import CfgNode as CN

# -------------------------- ECG OPTIONS -----------------------------
# Root config node; new keys may be added by merged config files.
_C = CN(new_allowed=True)
cfg = _C  # public alias used by importers

# ------------ SAMPLING RATE ---------
_C.FS = 500            # native sampling rate (Hz)
_C.FS_RESAMPLE = 360   # resampling target rate (Hz)
_C.ROUND = 3

# --------------------------------------------------------------------
# -------------------------- PREPROCESS OPTIONS ----------------------
# --------------------------------------------------------------------
# ----------- PREPROCESS FILTER ---------
_C.HIGHPASS = 1.0  # Hz
_C.NOTCH = CN()
_C.NOTCH.FS = 50.0  # Hz (mains frequency to remove)
_C.NOTCH.QF = 40.0  # notch quality factor
_C.DETREND = 600.0  # msec
_C.INTERPOLATED_MSEC = 3000.0  # msec
_C.NORMALIZE_SEC = 10

# ------------ ENHANCER ---------
_C.ENHANCER = CN()
# SWT enhancer band limits
_C.ENHANCER.SWT = CN()
_C.ENHANCER.SWT.LOWPASS = 0.01
_C.ENHANCER.SWT.HIGHPASS = 8.0

# -------------- DETECTOR ------
_C.DETECTOR = CN()
# above mean (AM) detector
_C.DETECTOR.AM_SMOOTH_MSEC = 500.0
# Recommended threshold range [-5, 5]
_C.DETECTOR.AM_START_THRESHOLD = 0.0
_C.DETECTOR.AM_THRESHOLD_INTERCEPT1 = 0.0
_C.DETECTOR.AM_THRESHOLD_INTERCEPT2 = 0.0
_C.DETECTOR.AMF_SMOOTH_MSEC = 1000.0
# Recommended threshold range [0, 10]
_C.DETECTOR.AMF_THRESHOLD = 0.2
_C.DETECTOR.AMF_WIDTH_THRESHOLD_MSEC = 60.0
# two average (TA) detector
_C.DETECTOR.TA_SHORT_MSEC = 120.0
_C.DETECTOR.TA_LONG_MSEC = 600.0
_C.DETECTOR.TA_PEAK_WIDTH = 80.0
# Successive R peaks should be spaced at least 250 ms apart.
_C.DETECTOR.RPEAK_SPACING_MSEC = 250.0
# Recommended [enhancer, detector] combinations:
#   [modified_swt_enhancer, above_mean]
#   [modified_swt_enhancer, pan_tompkins]
#   [none, two_average]
#   [pan_tompkins_enhancer, pan_tompkins]
#   [modified_pan_tompkins_enhancer, pan_tompkins]
# usable enhancer values: [modified_swt_enhancer, swt_enhancer,
#   modified_pan_tompkins_enhancer, pan_tompkins_enhancer, none]
# default: modified_swt_enhancer
_C.DETECTOR.ENHANCER = "modified_swt_enhancer"
# usable detector values: [above_mean, above_mean_fix, pan_tompkins,
#   two_average]; above_mean is sensitive to large spikes
# default: pan_tompkins
_C.DETECTOR.DETECTOR = "above_mean_fix"

# -------------- T-WAVE DETECTION ------
_C.DETECT_T = CN()
_C.DETECT_T.MYALGO_LANDMASK_FROM_RPEAK_MSEC = 70.0  # msec (starting point to detect T wave)
_C.DETECT_T.MYALGO_LANDMASK_INTERVAL_RATIO = 0.7  # ratio of RR interval
_C.DETECT_T.MYALGO_SMOOTH_BASELINE = 150.0  # msec

# -------------- HRV ------
_C.HRV = CN()
# Delta degrees of freedom for variance; commonly 0, though some
# references use 1.
_C.HRV.DDOF = 0
_C.HRV.MIN_NUMBER_OF_INTERVAL = 3

# -------------- DIAGNOSIS THRESHOLDS ------
_C.DIAG = CN()
_C.DIAG.TACHY = 150  # bpm
_C.DIAG.BRADY = 40  # bpm
_C.DIAG.PAUSE = 2.0  # second
_C.DIAG.HR_PEAK_BUFFER = 10  # peaks
_C.DIAG.HR_TIME_SEGMENT_SEC = 10  # sec
_C.DIAG.ECTOPIC_TIME_SEGMENT_SEC = 10
_C.DIAG.ECTOPIC_RATIO = 0.7
_C.DIAG.ECTOPIC_MEDIAN = True
_C.DIAG.QRS_WIDTH = 120.0  # msec
_C.DIAG.VTFT_TCSC_SEGMENT_SEC = 3.0  # second
_C.DIAG.VTFT_TCSC_SMOOTH_SEC = 6.0  # second
_C.DIAG.VTFT_TCSC_BINARY_THRESHOLD = 0.2  # lower increases sensitivity
_C.DIAG.VTFT_TCSC_THRESHOLD = 0.4  # lower increases sensitivity
_C.DIAG.VTFT_VFF_THRESHOLD = 0.4  # lower increases sensitivity
import functools
import hashlib
import json
import random
from urlparse import urlparse
import uuid
from operator import attrgetter
from django import http
from django.conf import settings
from django.db.models import Q
from django.shortcuts import get_list_or_404, get_object_or_404, redirect
from django.utils.translation import trans_real as translation
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
import caching.base as caching
import jingo
import jinja2
import commonware.log
import session_csrf
from tower import ugettext as _, ugettext_lazy as _lazy
import waffle
from mobility.decorators import mobilized, mobile_template
import amo
from amo import messages
from amo.decorators import login_required, post_required, write
from amo.forms import AbuseForm
from amo.helpers import shared_url
from amo.utils import randslice, sorted_groupby, urlparams
from amo.models import manual_order
from amo import urlresolvers
from amo.urlresolvers import reverse
from abuse.models import send_abuse_report
from bandwagon.models import Collection, CollectionFeature, CollectionPromo
from market.forms import PriceCurrencyForm
import paypal
from reviews.forms import ReviewForm
from reviews.models import Review, GroupedRating
from session_csrf import anonymous_csrf, anonymous_csrf_exempt
from sharing.views import share as share_redirect
from stats.models import Contribution
from translations.query import order_by_translation
from versions.models import Version
from .forms import ContributionForm
from .models import Addon, Persona, FrozenAddon
from .decorators import (addon_view_factory, can_be_purchased, has_purchased,
has_not_purchased)
from mkt.webapps.models import Installed
# Module loggers.
log = commonware.log.getLogger('z.addons')
paypal_log = commonware.log.getLogger('z.paypal')

# View decorators pre-bound to the queryset of add-ons a view may see.
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed)
addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled)
def author_addon_clicked(f):
    """Decorator redirecting clicks on "Other add-ons by author"."""
    @functools.wraps(f)
    def wrapper(request, *args, **kwargs):
        selected = request.GET.get('addons-author-addons-select', None)
        if not selected:
            # Nothing selected: run the wrapped view as usual.
            return f(request, *args, **kwargs)
        try:
            target_id = int(selected)
        except ValueError:
            return http.HttpResponseBadRequest('Invalid add-on ID.')
        return http.HttpResponsePermanentRedirect(reverse(
            'addons.detail', args=[target_id]))
    return wrapper
@addon_disabled_view
def addon_detail(request, addon):
    """Add-ons details page dispatcher.

    404s for deleted and webapp add-ons, serves a soft-404 page for
    disabled ones, routes personas and extensions to their own detail
    views, and permanently redirects to another application's site when
    the current app doesn't support this add-on type.
    """
    if addon.is_deleted:
        raise http.Http404
    if addon.is_disabled:
        # Keep the URL but answer with a dedicated "disabled" page and a
        # 404 status code.
        return jingo.render(request, 'addons/impala/disabled.html',
                            {'addon': addon}, status=404)
    if addon.is_webapp():
        # Apps don't deserve AMO detail pages.
        raise http.Http404

    # addon needs to have a version and be valid for this app.
    if addon.type in request.APP.types:
        if addon.type == amo.ADDON_PERSONA:
            return persona_detail(request, addon)
        else:
            if not addon.current_version:
                raise http.Http404
            return extension_detail(request, addon)
    else:
        # Redirect to an app that supports this type.
        try:
            new_app = [a for a in amo.APP_USAGE if addon.type
                       in a.types][0]
        except IndexError:
            # No application supports this type at all.
            raise http.Http404
        else:
            prefixer = urlresolvers.get_url_prefix()
            prefixer.app = new_app.short
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[addon.slug]))
@vary_on_headers('X-Requested-With')
def extension_detail(request, addon):
    """Extensions details page.

    Ajax requests get the bottom half of the page (details-more.html);
    normal requests get the top half, deferring the expensive queries.
    """
    # If current version is incompatible with this app, redirect.
    comp_apps = addon.compatible_apps
    if comp_apps and request.APP not in comp_apps:
        prefixer = urlresolvers.get_url_prefix()
        # Python 2: dict.keys() returns a list, so [0] is valid here.
        prefixer.app = comp_apps.keys()[0].short
        return redirect('addons.detail', addon.slug, permanent=True)

    # get satisfaction only supports en-US.
    lang = translation.to_locale(translation.get_language())
    addon.has_satisfaction = (lang == 'en_US' and
                              addon.get_satisfaction_company)

    # Addon recommendations.
    recommended = Addon.objects.listed(request.APP).filter(
        recommended_for__addon=addon)[:6]

    # Popular collections this addon is part of.
    collections = Collection.objects.listed().filter(
        addons=addon, application__id=request.APP.id)

    ctx = {
        'addon': addon,
        'src': request.GET.get('src', 'dp-btn-primary'),
        'version_src': request.GET.get('src', 'dp-btn-version'),
        'tags': addon.tags.not_blacklisted(),
        'grouped_ratings': GroupedRating.get(addon.id),
        'recommendations': recommended,
        'review_form': ReviewForm(),
        'reviews': Review.objects.valid().filter(addon=addon, is_latest=True),
        'get_replies': Review.get_replies,
        'collections': collections.order_by('-subscribers')[:3],
        'abuse_form': AbuseForm(request=request),
    }

    # details.html just returns the top half of the page for speed. The bottom
    # does a lot more queries we don't want on the initial page load.
    if request.is_ajax():
        # Other add-ons/apps from the same author(s).
        ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]
        return jingo.render(request, 'addons/impala/details-more.html', ctx)
    else:
        if addon.is_webapp():
            ctx['search_placeholder'] = 'apps'
        return jingo.render(request, 'addons/impala/details.html', ctx)
@mobilized(extension_detail)
def extension_detail(request, addon):
    """Mobile variant of the extension detail page.

    Same name on purpose: @mobilized swaps the implementation per
    request.
    """
    return jingo.render(request, 'addons/mobile/details.html',
                        {'addon': addon})
def _category_personas(qs, limit):
    """Return a cached random slice of personas for a category queryset."""
    def fetch():
        return randslice(qs, limit=limit)
    cache_key = 'cat-personas:' + qs.query_key()
    return caching.cached(fetch, cache_key)
@mobile_template('addons/{mobile/}persona_detail.html')
def persona_detail(request, addon, template=None):
    """Details page for Personas.

    :param template: injected by @mobile_template — desktop or mobile
        variant of the persona detail template.
    """
    if not addon.is_public():
        raise http.Http404

    persona = addon.persona

    # this persona's categories
    categories = addon.categories.filter(application=request.APP.id)
    if categories:
        # More personas from the same (first) category.
        qs = Addon.objects.public().filter(categories=categories[0])
        category_personas = _category_personas(qs, limit=6)
    else:
        category_personas = None

    data = {
        'addon': addon,
        'persona': persona,
        'categories': categories,
        'author_personas': persona.authors_other_addons(request.APP)[:3],
        'category_personas': category_personas,
    }
    if not persona.is_new():
        # Remora uses persona.author despite there being a display_username.
        data['author_gallery'] = settings.PERSONAS_USER_ROOT % persona.author
    if not request.MOBILE:
        # Desktop also gets tags, reviews and the abuse form.
        dev_tags, user_tags = addon.tags_partitioned_by_developer
        data.update({
            'dev_tags': dev_tags,
            'user_tags': user_tags,
            'review_form': ReviewForm(),
            'reviews': Review.objects.valid().filter(addon=addon,
                                                     is_latest=True),
            'get_replies': Review.get_replies,
            'search_cat': 'personas',
            'abuse_form': AbuseForm(request=request),
        })

    return jingo.render(request, template, data)
class BaseFilter(object):
    """
    Filters help generate querysets for add-on listings.

    You have to define ``opts`` on the subclass as a sequence of (key, title)
    pairs. The key is used in GET parameters and the title can be used in the
    view.

    The chosen filter field is combined with the ``base`` queryset using
    the ``key`` found in request.GET. ``default`` should be a key in ``opts``
    that's used if nothing good is found in request.GET.
    """

    def __init__(self, request, base, key, default, model=Addon):
        self.opts_dict = dict(self.opts)
        # ``extras`` is an optional second set of (key, title) pairs.
        self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
        self.request = request
        self.base_queryset = base
        self.key = key
        self.model = model
        self.field, self.title = self.options(self.request, key, default)
        self.qs = self.filter(self.field)

    def options(self, request, key, default):
        """Get the (option, title) pair we want according to the request."""
        if key in request.GET and (request.GET[key] in self.opts_dict or
                                   request.GET[key] in self.extras_dict):
            opt = request.GET[key]
        else:
            opt = default
        if opt in self.opts_dict:
            title = self.opts_dict[opt]
        else:
            # NOTE(review): raises KeyError if ``default`` is in neither
            # opts nor extras — presumably subclasses guarantee it is.
            title = self.extras_dict[opt]
        return opt, title

    def all(self):
        """Get a full mapping of {option: queryset}."""
        return dict((field, self.filter(field)) for field in dict(self.opts))

    def filter(self, field):
        """Get the queryset for the given field."""
        filter = self._filter(field) & self.base_queryset
        # Optional per-field post-ordering hook: order_<field>(qs).
        order = getattr(self, 'order_%s' % field, None)
        if order:
            return order(filter)
        return filter

    def _filter(self, field):
        # Dispatch to the matching filter_<field> method below.
        return getattr(self, 'filter_%s' % field)()

    # One filter_<key> method per supported sort option.

    def filter_featured(self):
        # Random ordering comes from featured_random; preserve it with
        # manual_order.
        ids = self.model.featured_random(self.request.APP, self.request.LANG)
        return manual_order(self.model.objects, ids, 'addons.id')

    def filter_price(self):
        return self.model.objects.order_by('addonpremium__price__price', 'id')

    def filter_free(self):
        # Addon needs the current app for filtering; other models don't.
        if self.model == Addon:
            return self.model.objects.top_free(self.request.APP, listed=False)
        else:
            return self.model.objects.top_free(listed=False)

    def filter_paid(self):
        if self.model == Addon:
            return self.model.objects.top_paid(self.request.APP, listed=False)
        else:
            return self.model.objects.top_paid(listed=False)

    def filter_popular(self):
        return (self.model.objects.order_by('-weekly_downloads')
                .with_index(addons='downloads_type_idx'))

    def filter_downloads(self):
        # Alias for popularity sorting.
        return self.filter_popular()

    def filter_users(self):
        return (self.model.objects.order_by('-average_daily_users')
                .with_index(addons='adus_type_idx'))

    def filter_created(self):
        return (self.model.objects.order_by('-created')
                .with_index(addons='created_type_idx'))

    def filter_updated(self):
        return (self.model.objects.order_by('-last_updated')
                .with_index(addons='last_updated_type_idx'))

    def filter_rating(self):
        return (self.model.objects.order_by('-bayesian_rating')
                .with_index(addons='rating_type_idx'))

    def filter_hotness(self):
        return self.model.objects.order_by('-hotness')

    def filter_name(self):
        # Names are translated, so ordering happens on the translation
        # table.
        return order_by_translation(self.model.objects.all(), 'name')
class ESBaseFilter(BaseFilter):
    """BaseFilter that uses elasticsearch."""

    def __init__(self, request, base, key, default):
        super(ESBaseFilter, self).__init__(request, base, key, default)

    def filter(self, field):
        """Apply the elasticsearch sort matching ``field``."""
        sort_fields = {
            'name': 'name_sort',
            'created': '-created',
            'updated': '-last_updated',
            'popular': '-weekly_downloads',
            'users': '-average_daily_users',
            'rating': '-bayesian_rating',
        }
        return self.base_queryset.order_by(sort_fields[field])
class HomepageFilter(BaseFilter):
    """Filter powering the homepage add-on listing tabs."""
    # (key, localized title) pairs; keys appear in GET parameters.
    opts = (('featured', _lazy(u'Featured')),
            ('popular', _lazy(u'Popular')),
            ('new', _lazy(u'Recently Added')),
            ('updated', _lazy(u'Recently Updated')))

    # 'new' sorts by creation date, reusing the base implementation.
    filter_new = BaseFilter.filter_created
def home(request):
    """Desktop homepage: featured/popular/up-and-coming add-ons,
    personas and featured collections."""
    # Add-ons.
    base = Addon.objects.listed(request.APP).filter(type=amo.ADDON_EXTENSION)
    # This is lame for performance. Kill it with ES.
    frozen = list(FrozenAddon.objects.values_list('addon', flat=True))

    # Collections.
    collections = Collection.objects.filter(listed=True,
                                            application=request.APP.id,
                                            type=amo.COLLECTION_FEATURED)
    featured = Addon.objects.featured(request.APP, request.LANG,
                                      amo.ADDON_EXTENSION)[:18]
    # Frozen add-ons are excluded from the automatic rankings.
    popular = base.exclude(id__in=frozen).order_by('-average_daily_users')[:10]
    hotness = base.exclude(id__in=frozen).order_by('-hotness')[:18]
    personas = Addon.objects.featured(request.APP, request.LANG,
                                      amo.ADDON_PERSONA)[:18]
    return jingo.render(request, 'addons/home.html',
                        {'popular': popular, 'featured': featured,
                         'hotness': hotness, 'personas': personas,
                         'src': 'homepage', 'collections': collections})
@mobilized(home)
def home(request):
    """Mobile homepage: 3 random featured and 3 random popular add-ons."""
    # Shuffle the list and get 3 items. (random.shuffle returns None, so
    # the ``or`` always evaluates to the slice.)
    rand = lambda xs: random.shuffle(xs) or xs[:3]

    # Get some featured add-ons with randomness.
    featured = Addon.featured_random(request.APP, request.LANG)[:3]
    # Get 10 popular add-ons, then pick 3 at random.
    qs = list(Addon.objects.listed(request.APP)
              .filter(type=amo.ADDON_EXTENSION)
              .order_by('-average_daily_users')
              .values_list('id', flat=True)[:10])
    popular = rand(qs)

    # Do one query and split up the add-ons.
    addons = (Addon.objects.filter(id__in=featured + popular)
              .filter(type=amo.ADDON_EXTENSION))
    featured = [a for a in addons if a.id in featured]
    popular = sorted([a for a in addons if a.id in popular],
                     key=attrgetter('average_daily_users'), reverse=True)

    return jingo.render(request, 'addons/mobile/home.html',
                        {'featured': featured, 'popular': popular})
def homepage_promos(request):
    """Render the discovery-pane promo modules for the homepage."""
    from discovery.views import promos
    version = request.GET.get('version')
    platform = request.GET.get('platform')
    if not (version or platform):
        raise http.Http404
    return promos(request, 'home', version, platform)
class CollectionPromoBox(object):
    """Builds the featured-collections promo box shown on the homepage."""

    def __init__(self, request):
        self.request = request

    def features(self):
        return CollectionFeature.objects.all()

    def collections(self):
        """Map each CollectionFeature to its best promo collection,
        favoring locale-specific promos over locale-less ones."""
        features = self.features()
        lang = translation.to_language(translation.get_language())
        locale = Q(locale='') | Q(locale=lang)
        promos = (CollectionPromo.objects.filter(locale)
                  .filter(collection_feature__in=features)
                  .transform(CollectionPromo.transformer))
        groups = sorted_groupby(promos, 'collection_feature_id')

        # We key by feature_id and locale, so we can favor locale specific
        # promos.
        promo_dict = {}
        for feature_id, v in groups:
            # Python 2 iterator protocol: take the first promo per group.
            promo = v.next()
            key = (feature_id, translation.to_language(promo.locale))
            promo_dict[key] = promo

        rv = {}
        # If we can, we favor locale specific collections.
        for feature in features:
            key = (feature.id, lang)
            if key not in promo_dict:
                # Fall back to the locale-less promo, if any.
                key = (feature.id, '')
                if key not in promo_dict:
                    continue

            # We only want to see public add-ons on the front page.
            c = promo_dict[key].collection
            c.public_addons = c.addons.all() & Addon.objects.public()
            rv[feature] = c

        return rv

    def __nonzero__(self):
        # Python 2 truthiness hook: the promo box only renders on Firefox.
        return self.request.APP == amo.FIREFOX
@addon_view
def eula(request, addon, file_id=None):
    """Show an add-on's EULA; bounce to the detail page if it has none."""
    if not addon.eula:
        return http.HttpResponseRedirect(addon.get_url_path())
    version = (get_object_or_404(addon.versions, files__id=file_id)
               if file_id else addon.current_version)
    return jingo.render(request, 'addons/eula.html',
                        {'addon': addon, 'version': version})
@addon_view
def privacy(request, addon):
    """Show an add-on's privacy policy; bounce to the detail page if none."""
    if not addon.privacy_policy:
        return http.HttpResponseRedirect(addon.get_url_path())
    context = {'addon': addon}
    return jingo.render(request, 'addons/privacy.html', context)
@addon_view
def developers(request, addon, page):
    """"Meet the developers" page for an add-on (not for personas).

    :param page: which entry point the user came from ('developers',
        'installed' or 'roadblock'); selects the tracking src values.
    """
    if addon.is_persona():
        raise http.Http404()
    if 'version' in request.GET:
        qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
        version = get_list_or_404(qs, version=request.GET['version'])[0]
    else:
        version = addon.current_version

    if 'src' in request.GET:
        contribution_src = src = request.GET['src']
    else:
        page_srcs = {
            'developers': ('developers', 'meet-developers'),
            'installed': ('meet-the-developer-post-install', 'post-download'),
            'roadblock': ('meetthedeveloper_roadblock', 'roadblock'),
        }
        # Download src and contribution_src are different.
        # NOTE(review): .get(page) is None for an unknown page, which would
        # raise TypeError on unpacking — presumably the URLconf restricts
        # ``page`` to these three values; confirm.
        src, contribution_src = page_srcs.get(page)
    return jingo.render(request, 'addons/impala/developers.html',
                        {'addon': addon, 'page': page, 'src': src,
                         'contribution_src': contribution_src,
                         'version': version})
# TODO(andym): remove this once we figure out how to process for
# anonymous users. For now we are concentrating on logged in users.
@login_required
@addon_view
@can_be_purchased
@has_not_purchased
@write
@post_required
def purchase(request, addon):
    """Start a PayPal purchase of a premium add-on or webapp.

    Asks PayPal for a paykey, stores a pending Contribution and replies
    with JSON (Ajax) or a redirect into the PayPal flow.  If a
    pre-approval completed the payment instantly, the contribution is
    upgraded to a purchase after double-checking with PayPal.

    Fix: removed a dead ``amount = addon.premium.get_price()`` that was
    unconditionally overwritten a few lines later, wasting a redundant
    price lookup.
    """
    log.debug('Starting purchase of addon: %s by user: %s'
              % (addon.pk, request.amo_user.pk))
    source = request.POST.get('source', '')
    uuid_ = hashlib.md5(str(uuid.uuid4())).hexdigest()
    # l10n: {0} is the addon name
    contrib_for = _(u'Purchase of {0}').format(jinja2.escape(addon.name))

    # Default is USD.
    amount, currency = addon.premium.get_price(), 'USD'

    # If tier is specified, then let's look it up.
    form = PriceCurrencyForm(data=request.POST, addon=addon)
    if form.is_valid():
        tier = form.get_tier()
        if tier:
            amount, currency = tier.price, tier.currency

    paykey, status, error = '', '', ''
    preapproval = None
    if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:
        preapproval = request.amo_user.get_preapproval()

    try:
        # Webapps return to a different URL pattern and use app_slug.
        pattern = 'addons.purchase.finished'
        slug = addon.slug
        if addon.is_webapp():
            pattern = 'apps.purchase.finished'
            slug = addon.app_slug

        paykey, status = paypal.get_paykey(
            dict(amount=amount,
                 chains=settings.PAYPAL_CHAINS,
                 currency=currency,
                 email=addon.paypal_id,
                 ip=request.META.get('REMOTE_ADDR'),
                 memo=contrib_for,
                 pattern=pattern,
                 preapproval=preapproval, qs={'realurl':
                                              request.POST.get('realurl')},
                 slug=slug, uuid=uuid_))
    except paypal.PaypalError as error:
        paypal.paypal_log_cef(request, addon, uuid_,
                              'PayKey Failure', 'PAYKEYFAIL',
                              'There was an error getting the paykey')
        log.error('Error getting paykey, purchase of addon: %s' % addon.pk,
                  exc_info=True)

    if paykey:
        contrib = Contribution(addon_id=addon.id, amount=amount,
                               source=source, source_locale=request.LANG,
                               uuid=str(uuid_), type=amo.CONTRIB_PENDING,
                               paykey=paykey, user=request.amo_user)
        log.debug('Storing contrib for uuid: %s' % uuid_)

        # If this was a pre-approval, it's completed already, we'll
        # double check this with PayPal, just to be sure nothing went wrong.
        if status == 'COMPLETED':
            paypal.paypal_log_cef(request, addon, uuid_,
                                  'Purchase', 'PURCHASE',
                                  'A user purchased using pre-approval')

            log.debug('Status is completed for uuid: %s' % uuid_)
            if paypal.check_purchase(paykey) == 'COMPLETED':
                log.debug('Check purchase is completed for uuid: %s' % uuid_)
                contrib.type = amo.CONTRIB_PURCHASE
            else:
                # In this case PayPal disagreed, we should not be trusting
                # what get_paykey said. Which is a worry.
                log.error('Check purchase failed on uuid: %s' % uuid_)
                status = 'NOT-COMPLETED'

        contrib.save()
    else:
        log.error('No paykey present for uuid: %s' % uuid_)

    log.debug('Got paykey for addon: %s by user: %s'
              % (addon.pk, request.amo_user.pk))
    url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
    if request.POST.get('result_type') == 'json' or request.is_ajax():
        return http.HttpResponse(json.dumps({'url': url,
                                             'paykey': paykey,
                                             'error': str(error),
                                             'status': status}),
                                 content_type='application/json')

    # This is the non-Ajax fallback.
    if status != 'COMPLETED':
        return http.HttpResponseRedirect(url)

    messages.success(request, _('Purchase complete'))
    return http.HttpResponseRedirect(shared_url('addons.detail', addon))
# TODO(andym): again, remove this once we figure out logged out flow.
@csrf_exempt
@login_required
@addon_view
@can_be_purchased
@write
def purchase_complete(request, addon, status):
    """Landing page PayPal returns to after the purchase flow.

    On 'complete', finds the matching Contribution by uuid (pre-IPN) or
    transaction_id (post-IPN), re-checks the purchase with PayPal and
    upgrades pending contributions that really completed.
    """
    result = ''
    if status == 'complete':
        uuid_ = request.GET.get('uuid')
        log.debug('Looking up contrib for uuid: %s' % uuid_)

        # The IPN may, or may not have come through. Which means looking for
        # a for pre or post IPN contributions. If both fail, then we've not
        # got a matching contribution.
        lookup = (Q(uuid=uuid_, type=amo.CONTRIB_PENDING) |
                  Q(transaction_id=uuid_, type=amo.CONTRIB_PURCHASE))
        con = get_object_or_404(Contribution, lookup)

        log.debug('Check purchase paypal addon: %s, user: %s, paykey: %s'
                  % (addon.pk, request.amo_user.pk, con.paykey[:10]))
        try:
            result = paypal.check_purchase(con.paykey)
            if result == 'ERROR':
                paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail',
                                      'PURCHASEFAIL',
                                      'Checking purchase state returned error')
                # Bare raise with no active exception: deliberately jumps
                # into the broad except below to reuse its error handling.
                raise
        except:
            paypal.paypal_log_cef(request, addon, uuid_, 'Purchase Fail',
                                  'PURCHASEFAIL',
                                  'There was an error checking purchase state')
            log.error('Check purchase paypal addon: %s, user: %s, paykey: %s'
                      % (addon.pk, request.amo_user.pk, con.paykey[:10]),
                      exc_info=True)
            result = 'ERROR'
            status = 'error'
        log.debug('Paypal returned: %s for paykey: %s'
                  % (result, con.paykey[:10]))
        if result == 'COMPLETED' and con.type == amo.CONTRIB_PENDING:
            con.update(type=amo.CONTRIB_PURCHASE)

    context = {'realurl': request.GET.get('realurl', ''),
               'status': status, 'result': result}

    # For mobile, bounce back to the details page.
    if request.MOBILE:
        url = urlparams(shared_url('detail', addon), **context)
        return http.HttpResponseRedirect(url)

    context.update({'addon': addon})
    response = jingo.render(request, 'addons/paypal_result.html', context)
    # Allow framing — presumably this page renders inside the PayPal
    # flow's frame; confirm before tightening.
    response['x-frame-options'] = 'allow'
    return response
@login_required
@addon_view
@can_be_purchased
@has_purchased
def purchase_thanks(request, addon):
    """Thank-you page shown after a completed purchase."""
    realurl = request.GET.get('realurl', '')
    context = {
        'addon': addon,
        'is_ajax': request.is_ajax(),
        'download': urlparse(realurl).path,
    }
    if addon.is_webapp():
        # Webapps also need an install receipt for the client.
        installed, created = Installed.objects.safer_get_or_create(
            addon=addon, user=request.amo_user)
        context['receipt'] = installed.receipt
    return jingo.render(request, 'addons/paypal_thanks.html', context)
@login_required
@addon_view
@can_be_purchased
def purchase_error(request, addon):
    """Error page shown when a purchase attempt fails."""
    context = {'addon': addon, 'is_ajax': request.is_ajax()}
    return jingo.render(request, 'addons/paypal_error.html', context)
@addon_view
@anonymous_csrf_exempt
@post_required
def contribute(request, addon):
    """Start a PayPal contribution (donation) for an add-on.

    Validates the requested amount, asks PayPal for a paykey, stores a
    Contribution row and replies with JSON (Ajax) or a redirect into
    the PayPal flow.
    """
    webapp = addon.is_webapp()
    contrib_type = request.POST.get('type', 'suggested')
    is_suggested = contrib_type == 'suggested'
    source = request.POST.get('source', '')
    comment = request.POST.get('comment', '')

    # Pick the amount by contribution type, falling back to the
    # site-wide default when empty/unknown.
    amount = {
        'suggested': addon.suggested_amount,
        'onetime': request.POST.get('onetime-amount', '')
    }.get(contrib_type, '')
    if not amount:
        amount = settings.DEFAULT_SUGGESTED_CONTRIBUTION

    # This is all going to get shoved into solitude. Temporary.
    form = ContributionForm({'amount': amount})
    if not form.is_valid():
        return http.HttpResponse(json.dumps({'error': 'Invalid data.',
                                             'status': '', 'url': '',
                                             'paykey': ''}),
                                 content_type='application/json')

    contribution_uuid = hashlib.md5(str(uuid.uuid4())).hexdigest()

    if addon.charity:
        # TODO(andym): Figure out how to get this in the addon authors
        # locale, rather than the contributors locale.
        name, paypal_id = (u'%s: %s' % (addon.name, addon.charity.name),
                           addon.charity.paypal)
    else:
        name, paypal_id = addon.name, addon.paypal_id
    # l10n: {0} is the addon name
    contrib_for = _(u'Contribution for {0}').format(jinja2.escape(name))

    preapproval = None
    if waffle.flag_is_active(request, 'allow-pre-auth') and request.amo_user:
        preapproval = request.amo_user.get_preapproval()

    paykey, error, status = '', '', ''
    try:
        paykey, status = paypal.get_paykey(
            dict(amount=amount,
                 email=paypal_id,
                 ip=request.META.get('REMOTE_ADDR'),
                 memo=contrib_for,
                 pattern='%s.paypal' % ('apps' if webapp else 'addons'),
                 preapproval=preapproval,
                 slug=addon.slug,
                 uuid=contribution_uuid))
    except paypal.PaypalError as error:
        paypal.paypal_log_cef(request, addon, contribution_uuid,
                              'PayKey Failure', 'PAYKEYFAIL',
                              'There was an error getting the paykey')
        log.error('Error getting paykey, contribution for addon: %s'
                  % addon.pk, exc_info=True)

    if paykey:
        contrib = Contribution(addon_id=addon.id, charity_id=addon.charity_id,
                               amount=amount, source=source,
                               source_locale=request.LANG,
                               annoying=addon.annoying,
                               uuid=str(contribution_uuid),
                               is_suggested=is_suggested,
                               suggested_amount=addon.suggested_amount,
                               comment=comment, paykey=paykey)
        contrib.save()

    url = '%s?paykey=%s' % (settings.PAYPAL_FLOW_URL, paykey)
    if request.GET.get('result_type') == 'json' or request.is_ajax():
        # If there was an error getting the paykey, then JSON will
        # not have a paykey and the JS can cope appropriately.
        return http.HttpResponse(json.dumps({'url': url,
                                             'paykey': paykey,
                                             'error': str(error),
                                             'status': status}),
                                 content_type='application/json')
    return http.HttpResponseRedirect(url)
@csrf_exempt
@addon_view
def paypal_result(request, addon, status):
    """Page PayPal bounces contributors back to (completed or cancelled).

    Fix: the local was previously named ``uuid``, shadowing the
    module-level ``uuid`` import; renamed to ``contribution_uuid``.
    """
    contribution_uuid = request.GET.get('uuid')
    if not contribution_uuid:
        raise http.Http404()
    if status == 'cancel':
        log.info('User cancelled contribution: %s' % contribution_uuid)
    else:
        log.info('User completed contribution: %s' % contribution_uuid)
    response = jingo.render(request, 'addons/paypal_result.html',
                            {'addon': addon, 'status': status})
    # Allow framing — presumably this page renders inside the PayPal
    # flow's frame; confirm before tightening.
    response['x-frame-options'] = 'allow'
    return response
@addon_view
@can_be_purchased
@anonymous_csrf
def paypal_start(request, addon=None):
    """Kick off the PayPal purchase flow, forcing login first if needed.

    NOTE(review): the ``addon=None`` default looks unreachable —
    @addon_view supplies the add-on and ``addon.premium`` is
    dereferenced unconditionally below; confirm.
    """
    download = urlparse(request.GET.get('realurl', '')).path
    data = {'addon': addon, 'is_ajax': request.is_ajax(),
            'download': download,
            'currencies': addon.premium.price.currencies()}

    if request.user.is_authenticated():
        return jingo.render(request, 'addons/paypal_start.html', data)

    # Anonymous users get the login page with the purchase data embedded.
    from users.views import _login
    return _login(request, data=data, template='addons/paypal_start.html',
                  dont_redirect=True)
@addon_view
def share(request, addon):
    """Redirect to the sharing service for this add-on."""
    name, summary = addon.name, addon.summary
    return share_redirect(request, addon, name, summary)
@addon_view
def license(request, addon, version=None):
    """Show the license for a given (or the current) add-on version."""
    if version is None:
        found = addon.current_version
    else:
        qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
        found = get_list_or_404(qs, version=version)[0]
    if not (found and found.license):
        raise http.Http404
    return jingo.render(request, 'addons/impala/license.html',
                        dict(addon=addon, version=found))
def license_redirect(request, version):
    """Permanently redirect a bare license URL to the versioned one."""
    version_obj = get_object_or_404(Version, pk=version)
    return redirect(version_obj.license_url(), permanent=True)
@session_csrf.anonymous_csrf_exempt
@addon_view
def report_abuse(request, addon):
    """Full-page abuse-report form; handles its own POST."""
    form = AbuseForm(request.POST or None, request=request)
    if request.method != "POST" or not form.is_valid():
        # Initial GET, or invalid submission: (re-)render the form.
        return jingo.render(request, 'addons/report_abuse_full.html',
                            {'addon': addon, 'abuse_form': form, })
    send_abuse_report(request, addon, form.cleaned_data['text'])
    messages.success(request, _('Abuse reported.'))
    return http.HttpResponseRedirect(addon.get_url_path())
@cache_control(max_age=60 * 60 * 24)
def persona_redirect(request, persona_id):
    """Permanent redirect from a legacy persona id to its detail page."""
    persona = get_object_or_404(Persona, persona_id=persona_id)
    url = reverse('addons.detail', args=[persona.addon.slug])
    return http.HttpResponsePermanentRedirect(url)
|
<reponame>MarcelRaschke/dactyl
#!/usr/bin/env python3
from dactyl.common import *
import argparse
class DactylCLIParser:
    """Build and parse the command line for one of the three Dactyl utilities.

    The UTIL_* constants serve both as the argparse description string and as
    the switch deciding which utility-specific arguments to register.  The
    parsed namespace is stored on ``self.cli_args``.
    """
    UTIL_BUILD = "Generate static site from markdown and templates."
    UTIL_LINKS = "Check files in this repository for broken links."
    UTIL_STYLE = "Check content files for style issues."

    def __init__(self, utility):
        """Specify commandline usage and parse arguments"""
        parser = argparse.ArgumentParser(description=utility)

        # Options shared by all three utilities.
        noisiness = parser.add_mutually_exclusive_group(required=False)
        noisiness.add_argument("--quiet", "-q", action="store_true",
                               help="Suppress status messages")
        noisiness.add_argument("--debug", action="store_true",
                               help="Print debug-level log messages")
        parser.add_argument("--config", "-c", type=str,
                            help="Specify path to an alternate config file.")
        parser.add_argument("--version", "-v", action="store_true",
                            help="Print version information and exit.")
        parser.add_argument("--bypass_errors", "-b", action="store_true",
                            help="Continue if recoverable errors occur")

        # --target applies to both the build and style-check utilities.
        if utility in (self.UTIL_BUILD, self.UTIL_STYLE):
            parser.add_argument("--target", "-t", type=str,
                                help="Use the specified target (from the config file).")

        if utility == self.UTIL_BUILD:
            # Output formats are mutually exclusive; --html carries
            # default=True so HTML is produced when nothing is chosen.
            build_mode = parser.add_mutually_exclusive_group(required=False)
            build_mode.add_argument("--pdf", nargs="?", type=str,
                                    const=DEFAULT_PDF_FILE, default=NO_PDF,
                                    help="Output a PDF to this file. Requires Prince.")
            build_mode.add_argument("--md", action="store_true",
                                    help="Output markdown only")
            build_mode.add_argument("--html", action="store_true", default=True,
                                    help="Output HTML files (the default)")
            build_mode.add_argument("--es", action="store_true",
                                    help="Output JSON for ElasticSearch upload")
            # HTML is the default mode

            # Static-file copying strategies, also mutually exclusive.
            static_files = parser.add_mutually_exclusive_group(required=False)
            static_files.add_argument("--copy_static", "-s", action="store_true",
                                      help="Copy all static files to the out dir",
                                      default=False)
            static_files.add_argument("--no_static", "-S", action="store_true",
                                      help="Don't copy any static files to the out dir",
                                      default=False)
            static_files.add_argument("--template_static", "-T", action="store_true",
                                      help="Copy only templates' static files to the out dir",
                                      default=False)
            static_files.add_argument("--content_static", "-C", action="store_true",
                                      help="Copy only the content's static files to the out dir",
                                      default=False)

            # Remaining build-only options.
            parser.add_argument("--es_upload", nargs="?", type=str,
                                const=DEFAULT_ES_URL, default=NO_ES_UP,
                                help="Upload documents to ElasticSearch cluster "+
                                "at this URL (http://localhost:9200 by default). "+
                                "Ignored when making PDFs.")
            parser.add_argument("--leave_temp_files", action="store_true",
                                help="Leave temp files in place (for debugging or "+
                                "manual PDF generation). Ignored when using --watch",
                                default=False)
            parser.add_argument("--list_targets_only", "-l", action="store_true",
                                help="Don't build anything, just display list of "+
                                "known targets from the config file.")
            parser.add_argument("--only", type=str, help=".md or .html filename of a "+
                                "single page in the config to build alone.")
            parser.add_argument("--out_dir", "-o", type=str,
                                help="Output to this folder (overrides config file)")
            parser.add_argument("--pages", type=str, help="Markdown file(s) to build "+
                                "that aren't described in the config.", nargs="+")
            parser.add_argument("--openapi", type=str, help="OpenAPI spec file "+
                                "to generate docs from.")
            parser.add_argument("--no_cover", "-n", action="store_true",
                                help="Don't automatically add a cover / index file.")
            parser.add_argument("--skip_preprocessor", action="store_true", default=False,
                                help="Don't pre-process Jinja syntax in markdown files")
            parser.add_argument("--template_strict_undefined", action="store_true",
                                help="Raise an error on undefined variables in "+
                                "template syntax.")
            parser.add_argument("--pp_strict_undefined", action="store_true",
                                help="Raise an error on undefined variables in "+
                                "preprocessor syntax.")
            parser.add_argument("--title", type=str, help="Override target display "+
                                "name. Useful when passing multiple args to --pages.")
            parser.add_argument("--vars", type=str, help="A YAML or JSON file with vars "+
                                "to add to the target so the preprocessor and "+
                                "templates can reference them.")
            parser.add_argument("--watch", "-w", action="store_true",
                                help="Watch for changes and re-generate output. "+
                                "This runs until force-quit.")

        elif utility == self.UTIL_LINKS:
            # Link-checker-only options.
            parser.add_argument("-o", "--offline", action="store_true",
                                help="Check local anchors only")
            parser.add_argument("-s", "--strict", action="store_true",
                                help="Exit with error even on known problems")
            parser.add_argument("-n", "--no_final_retry", action="store_true",
                                help="Don't wait and retry failed remote links at the end.")

        self.cli_args = parser.parse_args()
|
# Copyright (c) 2020 Spanish National Research Council
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from paths import PATHS
def get_data(trend=False,
             shift_window=None,
             labels=True,
             multistep=None,
             exclude=('Ceuta', 'Melilla', 'Balears, Illes',
                      'Santa Cruz de Tenerife', 'Palmas, Las')):
    """
    Load the provinces incidence/mobility CSV and build per-province features.

    trend : bool
        Compute velocities and accelerations
    shift_window : int, None
        If trend=False, use raw past days (negative int, e.g. -7).
    labels : bool
        Compute the labels
    multistep : int, None
        If labels=True, then compute all shifted days till this date.
        If set to None, only compute label t+7.
    exclude : iterable of strs
        Outlier provinces to exclude from the dataset.
        (A tuple by default: a mutable default list is a Python pitfall.)

    Returns
    -------
    pd.DataFrame
        One row per (date, province) with incidence/mobility features,
        columns sorted alphabetically.
    """
    df = pd.read_csv(
        PATHS.rawdir / "provinces-incidence-mobility.csv",
        header=[0, 1]
    )
    df = df.fillna(0)
    # Format column names correctly: blank header cells come back as
    # 'Unnamed: ...' placeholders, replace them with empty strings.
    for i, col in enumerate(df.columns.levels):
        new = np.where(col.str.contains('Unnamed'), '', col)
        df = df.rename(columns=dict(zip(col, new)), level=i)
    # Process provinces
    new_df = []
    provinces = df[('province', '')].unique()
    for p in provinces:
        if p in exclude:
            continue
        # .copy() so the rolling-sum assignment below writes into an
        # independent frame instead of a slice of df (avoids
        # SettingWithCopyWarning and any chained-assignment ambiguity).
        dfp = df[df[('province', '')] == p].copy()
        new_dfp = dfp[['date', 'province', 'incidence 7', 'flux intra']]
        new_dfp = new_dfp.droplevel(1, axis='columns')
        # Compute a 7-day rolling sum for mobility
        cols = dfp.xs('flux', level=1, drop_level=False, axis='columns').columns
        dfp[cols] = dfp[cols].rolling(7).sum()
        new_dfp['flux intra'] = new_dfp['flux intra'].rolling(7).sum()
        # Add external risk: incoming flux weighted by the origin's incidence,
        # summed over all origin provinces (min_count=1 keeps all-NaN rows NaN).
        flux = dfp.xs('flux', level=1, drop_level=True, axis='columns')
        inc = dfp.xs('incidence 7', level=1, drop_level=True, axis='columns')
        new_dfp['external risk'] = (flux * inc).sum(axis='columns', min_count=1)
        # Add time window as additional columns
        shift_cols = ['incidence 7', 'flux intra', 'external risk']
        shift_days = []
        if trend:
            shift_days = [-1, -3]
        elif shift_window:
            shift_days = range(shift_window, 0)
        for i in shift_days:
            for j in shift_cols:
                # i is negative, so shift(-i) pulls the value from i days ago.
                new_dfp[f'{j} (t{i:+d})'] = new_dfp[j].shift(-i)
        # Create a shifted incidence for label
        if labels:
            if multistep:
                for i in range(1, multistep+1):
                    new_dfp[f'incidence 7 (t+{i})'] = new_dfp['incidence 7'].shift(-i)
            else:
                new_dfp['incidence 7 (t+7)'] = new_dfp['incidence 7'].shift(-7)
        # Keep only rows with non-nans (eg. t+7 shift will remove last 7 rows)
        new_dfp = new_dfp[~new_dfp.isna().any(axis='columns')]
        # Compute trends (works best normalizing with value at t)
        if trend:
            for i in shift_cols:
                d1, d2 = shift_days[0], shift_days[1]
                new_dfp[f'{i} (vel)'] = (new_dfp[f'{i}'] - new_dfp[f'{i} (t{d1})']) \
                    / new_dfp[f'{i}']
                new_dfp[f'{i} (acc)'] = (new_dfp[f'{i}'] - 2 * new_dfp[f'{i} (t{d1})'] + new_dfp[f'{i} (t{d2})']) \
                    / new_dfp[f'{i}']
                # Division by a zero value at t produces inf; zero those out.
                new_dfp = new_dfp.replace([np.inf, -np.inf, np.nan], 0)
                new_dfp = new_dfp.drop(columns=[f'{i} (t{d1})', f'{i} (t{d2})'])
        # Append province to final df
        new_df.append(new_dfp)
    new_df = pd.concat(new_df).reset_index(drop=True)
    new_df = new_df.sort_index(axis=1)
    return new_df
def make_splits(df,
                multistep=None,
                norm=True,
                dtrain='2020-11-30',  # end of training dataset
                dval='2021-01-31',  # end of validation dataset
                ):
    """Split the processed dataframe into train/val/test X,y dictionaries.

    Rows are assigned by date: train up to ``dtrain``, val up to ``dval``,
    test afterwards.  When ``norm`` is True, inputs are standardized with the
    training mean/std, which are also persisted to ``PATHS.models/norm.csv``.
    """
    # Index by (date, province) and separate features from label columns.
    df = df.set_index(['date', 'province'])
    if multistep:
        label_cols = [f'incidence 7 (t+{i})' for i in range(1, multistep+1)]
    else:
        label_cols = ['incidence 7 (t+7)']
    X = df[df.columns.difference(label_cols)]
    y = df[label_cols]

    # Resolve split boundaries; falsy boundaries collapse to the last date.
    dates = df.index.get_level_values('date')
    dtrain = dtrain or max(dates)
    dval = dval or max(dates)
    assert dval >= dtrain, 'Validation date should be bigger than training date.'

    train_mask = dates <= dtrain
    val_mask = (dates > dtrain) & (dates <= dval)
    test_mask = dates > dval
    splits = {
        'train': {'X': X[train_mask], 'y': y[train_mask]},
        'val': {'X': X[val_mask], 'y': y[val_mask]},
        'test': {'X': X[test_mask], 'y': y[test_mask]},
    }

    # Standardize inputs with training statistics and persist them.
    if norm:
        mean = splits['train']['X'].mean()
        std = splits['train']['X'].std()
        for split in splits.values():
            split['X'] = (split['X'] - mean) / std
        stats = pd.DataFrame([mean, std], index=['mean', 'std'])
        stats.to_csv(PATHS.models / 'norm.csv')

    return splits
def normalize(X):
    """Standardize ``X`` with the mean/std previously saved by make_splits."""
    stats = pd.read_csv(PATHS.models / 'norm.csv', index_col=0)
    mean, std = stats.loc['mean'], stats.loc['std']
    return (X - mean) / std
def unprocess(df, norm=True, log=True):
    """
    Undo the data preprocessing.

    norm : bool
        Undo the mean/std standardization using the stats saved in
        ``PATHS.models/norm.csv``.
    log : bool
        Undo the log transform for the columns it was applied to.
    """
    normdf = pd.read_csv(PATHS.models / 'norm.csv', index_col=0)
    df = df.copy()  # avoid mutating the caller's dataframe in place
    for c in df.columns:
        cn = c.split(' (')[0]  # 'incidence 14 (t+14)' --> 'incidence 14'
        if norm:
            df[c] = df[c] * normdf.at['std', cn] + normdf.at['mean', cn]
        if log and (cn in ['external risk']):
            df[c] = np.exp(df[c])
        # if log and (cn in ['external risk', 'density', 'incidence 14']):
        #     df[c] = np.exp(df[c])
        #     if cn == 'incidence 14':
        #         df[c] -= 1
        if cn == 'incidence 14':
            # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int is the documented replacement.
            df[c] = np.round(df[c]).astype(int)
    return df
def single_X_y(splits):
    """Concatenate the train/val/test splits back into one X and one y."""
    order = ['train', 'val', 'test']
    X = pd.concat([splits[name]['X'] for name in order])
    y = pd.concat([splits[name]['y'] for name in order])
    return X, y
|
<gh_stars>1-10
import math
from enum import Enum
from functools import total_ordering
# TODO different fees to lp holders and swap operation
class Fee(Enum):
    """
    Available fee tiers for UniV3 pools: 0.05%, 0.3% and 1%.
    Enum values are the fee fraction scaled by 1_000_000 (so 0.05% -> 500).
    """
    LOW = 500
    MIDDLE = 3000
    HIGH = 10000

    @property
    def percent(self) -> float:
        """Fee as a fraction, e.g. ``Fee.LOW.percent == 0.0005``."""
        return self.value / 1_000_000

    @property
    def spacing(self) -> int:
        """Tick spacing for this fee tier (10, 60 or 200)."""
        return SPACING[self]
@total_ordering
class Token(Enum):
    """
    ``Token`` represents one of mainnet tokens and contains some
    additional data like address and decimals.
    This class is ordered according to token address. E.g. :code:`Token.WBTC < Token.USDC`.
    """
    WBTC = "WBTC"
    WETH = "WETH"
    stETH = "stETH"
    USDC = "USDC"
    USDT = "USDT"

    @property
    def address(self) -> str:
        """
        Returns:
            Mainnet address of the token.
        """
        return TOKEN_DETAILS[self.value]["address"]

    @property
    def decimals(self) -> int:
        """
        Returns:
            Decimals of the token.
        """
        return TOKEN_DETAILS[self.value]["decimals"]

    def _is_valid_operand(self, other: 'Token') -> bool:
        """
        Checks if ``other`` is a Token and can be compared to this one.

        Args:
            other: Other Token.
        Returns:
            Valid or not.
        """
        return isinstance(other, Token)

    def __eq__(self, other: 'Token') -> bool:
        """
        Checks if Tokens are equal.

        Args:
            other: Other Token.
        Returns:
            Equal or not.
        """
        if not self._is_valid_operand(other):
            return NotImplemented
        return self.value == other.value

    def __lt__(self, other: 'Token') -> bool:
        """
        Orders Tokens by (case-insensitive) mainnet address.

        Args:
            other: Other Token.
        """
        if not self._is_valid_operand(other):
            return NotImplemented
        return (
            TOKEN_DETAILS[self.value]["address"].lower()
            < TOKEN_DETAILS[other.value]["address"].lower()
        )

    # BUG FIX: defining __eq__ sets __hash__ to None, which made Token
    # members unhashable (unusable as dict keys / set members).  Restore
    # the Enum identity-based hash, which is consistent with __eq__ here
    # because enum members with equal values are the same object.
    __hash__ = Enum.__hash__
class Pool:
    """
    ``Pool`` represents a mainnet UniV3 pool.

    Attributes:
        tokenA:
            First token of the pool.
        tokenB:
            Second token of the pool.
        fee:
            Pool fee.
    """

    def __init__(self, tokenA: "Token", tokenB: "Token", fee: "Fee"):
        self._token0, self._token1 = tokenA, tokenB
        self._fee = fee
        # Resolve the mainnet address from the known POOLS registry.
        self._address = next(
            (
                entry["address"]
                for entry in POOLS
                if entry["token0"] == self._token0
                and entry["token1"] == self._token1
                and entry["fee"] == fee
            ),
            None,
        )
        if not self._address:
            raise KeyError("Pool not found")

    @property
    def decimals_diff(self) -> int:
        """
        Difference between ``token0`` and ``token1`` decimals.
        Used for conversion of price from `wei` to `eth`.

        Returns:
            Decimal difference between Tokens.
        """
        return self._token0.decimals - self._token1.decimals

    @property
    def l_decimals_diff(self) -> float:
        """
        Used for conversion of liquidity from `wei` to `eth`.

        Returns:
            Decimal difference between Tokens in Eth.
        """
        return float(self._token0.decimals + self._token1.decimals) / 2

    @property
    def tick_diff(self) -> int:
        """
        Used for conversion of tick from `wei` to `eth`.

        Returns:
            Tick diff. tick(eth/btc) - tick(wei/satoshi)
        """
        return int(math.floor(self.decimals_diff) * math.log(10, 1.0001))

    @property
    def name(self) -> str:
        """
        Returns:
            Unique underscore-separated name, e.g. ``WBTC_WETH_3000``.
        """
        return f"{self._token0.value}_{self._token1.value}_{self._fee.value}"

    @property
    def _name(self) -> str:
        """
        Returns:
            Human-readable name, e.g. ``WBTC/WETH 0.3%``.
        """
        return f"{self._token0.value}/{self._token1.value} {100 * self._fee.percent}%"

    @property
    def address(self) -> str:
        """
        Returns:
            Pool mainnet address.
        """
        return self._address

    @property
    def token0(self) -> "Token":
        """
        Returns:
            First token name.
        """
        return self._token0

    @property
    def token1(self) -> "Token":
        """
        Returns:
            Second token name.
        """
        return self._token1

    @property
    def fee(self) -> "Fee":
        """
        Returns:
            Fee of the pool.
        """
        return self._fee
# Mainnet metadata for each supported token, keyed by Token value.
TOKEN_DETAILS = {
    Token.WBTC.value: {
        "name": "WBTC",
        "description": "Wrapped BTC",
        "address": "0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599",
        "decimals": 8,
    },
    Token.USDC.value: {
        "name": "USDC",
        "description": "USD Coin",
        "address": "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48",
        "decimals": 6,
    },
    Token.WETH.value: {
        "name": "WETH",
        "description": "Wrapped Ether",
        "address": "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2",
        "decimals": 18,
    },
    Token.stETH.value: {
        "name": "stETH",
        "description": "Staked Ether",
        "address": "0xae7ab96520DE3A18E5e111B5EaAb095312D7fE84",
        "decimals": 18,
    },
    Token.USDT.value: {
        "name": "USDT",
        "description": "Tether USD",
        "address": "0xdAC17F958D2ee523a2206206994597C13D831ec7",
        "decimals": 6,
    },
}

# Registry of known mainnet UniV3 pools, consulted by Pool.__init__.
POOLS = [
    {
        "address": "0x4585FE77225b41b697C938B018E2Ac67Ac5a20c0",
        "token0": Token.WBTC,
        "token1": Token.WETH,
        "fee": Fee.LOW,
    },
    {
        "address": "0xCBCdF9626bC03E24f779434178A73a0B4bad62eD",
        "token0": Token.WBTC,
        "token1": Token.WETH,
        "fee": Fee.MIDDLE,
    },
    {
        "address": "0x6Ab3bba2F41e7eAA262fa5A1A9b3932fA161526F",
        "token0": Token.WBTC,
        "token1": Token.WETH,
        "fee": Fee.HIGH,
    },
    {
        "address": "0x88e6A0c2dDD26FEEb64F039a2c41296FcB3f5640",
        "token0": Token.USDC,
        "token1": Token.WETH,
        "fee": Fee.LOW,
    },
    {
        "address": "0x8ad599c3A0ff1De082011EFDDc58f1908eb6e6D8",
        "token0": Token.USDC,
        "token1": Token.WETH,
        "fee": Fee.MIDDLE,
    },
    {
        "address": "0x7BeA39867e4169DBe237d55C8242a8f2fcDcc387",
        "token0": Token.USDC,
        "token1": Token.WETH,
        "fee": Fee.HIGH,
    },
    {
        "address": "0x7858E59e0C01EA06Df3aF3D20aC7B0003275D4Bf",
        "token0": Token.USDC,
        "token1": Token.USDT,
        "fee": Fee.LOW,
    },
    {
        "address": "0xEe4Cf3b78A74aFfa38C6a926282bCd8B5952818d",
        "token0": Token.USDC,
        "token1": Token.USDT,
        "fee": Fee.MIDDLE,
    },
    {
        "address": "0xbb256c2F1B677e27118b0345FD2b3894D2E6D487",
        "token0": Token.USDC,
        "token1": Token.USDT,
        "fee": Fee.HIGH,
    },
]

# Global tick bounds -- presumably UniswapV3's MIN_TICK/MAX_TICK protocol
# constants; confirm against the Uniswap v3 core contracts.
MIN_TICK = -887272
MAX_TICK = 887272
# Tick spacing per fee tier, used by Fee.spacing.
SPACING = {Fee.LOW: 10, Fee.MIDDLE: 60, Fee.HIGH: 200}
|
<gh_stars>0
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from datetime import timedelta
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Prefer the environment-provided key; fall back to a development default
# when it is unset (or empty).
SECRET_KEY = os.environ.get('SECRET_KEY') or 'secret'
# SETTING SMTP EMAILS
# BUG FIX: the original used `is not 'production'`, an identity comparison
# against a string literal.  Identity of strings is an implementation
# detail (and CPython emits a SyntaxWarning for it), so environment-provided
# values would practically never compare identical; use != for a value
# comparison.
if os.environ.get('ENV') != 'production':
    # Need to locally install a SMTP server
    EMAIL_HOST = 'localhost'
    EMAIL_PORT = '25'
    EMAIL_HOST_USER = 'test'
    EMAIL_HOST_PASSWORD = '<PASSWORD>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['Deco3801DjangoWeb-env.ggifdnnn8c.us-east-2.elasticbeanstalk.com', 'localhost']
# Application definition
# Application definition
INSTALLED_APPS = [
    # Django built-ins
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party
    'rest_framework',
    'webpack_loader',
    # Project apps
    'api.v1.apps.ApiV1Config',
    'home.apps.HomeConfig',
    'users.apps.UsersConfig',
    'systemsdb.apps.SystemsdbConfig',
    'storages'  # For S3
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'backend.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': ['./templates',],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'backend.wsgi.application'

# JWT Token Configuration (rest_framework_simplejwt); tokens are signed
# with the Django SECRET_KEY defined above.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(days=1),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=5),
    'ROTATE_REFRESH_TOKENS': False,
    'BLACKLIST_AFTER_ROTATION': True,
    'ALGORITHM': 'HS256',
    'SIGNING_KEY': SECRET_KEY,
    'VERIFYING_KEY': None,
    'AUTH_HEADER_TYPES': ('Bearer',),
    'USER_ID_FIELD': 'id',
    'USER_ID_CLAIM': 'user_id',
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
    'TOKEN_TYPE_CLAIM': 'token_type',
    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',
    'SLIDING_TOKEN_LIFETIME': timedelta(days=1),
    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=5),
}
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# Use the RDS MySQL instance when its environment variables are present
# (Elastic Beanstalk deployment); otherwise fall back to local SQLite.
if 'RDS_HOSTNAME' in os.environ:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': os.environ['RDS_DB_NAME'],
            'USER': os.environ['RDS_USERNAME'],
            'PASSWORD': os.environ['RDS_PASSWORD'],
            'HOST': os.environ['RDS_HOSTNAME'],
            'PORT': os.environ['RDS_PORT'],
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Custom user model
AUTH_USER_MODEL = 'users.User'

# REST framework: JWT auth, authenticated-only by default.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework_simplejwt.authentication.JWTAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    )
}

# Webpack loader
# We do this so that django's collectstatic copies or our bundles to the
# STATIC_ROOT or syncs them to whatever storage we use.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'assets'),
]
WEBPACK_LOADER = {
    'DEFAULT': {
        'BUNDLE_DIR_NAME': 'bundles/',
        'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.prod.json'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# When AWS credentials are present, store static/media files on S3.
if 'AWS_ACCESS_KEY_ID' in os.environ:
    AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
    AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
    AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
    AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
    AWS_S3_OBJECT_PARAMETERS = {
        'CacheControl': 'max-age=86400',
    }
    AWS_LOCATION = 'static'
    STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
    STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)
    # NOTE(review): the next line immediately overwrites the S3 STATIC_URL
    # built above, so static URLs always resolve to /static/ even on AWS --
    # confirm whether this override is intentional.
    STATIC_URL = '/static/'
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
else:
    STATIC_URL = '/static/'
    STATIC_ROOT = 'static'

LOGIN_REDIRECT_URL = '/'

# Allow local development overrides when DJANGO_DEV is set.
if os.environ.get('DJANGO_DEV') is not None:
    from .settings_dev import *
|
#!/usr/bin/env python
# Copyright (c) 2012, Adobe Systems Incorporated
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Adobe Systems Incorporated nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''See readme or run with no args for usage'''
import os
import sys
import tempfile
import shutil
import struct
import zlib
import hashlib
import inspect
supportsLZMA = False
try:
import pylzma
supportsLZMA = True
except:
pass
####################################
# Helpers
####################################
class stringFile(object):
    """Minimal file-like wrapper over an in-memory bytes/str buffer.

    Provides just enough of the file protocol (read/close/flush) for the
    SWF tag-parsing code to consume decompressed SWF bodies.
    """

    def __init__(self, data):
        self.data = data

    def read(self, num=-1):
        """Consume and return up to ``num`` leading bytes of the buffer.

        BUG FIX: the original returned ``self.data[:num]`` directly, so the
        default ``num=-1`` returned everything *except the last byte*
        instead of the whole remaining buffer like a real file object.
        """
        if num is None or num < 0:
            num = len(self.data)
        result = self.data[:num]
        self.data = self.data[num:]
        return result

    def close(self):
        self.data = None

    def flush(self):
        pass
def consumeSwfTag(f):
    """Read one SWF tag from file-like ``f``.

    Returns (tagType, tagBytes), where tagBytes is the raw tag including
    its record header, suitable for writing back out unchanged.
    Raises Exception on premature end of file.
    """
    tagBytes = b""
    recordHeaderRaw = f.read(2)
    tagBytes += recordHeaderRaw
    # BUG FIX: the original compared against "" (a str); reads yield bytes,
    # so EOF was never detected and struct.unpack raised a confusing error
    # instead.  Check the byte count, which also catches 1-byte truncation.
    if len(recordHeaderRaw) < 2:
        raise Exception("Bad SWF: Unexpected end of file")
    recordHeader = struct.unpack("BB", recordHeaderRaw)
    # Record header: upper 10 bits are the tag type, lower 6 the length.
    tagCode = ((recordHeader[1] & 0xff) << 8) | (recordHeader[0] & 0xff)
    tagType = (tagCode >> 6)
    tagLength = tagCode & 0x3f
    if tagLength == 0x3f:
        # Long form: 0x3f in the short field means a 32-bit little-endian
        # length follows.
        ll = f.read(4)
        longlength = struct.unpack("BBBB", ll)
        tagLength = ((longlength[3]&0xff) << 24) | ((longlength[2]&0xff) << 16) | ((longlength[1]&0xff) << 8) | (longlength[0]&0xff)
        tagBytes += ll
    tagBytes += f.read(tagLength)
    return (tagType, tagBytes)
def outputInt(o, i):
    """Write ``i`` to stream ``o`` as a native-order unsigned 32-bit int."""
    packed = struct.pack('I', i)
    o.write(packed)
def outputTelemetryTag(o, passwordClear):
    """Write an EnableTelemetry (type 93) SWF tag to stream ``o``.

    When ``passwordClear`` is given, its SHA-256 digest is appended so that
    viewing advanced telemetry requires the password.
    """
    lengthBytes = 2  # reserved 16-bit field
    passwordDigest = None
    if passwordClear:
        # BUG FIX: hashlib.update() requires bytes; sys.argv supplies str on
        # Python 3, so encode str passwords before hashing.
        if isinstance(passwordClear, str):
            passwordClear = passwordClear.encode('utf-8')
        sha = hashlib.sha256()
        sha.update(passwordClear)
        passwordDigest = sha.digest()
        lengthBytes += len(passwordDigest)
    # Record header: tag code 93; use the long form when length >= 63.
    code = 93
    if lengthBytes >= 63:
        o.write(struct.pack('<HI', code << 6 | 0x3f, lengthBytes))
    else:
        o.write(struct.pack('<H', code << 6 | lengthBytes))
    # Reserve
    o.write(struct.pack('<H', 0))
    # Password
    if passwordClear:
        o.write(passwordDigest)
####################################
# main()
####################################
if __name__ == "__main__":

    ####################################
    # Parse command line
    ####################################
    if len(sys.argv) < 2:
        print("Usage: %s SWF_FILE [TELEMETRY] [PASSWORD] [IMPORTASSETSCLEAR]" % os.path.basename(inspect.getfile(inspect.currentframe())))
        print("\nIf TELEMETRY is provided(0=off), script will insert Telemetry tag.")
        print("\nIf PASSWORD is provided(0=off), then a password will be required to view advanced telemetry in Adobe 'Monocle'.")
        print("\nIf IMPORTASSETSCLEAR is provided(0=off), script will delete ImportAssets and ImportAssets2 tag.")
        sys.exit(-1)
    infile = sys.argv[1]
    # NOTE(review): sys.argv entries are str on Python 3, so `!= b'0'` is
    # always True and passing '0' cannot actually disable these flags --
    # looks like a py2->py3 leftover; confirm and compare against '0'.
    telemetry = True if len(sys.argv) >= 3 and sys.argv[2] != b'0' else False
    passwordClear = sys.argv[3] if len(sys.argv) >= 4 and sys.argv[3] != b'0' else None
    importAssetsClear = True if len(sys.argv) >= 5 and sys.argv[4] != b'0' else False

    ####################################
    # Process SWF header
    ####################################
    # Signature is FWS (uncompressed), CWS (zlib) or ZWS (LZMA); the body
    # after the 8-byte header is decompressed into a stringFile as needed.
    swfFH = open(infile, 'rb')
    signature = swfFH.read(3)
    swfVersion = swfFH.read(1)
    struct.unpack("<I", swfFH.read(4))[0]  # uncompressed length of file
    if signature == b"FWS":
        pass
    elif signature == b"CWS":
        decompressedFH = stringFile(zlib.decompressobj().decompress(swfFH.read()))
        swfFH.close()
        swfFH = decompressedFH
    elif signature == b"ZWS":
        if not supportsLZMA:
            raise Exception("You need the PyLZMA package to use this script on \
LZMA-compressed SWFs. http://www.joachim-bauch.de/projects/pylzma/")
        swfFH.read(4)  # compressed length
        decompressedFH = stringFile(pylzma.decompress(swfFH.read()))
        swfFH.close()
        swfFH = decompressedFH
    else:
        raise Exception("Bad SWF: Unrecognized signature: %s" % signature)
    f = swfFH
    o = tempfile.TemporaryFile()
    o.write(signature)
    o.write(swfVersion)
    outputInt(o, 0)  # FileLength - we'll fix this up later
    # FrameSize - this is nasty to read because its size can vary
    rs = f.read(1)
    r = struct.unpack("B", rs)
    rbits = (r[0] & 0xff) >> 3  # bits per RECT field; total size varies
    rrbytes = (7 + (rbits*4) - 3) / 8
    o.write(rs)
    o.write(f.read((int)(rrbytes)))
    o.write(f.read(4))  # FrameRate and FrameCount

    ####################################
    # Process each SWF tag
    ####################################
    # Copy tags through, inserting an EnableTelemetry tag (type 93) after
    # FileAttributes/Metadata, dropping duplicate telemetry tags and,
    # optionally, ImportAssets tags.
    had_telemetry = False
    while True:
        (tagType, tagBytes) = consumeSwfTag(f)
        if tagType == 93:
            # raise Exception("Bad SWF: already has EnableTelemetry tag")
            # Drop pre-existing telemetry tags when we are adding our own
            # (or already saw one); otherwise keep the first one.
            if telemetry or had_telemetry:
                continue
            had_telemetry = True
        elif tagType == 92:
            raise Exception("Bad SWF: Signed SWFs are not supported")
        elif tagType == 69:
            # FileAttributes tag
            o.write(tagBytes)
            # Look ahead for Metadata tag. If present, put our tag after it
            (nextTagType, nextTagBytes) = consumeSwfTag(f)
            writeAfterNextTag = nextTagType == 77
            if writeAfterNextTag:
                o.write(nextTagBytes)
            if telemetry:
                had_telemetry = True
                outputTelemetryTag(o, passwordClear)
            # If there was no Metadata tag, we still need to write that tag out
            if not writeAfterNextTag:
                o.write(nextTagBytes)
            # Read the tag following the insertion point and de-duplicate a
            # telemetry tag there too before falling through to the common
            # write path below.
            (tagType, tagBytes) = consumeSwfTag(f)
            if tagType == 93:
                if telemetry or had_telemetry:
                    continue
        if importAssetsClear and (tagType == 57 or tagType == 71):
            print("Clear ImportAssets and ImportAssets2 at " + infile)
            continue
        o.write(tagBytes)
        if tagType == 0:
            # End tag terminates the SWF.
            break

    ####################################
    # Finish up
    ####################################
    # Fix the FileLength header
    uncompressedLength = o.tell()
    o.seek(4)
    o.write(struct.pack("I", uncompressedLength))
    o.flush()
    o.seek(0)
    # Copy the temp file to the outFile, compressing if necessary
    outFile = open(infile, "wb")
    if signature == b"FWS":
        shutil.copyfileobj(o, outFile)
    else:
        outFile.write(o.read(8))  # File is compressed after header
        if signature == b"CWS":
            outFile.write(zlib.compress(o.read()))
        elif signature == b"ZWS":
            compressed = pylzma.compress(o.read())
            outputInt(outFile, len(compressed)-5)  # LZMA SWF has CompressedLength header field
            outFile.write(compressed)
        else:
            assert(False)
    outFile.close()
    if telemetry:
        if passwordClear:
            print("Added opt-in flag with encrypted password " + passwordClear)
        else:
            print("Added opt-in flag with no password")
    if importAssetsClear:
        print("Delete ImportAssets and ImportAssets2 tag")
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tool_wappalyzer
# Purpose: SpiderFoot plug-in for using the 'Wappalyzer' tool.
# Tool: https://github.com/EnableSecurity/wappalyzer
#
# Author: <NAME> <<EMAIL>>
#
# Created: 2022-04-02
# Copyright: (c) <NAME> 2022
# Licence: MIT
# -------------------------------------------------------------------------------
import os
import sys
import json
from subprocess import Popen, PIPE, TimeoutExpired
from spiderfoot import SpiderFootPlugin, SpiderFootEvent, SpiderFootHelpers
class sfp_tool_wappalyzer(SpiderFootPlugin):
    """SpiderFoot plug-in that runs the external Wappalyzer CLI against a
    target hostname and emits one event per detected technology."""

    meta = {
        "name": "Tool - Wappalyzer",
        # Typo fix: "indentifies" -> "identifies"
        "summary": "Wappalyzer identifies technologies on websites.",
        "flags": ["tool"],
        "useCases": ["Footprint", "Investigate"],
        "categories": ["Content Analysis"],
        "toolDetails": {
            "name": "Wappalyzer",
            "description": "Wappalyzer identifies technologies on websites, including content management systems, ecommerce platforms, JavaScript frameworks, analytics tools and much more.",
            "website": "https://www.wappalyzer.com/",
            "repository": "https://github.com/AliasIO/Wappalyzer"
        }
    }

    # Default options
    opts = {
        "node_path": "/usr/bin/node",
        "wappalyzer_path": ""
    }

    # Option descriptions
    optdescs = {
        "node_path": "Path to your NodeJS binary. Must be set.",
        "wappalyzer_path": "Path to your wappalyzer cli.js file. Must be set.",
    }

    # Per-scan state:
    # results  -- de-duplication cache of hosts already scanned
    # errorState -- set once on fatal misconfiguration; later events are skipped
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module listens for."""
        return ["INTERNET_NAME"]

    def producedEvents(self):
        """Event types this module can emit."""
        return ["OPERATING_SYSTEM", "SOFTWARE_USED", "WEBSERVER_TECHNOLOGY"]

    # Handle events sent to this module
    def handleEvent(self, event):
        """Run the Wappalyzer CLI against the event's hostname and emit a
        SpiderFoot event per technology found in its JSON output."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if self.errorState:
            return

        # Validate configuration: the CLI path must point at an existing file.
        if not self.opts['wappalyzer_path']:
            self.error("You enabled sfp_tool_wappalyzer but did not set a path to the tool!")
            self.errorState = True
            return
        exe = self.opts['wappalyzer_path']
        if self.opts['wappalyzer_path'].endswith('/'):
            exe = f"{exe}cli.js"
        if not os.path.isfile(exe):
            self.error(f"File does not exist: {exe}")
            self.errorState = True
            return

        if not SpiderFootHelpers.sanitiseInput(eventData):
            self.debug("Invalid input, skipping.")
            return

        # Don't look up stuff twice
        if eventData in self.results:
            self.debug(f"Skipping {eventData} as already scanned.")
            return
        self.results[eventData] = True

        try:
            args = [self.opts["node_path"], exe, f"https://{eventData}"]
            p = Popen(args, stdout=PIPE, stderr=PIPE)
            try:
                # Hard 60-second cap on the external process.
                stdout, stderr = p.communicate(input=None, timeout=60)
                if p.returncode == 0:
                    content = stdout.decode(sys.stdin.encoding)
                else:
                    self.error("Unable to read Wappalyzer content.")
                    self.error(f"Error running Wappalyzer: {stderr}, {stdout}")
                    return
            except TimeoutExpired:
                p.kill()
                stdout, stderr = p.communicate()
                self.debug("Timed out waiting for Wappalyzer to finish")
                return
        except BaseException as e:
            self.error(f"Unable to run Wappalyzer: {e}")
            return

        try:
            data = json.loads(content)
            for item in data["technologies"]:
                for cat in item["categories"]:
                    # Map Wappalyzer categories onto SpiderFoot event types.
                    # NOTE(review): self.__name__ is presumably set on the
                    # instance by the SpiderFoot framework -- confirm.
                    if cat["name"] == "Operating systems":
                        evt = SpiderFootEvent(
                            "OPERATING_SYSTEM",
                            item["name"],
                            self.__name__,
                            event,
                        )
                    elif cat["name"] == "Web servers":
                        evt = SpiderFootEvent(
                            "WEBSERVER_TECHNOLOGY",
                            item["name"],
                            self.__name__,
                            event,
                        )
                    else:
                        evt = SpiderFootEvent(
                            "SOFTWARE_USED",
                            item["name"],
                            self.__name__,
                            event,
                        )
                    self.notifyListeners(evt)
        except (KeyError, ValueError) as e:
            self.error(f"Couldn't parse the JSON output of Wappalyzer: {e}")
            self.error(f"Wappalyzer content: {content}")
            return
# End of sfp_tool_wappalyzer class
|
import aria2p
import os
import time
import telebot
from telebot import types
import subprocess
import sys
import platform
import ctypes
import re
# Runtime configuration comes from environment variables set by the host,
# so no credentials live in the source.
Telegram_bot_api=os.environ.get('Telegram_bot_api')  # Telegram bot token
Aria2_host=os.environ.get('Aria2_host')  # aria2 RPC host URL
Aria2_port="8080"  # aria2 RPC port (fixed)
Aria2_secret=os.environ.get('Aria2_secret')  # aria2 RPC secret token
bot = telebot.TeleBot(Telegram_bot_api)
# Single shared aria2 RPC client used by every handler below.
aria2 = aria2p.API(
    aria2p.Client(
        host=Aria2_host,
        port=int(Aria2_port),
        secret=Aria2_secret
    )
)
def get_free_space_mb(path='/root/'):
    """Return the free space, in whole gigabytes, of the filesystem
    containing *path*.

    NOTE: despite the legacy name (``_mb``) the value is in decimal
    gigabytes (1000**3); callers display it with a "GB" suffix.

    Args:
        path: any path on the filesystem to inspect. The default keeps the
            original hard-coded '/root/' behaviour for existing callers.

    Returns:
        int: free space in decimal gigabytes, truncated.
    """
    result = os.statvfs(path)
    block_size = result.f_frsize
    total_blocks = result.f_blocks
    free_blocks = result.f_bfree
    # giga=1024*1024*1024
    giga = 1000 * 1000 * 1000  # decimal GB, matching how disks are marketed
    total_size = total_blocks * block_size / giga
    free_size = free_blocks * block_size / giga
    print('total_size = %s' % int(total_size))
    print('free_size = %s' % free_size)
    return int(free_size)
def progessbar(new, tot):
    """Builds progressbar

    Args:
        new: current progress
        tot: total length of the download

    Returns:
        progressbar as a string of length 20
    """
    length = 20
    # BUG FIX: guard against tot == 0. aria2 reports total_length == 0 while
    # the magnet metadata is still being fetched, which previously raised
    # ZeroDivisionError and aborted the status loop.
    if not tot:
        return '[%s] %s %s\r' % ('-' * length, 0.0, '%')
    progress = int(round(length * new / float(tot)))
    percent = round(new / float(tot) * 100.0, 1)
    bar = '=' * progress + '-' * (length - progress)
    return '[%s] %s %s\r' % (bar, percent, '%')
def hum_convert(value):
    """Convert a byte count to a human-readable string, e.g. "1.50KB".

    The value is repeatedly divided by 1024 until it fits under the next
    unit. Anything of 1024 PB or more is clamped to PB instead of silently
    returning None, which is what the original loop fall-through did.
    """
    value = float(value)
    units = ["B", "KB", "MB", "GB", "TB", "PB"]
    size = 1024.0
    for unit in units:
        if (value / size) < 1:
            return "%.2f%s" % (value, unit)
        value = value / size
    # BUG FIX: values beyond the PB step used to fall off the loop and
    # return None; report them in PB instead.
    return "%.2f%s" % (value * size, "PB")
def run_rclone(dir,title,info,file_num):
    """Upload a finished download to the configured rclone remote, mirroring
    rclone's progress log into the Telegram status message.

    Args:
        dir: local path of the file or directory to upload.
        title: download name; multi-file downloads go into a remote
            sub-folder with this name.
        info: the Telegram message object being edited with progress.
        file_num: number of files in the download.
    """
    Rclone_remote=os.environ.get('Remote')
    Upload=os.environ.get('Upload')
    # Per-transfer log file name derived from the Telegram message/chat ids.
    name=f"{str(info.message_id)}_{str(info.chat.id)}"
    if int(file_num)==1:
        shell=f"rclone copy \"{dir}\" \"{Rclone_remote}:{Upload}\" -v --stats-one-line --stats=1s --log-file=\"{name}.log\" "
    else:
        shell=f"rclone copy \"{dir}\" \"{Rclone_remote}:{Upload}/{title}\" -v --stats-one-line --stats=1s --log-file=\"{name}.log\" "
    print(shell)
    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone if a download name contains quotes - consider a list argv.
    cmd = subprocess.Popen(shell, stdin=subprocess.PIPE, stderr=sys.stderr, close_fds=True,
                           stdout=subprocess.PIPE, universal_newlines=True, shell=True, bufsize=1)
    # Poll the rclone log once a second and push changed stats to Telegram.
    temp_text=None
    while True:
        time.sleep(1)
        fname = f'{name}.log'
        with open(fname, 'r') as f:  # open the log file
            try:
                lines = f.readlines()  # read all lines
                # Scan backwards (up to 9 lines) for the last non-blank line.
                for a in range(-1,-10,-1):
                    last_line = lines[a]
                    if last_line !="\n":
                        break
                print (f"上传中\n{last_line}")
                # Only edit the Telegram message when the stats line changed,
                # and only for rclone's one-line stats (they contain "ETA").
                if temp_text != last_line and "ETA" in last_line:
                    # Parse "<time> INFO ... <bytes>, <pct>%, <speed>, ETA <eta>"
                    # from the stats line. Assumes rclone's -v one-line format;
                    # TODO confirm against the installed rclone version.
                    log_time,file_part,upload_Progress,upload_speed,part_time=re.findall("(.*?)INFO.*?(\d.*?),.*?(\d+%),.*?(\d.*?s).*?ETA.*?(\d.*?s)",last_line , re.S)[0]
                    text=f"{title}\n" \
                         f"更新时间:`{log_time}`\n" \
                         f"上传部分:`{file_part}`\n" \
                         f"上传进度:`{upload_Progress}`\n" \
                         f"上传速度:`{upload_speed}`\n" \
                         f"剩余时间:`{part_time}`"
                    bot.edit_message_text(text=text,chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
                    temp_text = last_line
                f.close()  # redundant: the with-block already closes the file
            except Exception as e:
                # Log file may be empty or the regex may not match yet.
                print(e)
                f.close()
                continue
        if subprocess.Popen.poll(cmd) == 0:  # rclone child exited successfully
            print("上传结束")
            bot.send_message(text=f"{title}\n上传结束",chat_id=info.chat.id)
            os.remove(f"{name}.log")
            return
    return
def the_download(url,message):
    """Download a magnet link via aria2, mirroring progress into a Telegram
    message, then upload the finished payload with rclone.

    Args:
        url: magnet URI to download.
        message: the Telegram message that triggered the download.
    """
    os.system("df -lh")  # log current disk usage to stdout
    try:
        download = aria2.add_magnet(url)
    except Exception as e:
        print(e)
        if (str(e).endswith("No URI to download.")):
            print("No link provided!")
            bot.send_message(chat_id=message.chat.id,text="No link provided!",parse_mode='Markdown')
            return None
    prevmessagemag = None
    info=bot.send_message(chat_id=message.chat.id,text="Downloading",parse_mode='Markdown')
    # Phase 1: wait for the magnet metadata download to finish.
    while download.is_active:
        try:
            download.update()
            print("Downloading metadata")
            bot.edit_message_text(text="Downloading metadata",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
            barop = progessbar(download.completed_length,download.total_length)
            updateText = f"Downloading \n" \
                         f"'{download.name}'\n" \
                         f"Progress : {hum_convert(download.completed_length)}/{hum_convert(download.total_length)} \n" \
                         f"Peers:{download.connections}\n" \
                         f"Speed {hum_convert(download.download_speed)}/s\n" \
                         f"{barop}\n" \
                         f"Free:{get_free_space_mb()}GB"
            # Only edit the message when the text changed, to stay under
            # Telegram's flood-control limits.
            if prevmessagemag != updateText:
                print(updateText)
                bot.edit_message_text(text=updateText,chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
                prevmessagemag = updateText
                time.sleep(2)
        except:  # NOTE(review): bare except hides real errors - narrow it
            print("Metadata download problem/Flood Control Measures!")
            bot.edit_message_text(text="Metadata download problem/Flood Control Measures!",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
            try:
                download.update()
            except Exception as e:
                if (str(e).endswith("is not found")):
                    print("Metadata Cancelled/Failed")
                    print("Metadata couldn't be downloaded")
                    bot.edit_message_text(text="Metadata couldn't be downloaded",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
                    return None
                time.sleep(2)
            time.sleep(2)
    # The metadata download spawns the real payload download; find it by gid.
    # NOTE(review): assumes followed_by_ids is non-empty - TODO confirm.
    match = str(download.followed_by_ids[0])
    downloads = aria2.get_downloads()
    currdownload = None
    for download in downloads:
        if download.gid == match:
            currdownload = download
            break
    print("Download complete")
    # Inline keyboard lets the user control the transfer from Telegram.
    markup = types.InlineKeyboardMarkup()
    markup.add(types.InlineKeyboardButton(f"Resume", callback_data=f"Resume {currdownload.gid}"),
               types.InlineKeyboardButton(f"Pause", callback_data=f"Pause {currdownload.gid}"),
               types.InlineKeyboardButton(f"Remove", callback_data=f"Remove {currdownload.gid}"))
    bot.edit_message_text(text="Download complete",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
    prevmessage = None
    # Phase 2: follow the payload download until it completes or dies.
    while currdownload.is_active or not currdownload.is_complete:
        try:
            currdownload.update()
        except Exception as e:
            if (str(e).endswith("is not found")):
                print("Magnet Deleted")
                print("Magnet download was removed")
                bot.edit_message_text(text="Magnet download was removed",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
                break
            print(e)
            print("Issue in downloading!")
        if currdownload.status == 'removed':
            print("Magnet was cancelled")
            print("Magnet download was cancelled")
            bot.edit_message_text(text="Magnet download was cancelled",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
            break
        if currdownload.status == 'error':
            print("Mirror had an error")
            currdownload.remove(force=True, files=True)
            print("Magnet failed to resume/download!\nRun /cancel once and try again.")
            bot.edit_message_text(text="Magnet failed to resume/download!\nRun /cancel once and try again.",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
            break
        print(f"Magnet Status? {currdownload.status}")
        if currdownload.status == "active":
            try:
                currdownload.update()
                barop = progessbar(currdownload.completed_length,currdownload.total_length)
                updateText = f"Downloading \n" \
                             f"'{currdownload.name}'\n" \
                             f"Progress : {hum_convert(currdownload.completed_length)}/{hum_convert(currdownload.total_length)} \n" \
                             f"Peers:{currdownload.connections}\n" \
                             f"Speed {hum_convert(currdownload.download_speed)}/s\n" \
                             f"{barop}\n" \
                             f"Free:{get_free_space_mb()}GB"
                if prevmessage != updateText:
                    print(f"更新状态\n{updateText}")
                    bot.edit_message_text(text=updateText,chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
                    prevmessage = updateText
                    time.sleep(2)
            except Exception as e:
                if (str(e).endswith("is not found")):
                    break
                print(e)
                print("Issue in downloading!")
                time.sleep(2)
        elif currdownload.status == "paused":
            try:
                currdownload.update()
                barop = progessbar(currdownload.completed_length,currdownload.total_length)
                updateText = f"Downloading \n" \
                             f"'{currdownload.name}'\n" \
                             f"Progress : {hum_convert(currdownload.completed_length)}/{hum_convert(currdownload.total_length)} \n" \
                             f"Peers:{currdownload.connections}\n" \
                             f"Speed {hum_convert(currdownload.download_speed)}/s\n" \
                             f"{barop}\n" \
                             f"Free:{get_free_space_mb()}GB"
                if prevmessage != updateText:
                    print(f"更新状态\n{updateText}")
                    bot.edit_message_text(text=updateText,chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
                    prevmessage = updateText
                    time.sleep(2)
            except Exception as e:
                print(e)
                print("Download Paused Flood")
                time.sleep(2)
            time.sleep(2)
        time.sleep(1)
    if currdownload.is_complete:
        print(currdownload.name)
        try:
            print("开始上传")
            file_dir=f"{currdownload.dir}/{currdownload.name}"
            files_num=int(len(currdownload.files))
            run_rclone(file_dir,currdownload.name,info=info,file_num=files_num)
            # Remove the finished download (and files) from aria2 after upload.
            currdownload.remove(force=True,files=True)
        except Exception as e:
            print(e)
            print("Upload Issue!")
    return None
def http_download(url,message):
    """Download a direct HTTP/FTP url via aria2, mirroring progress into a
    Telegram message, then upload the finished payload with rclone.

    Args:
        url: direct download link.
        message: the Telegram message that triggered the download.
    """
    try:
        currdownload = aria2.add_uris([url])
    except Exception as e:
        print(e)
        if (str(e).endswith("No URI to download.")):
            print("No link provided!")
            bot.send_message(chat_id=message.chat.id,text="No link provided!",parse_mode='Markdown')
            return None
    # Inline keyboard lets the user control the transfer from Telegram.
    markup = types.InlineKeyboardMarkup()
    markup.add(types.InlineKeyboardButton(f"Resume", callback_data=f"Resume {currdownload.gid}"),
               types.InlineKeyboardButton(f"Pause", callback_data=f"Pause {currdownload.gid}"),
               types.InlineKeyboardButton(f"Remove", callback_data=f"Remove {currdownload.gid}"))
    info=bot.send_message(chat_id=message.chat.id,text="Downloading",parse_mode='Markdown')
    prevmessage=None
    # Follow the download until it completes or dies.
    while currdownload.is_active or not currdownload.is_complete:
        try:
            currdownload.update()
        except Exception as e:
            if (str(e).endswith("is not found")):
                print("url Deleted")
                print("url download was removed")
                bot.edit_message_text(text="url download was removed",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
                break
            print(e)
            print("url in downloading!")
        if currdownload.status == 'removed':
            print("url was cancelled")
            print("url download was cancelled")
            # BUG FIX: message said "Magnet download was cancelled" - a
            # copy-paste from the magnet path; this is a url download.
            bot.edit_message_text(text="url download was cancelled",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown')
            break
        if currdownload.status == 'error':
            print("url had an error")
            currdownload.remove(force=True, files=True)
            print("url failed to resume/download!.")
            # BUG FIX: message said "Magnet failed..." - same copy-paste.
            bot.edit_message_text(text="url failed to resume/download!\nRun /cancel once and try again.",chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
            break
        print(f"url Status? {currdownload.status}")
        if currdownload.status == "active":
            try:
                currdownload.update()
                barop = progessbar(currdownload.completed_length,currdownload.total_length)
                updateText = f"Downloading \n" \
                             f"'{currdownload.name}'\n" \
                             f"Progress : {hum_convert(currdownload.completed_length)}/{hum_convert(currdownload.total_length)} \n" \
                             f"Speed {hum_convert(currdownload.download_speed)}/s\n" \
                             f"{barop}\n" \
                             f"Free:{get_free_space_mb()}GB"
                # Only edit the message when the text changed (flood control).
                if prevmessage != updateText:
                    print(f"更新状态\n{updateText}")
                    bot.edit_message_text(text=updateText,chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
                    prevmessage = updateText
                    time.sleep(2)
            except Exception as e:
                if (str(e).endswith("is not found")):
                    break
                print(e)
                print("Issue in downloading!")
                time.sleep(2)
        elif currdownload.status == "paused":
            try:
                currdownload.update()
                barop = progessbar(currdownload.completed_length,currdownload.total_length)
                updateText = f"Downloading \n" \
                             f"'{currdownload.name}'\n" \
                             f"Progress : {hum_convert(currdownload.completed_length)}/{hum_convert(currdownload.total_length)} \n" \
                             f"Speed {hum_convert(currdownload.download_speed)}/s\n" \
                             f"{barop}\n" \
                             f"Free:{get_free_space_mb()}GB"
                if prevmessage != updateText:
                    print(f"更新状态\n{updateText}")
                    bot.edit_message_text(text=updateText,chat_id=info.chat.id,message_id=info.message_id,parse_mode='Markdown', reply_markup=markup)
                    prevmessage = updateText
                    time.sleep(2)
            except Exception as e:
                print(e)
                print("Download Paused Flood")
                time.sleep(2)
            time.sleep(2)
        time.sleep(1)
    if currdownload.is_complete:
        print(currdownload.name)
        try:
            print("开始上传")
            file_dir=f"{currdownload.dir}/{currdownload.name}"
            run_rclone(file_dir,currdownload.name,info=info,file_num=1)
            # Remove the finished download (and files) from aria2 after upload.
            currdownload.remove(force=True,files=True)
        except Exception as e:
            print(e)
            print("Upload Issue!")
    return None
<filename>multienum.py
"""MultiEnum class for enumerations for multiple equivalent names.
Full documentation is in the README. At present, this has only been tested in
CPython 3.4, although it should work for any recent version which allows
subclassing of the core int type.
"""
# pylint: disable=W0212,E1101
class MultiEnum(int):
    """Integer-based enumeration in which each value carries several
    equivalent names.

    :cvar sequence _members: one tuple of names per enumerated value
    :cvar sequence _fields: the field name for each position in a member tuple
    :ivar str name: the default (first) name defined for the value

    >>> class SampleEnum(MultiEnum):
    ...     _members = (("zero", "zip", "zero", "cero"),
    ...                 ("one", "ace", "une", "uno"),
    ...                 ("two", "deuce", "deux", "dos"))
    ...     _fields = ('english', 'slang', 'french', 'spanish')
    >>> val1 = SampleEnum("one")
    >>> int(val1)
    1
    >>> str(val1)
    'one'
    >>> val1.name
    'one'
    >>> val1.spanish
    'uno'
    >>> int(SampleEnum(slang="deuce"))
    2

    An instance iterates as (field, name) pairs, so it converts to a dict:

    >>> dict(SampleEnum('two'))['french']
    'deux'

    For Django's ``choices`` parameter, ``_choices()`` returns two-tuples
    drawn from ``_fields`` (by default the first two). The ``_choice_fields``
    attribute selects a different pair of fields ('_enum' being the integer
    value itself) and ``_choice_range`` restricts the value range:

    >>> SampleEnum._choices()
    (('zero', 'zip'), ('one', 'ace'), ('two', 'deuce'))
    >>> class SampleChoicesEnum(SampleEnum):
    ...     _choice_fields = ('_enum', 'french')
    ...     _choice_range = (1, 3)
    >>> SampleChoicesEnum._choices()
    ((1, 'une'), (2, 'deux'))
    """

    _members = None
    _fields = None
    _ignore_case = False

    @classmethod
    def _resolve_value(cls, val):
        """Map *val* (enum instance, int, or any known name) to its index."""
        if type(val) is cls:
            # Already an enum
            return val
        if type(val) is int:
            return val
        text = str(val)
        if cls._ignore_case:
            wanted = text.casefold()
            matches = (idx for idx, names in enumerate(cls._members)
                       if wanted in [n.casefold() for n in names])
        else:
            matches = (idx for idx, names in enumerate(cls._members)
                       if text in names)
        try:
            return next(matches)
        except StopIteration:
            raise AttributeError(
                "Enumeration name '%s' not in _members" % val)

    def __new__(cls, *args, **kwargs):
        if cls._members is None:
            raise TypeError("No _members given at definition")
        if len(args) + len(kwargs) != 1:
            raise TypeError(
                "Enumeration creation takes exactly one parameter")
        if args:
            value = cls._resolve_value(args[0])
        for field_name, field_value in kwargs.items():
            # Keyword form: look the name up in the named field's column.
            column = tuple(cls._fields).index(field_name)
            value = tuple(row[column] for row in cls._members).index(field_value)
        if type(value) is not cls:
            value = super(MultiEnum, cls).__new__(cls, value)
        # Cache this value's name tuple for attribute access.
        value._names = tuple(cls._members[int(value)])
        return value

    def __getattr__(self, key):
        if self._fields is None:
            raise AttributeError(
                "Attribute missing: MultiEnum object does not have _fields defined")
        try:
            position = self._fields.index(key)
        except ValueError:
            raise AttributeError("Enumeration name '%s' not defined" % key)
        return self._names[position]

    @classmethod
    def _choices(cls):
        start, stop = getattr(cls, '_choice_range', (0, len(cls._members)))
        columns = []
        fields = cls._fields or (0, 1)
        for field in getattr(cls, '_choice_fields', (fields[0], fields[1])):
            if field == '_enum':
                # The virtual '_enum' field yields the integer values.
                columns.append(range(0, len(cls._members)))
            else:
                position = fields.index(field)
                columns.append(tuple(row[position] for row in cls._members))
        return tuple(zip(columns[0][start:stop], columns[1][start:stop]))

    @property
    def name(self):
        """Default name for number"""
        return self._names[0]

    def __iter__(self):
        # Iterating yields (field, name) pairs so dict(enum) works.
        return iter(zip(self._fields, self._members[self]))

    def __str__(self):
        return self._names[0]

    def __repr__(self):
        return "'%s'" % self._names[0]
|
import re

# Regex tutorial script (comments translated from Portuguese).
# While search() returns a match object for the FIRST occurrence in the
# searched string, findall() returns ALL matching substrings.
phoneNumRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')
mo = phoneNumRegex.search('Cell: 415-555-9999 Work: 212-555-0000')
print(mo.group())

# findall() does not return a match object but a list of strings - as long
# as the regex has no groups. Each string is a piece of the searched text
# that matched the pattern.
phoneNumRegex = re.compile(r'\d\d\d-\d\d\d-\d\d\d\d')  # no groups
print(phoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000'))

# If the regex has groups, findall() returns a list of tuples. Each tuple is
# one match; its items are the strings matched by each group.
phoneNumRegex = re.compile(r'(\d\d\d)-(\d\d\d)-(\d\d\d\d)')  # has groups
print(phoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000'))

# Character-class examples:
xmasRegex = re.compile(r'\d+\s\w+')  # \d+: digits; \s: whitespace; \w+: word chars
xmasRegex.findall('12 drummers, 11 pipers, 10 lords, 9 ladies, 8 maids, 7 swans, 6 geese, 5 rings, 4 birds, 3 hens,'
                  '2 doves, 1 partridge')

# Creating your own character classes: the shorthand classes are sometimes
# too broad, so a custom class can be defined, for example:
vowelRegex = re.compile(r'[aeiouAEIOU]')
print(vowelRegex.findall('RoboCop eats baby food. BABY FOOD.'))

# Ranges of letters or digits can be included with a hyphen, e.g.
# [a-zA-Z0-9]. A caret right after the opening bracket makes the class
# negative (match anything NOT listed):
consonantRegex = re.compile(r'[^aeiouAEIOU]')
print(consonantRegex.findall('Robocop eats baby food. BABY FOOD.'))  # non-vowels, spaces included

# To exclude spaces and dots as well, add them to the class:
consonantRegex = re.compile(r'[^aeiouAEIOU .]')
print(consonantRegex.findall('Robocop eats baby food. BABY FOOD.'))

# Caret (^) and dollar ($): ^ at the start anchors the match to the
# beginning of the text, $ at the end anchors it to the end. Used together,
# the entire string must match the regex.
beginsWithHello = re.compile(r'^Hello')
print(beginsWithHello.search('Hello World'))
print(beginsWithHello.search('He said hello.') is None)

# r'\d$' matches strings ending with a digit 0-9:
endsWithNumber = re.compile(r'\d$')
print(endsWithNumber.search('Your number is 42'))
print(endsWithNumber.search('Your number is forty two') is None)

# r'^\d+$' matches strings made entirely of one or more digits:
wholeStringIsNum = re.compile(r'^\d+$')
print(wholeStringIsNum.search('1234567890'))
print(wholeStringIsNum.search('12345xyz67890') is None)
print(wholeStringIsNum.search('12 34567890') is None)

# The . wildcard matches any character except a newline:
atRegex = re.compile(r'.at')  # the dot matches one character: "flat" yields "lat"
print(atRegex.findall('The cat in the hat sat on the flat mat.'))

# Matching everything with dot-star:
nameRegex = re.compile(r'First Name:(.*) Last Name:(.*)')
mo = nameRegex.search('First Name:Isaias Last Name:Ramos')
print(mo.group(1))
print(mo.group(2))

# Case-insensitive matching (re.IGNORECASE can be abbreviated re.I):
robocop = re.compile(r'robocop', re.IGNORECASE)
print(robocop.search('RoboCop is part man, part machine, all cop.').group())
print(robocop.search('ROBOCOP protects the innocent.').group())
print(robocop.search('Al, why does your programming book talk about robocop so much?').group())

# Substituting strings with sub():
namesRegex = re.compile(r'Agent \w+')
print(namesRegex.sub('CENSORED', 'Agent Alice gave the secret documents to Agent Bob.'))

# Managing complex regexes: re.VERBOSE as the second argument makes
# re.compile() ignore whitespace and comments inside the pattern string.
phoneRegex = re.compile(r'''(
    (\d{3}|\(\d{3}\))? # codigo de área
    (\s|-|\.)? # separador
    \d{3} # primeiros 3 dígitos
    (\s|-|\.) # separador
    \d{4} # últimos 4 dígitos
    (\s*(ext|x|ext.)\s*\d{2,5})? # extensão
    )''', re.VERBOSE)
# BUG FIX: this line used phoneNumRegex, so the VERBOSE pattern compiled
# above was never exercised; use phoneRegex as intended.
print(phoneRegex.findall('Cell: 415-555-9999 Work: 212-555-0000'))
# Note that everything after a # inside a VERBOSE pattern is ignored.
|
<gh_stars>1-10
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from typing import ClassVar, Dict, List
import torch
from habitat import Config, logger
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.common.utils import poll_checkpoint_folder
class BaseTrainer:
    r"""Generic trainer template.

    Serves as the base for more specific trainers (RL, SLAM, imitation
    learning). Only the most basic hooks are declared here; every one of
    them is abstract and must be overridden by a concrete subclass.
    """

    # Task names a concrete trainer supports (declared by subclasses).
    supported_tasks: ClassVar[List[str]]

    def train(self) -> None:
        """Run training; concrete trainers must implement this."""
        raise NotImplementedError

    def eval(self) -> None:
        """Run evaluation; concrete trainers must implement this."""
        raise NotImplementedError

    def save_checkpoint(self, file_name) -> None:
        """Persist trainer state under *file_name*; must be implemented."""
        raise NotImplementedError

    def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
        """Load trainer state from *checkpoint_path*; must be implemented."""
        raise NotImplementedError
class BaseRLTrainer(BaseTrainer):
    r"""Base trainer class for RL trainers. Future RL-specific
    methods should be hosted here.
    """

    # Torch device used during evaluation (set in eval()).
    device: torch.device
    config: Config
    video_option: List[str]
    _flush_secs: int

    def __init__(self, config: Config):
        super().__init__()
        assert config is not None, "needs config file to initialize trainer"
        self.config = config
        # How often (in seconds) the tensorboard writer flushes to disk.
        self._flush_secs = 30

    @property
    def flush_secs(self):
        return self._flush_secs

    @flush_secs.setter
    def flush_secs(self, value: int):
        self._flush_secs = value

    def train(self) -> None:
        raise NotImplementedError

    def eval(self) -> None:
        r"""Main method of trainer evaluation. Calls _eval_checkpoint() that
        is specified in Trainer class that inherits from BaseRLTrainer

        Returns:
            None
        """
        self.device = torch.device("cuda", self.config.TORCH_GPU_ID)
        if "tensorboard" in self.config.VIDEO_OPTION:
            assert (
                len(self.config.TENSORBOARD_DIR) > 0
            ), "Must specify a tensorboard directory for video display"
        if "disk" in self.config.VIDEO_OPTION:
            assert (
                len(self.config.VIDEO_DIR) > 0
            ), "Must specify a directory for storing videos on disk"
        with TensorboardWriter(
            self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
        ) as writer:
            if os.path.isfile(self.config.EVAL_CKPT_PATH_DIR):
                # evaluate singe checkpoint
                self._eval_checkpoint(self.config.EVAL_CKPT_PATH_DIR, writer)
            else:
                # evaluate multiple checkpoints in order
                # NOTE(review): this loop never terminates by itself - it
                # polls the folder for new checkpoints forever; presumably
                # stopped externally. TODO confirm intended lifecycle.
                prev_ckpt_ind = -1
                while True:
                    current_ckpt = None
                    while current_ckpt is None:
                        current_ckpt = poll_checkpoint_folder(
                            self.config.EVAL_CKPT_PATH_DIR, prev_ckpt_ind
                        )
                        time.sleep(2)  # sleep for 2 secs before polling again
                    logger.info(f"=======current_ckpt: {current_ckpt}=======")
                    prev_ckpt_ind += 1
                    self._eval_checkpoint(
                        checkpoint_path=current_ckpt,
                        writer=writer,
                        checkpoint_index=prev_ckpt_ind,
                    )

    def _setup_eval_config(self, checkpoint_config: Config) -> Config:
        r"""Sets up and returns a merged config for evaluation. Config
        object saved from checkpoint is merged into config file specified
        at evaluation time with the following overwrite priority:
        eval_opts > ckpt_opts > eval_cfg > ckpt_cfg
        If the saved config is outdated, only the eval config is returned.

        Args:
            checkpoint_config: saved config from checkpoint.

        Returns:
            Config: merged config for eval.
        """
        config = self.config.clone()
        ckpt_cmd_opts = checkpoint_config.CMD_TRAILING_OPTS
        eval_cmd_opts = config.CMD_TRAILING_OPTS
        try:
            # Merge order implements the documented overwrite priority:
            # eval_opts > ckpt_opts > eval_cfg > ckpt_cfg.
            config.merge_from_other_cfg(checkpoint_config)
            config.merge_from_other_cfg(self.config)
            config.merge_from_list(ckpt_cmd_opts)
            config.merge_from_list(eval_cmd_opts)
        except KeyError:
            logger.info("Saved config is outdated, using solely eval config")
            config = self.config.clone()
            config.merge_from_list(eval_cmd_opts)
        if config.TASK_CONFIG.DATASET.SPLIT == "train":
            # Never evaluate on the training split; fall back to "val".
            config.TASK_CONFIG.defrost()
            config.TASK_CONFIG.DATASET.SPLIT = "val"
        config.TASK_CONFIG.SIMULATOR.AGENT_0.SENSORS = self.config.SENSORS
        config.freeze()
        return config

    def _eval_checkpoint(
        self,
        checkpoint_path: str,
        writer: TensorboardWriter,
        checkpoint_index: int = 0,
    ) -> None:
        r"""Evaluates a single checkpoint. Trainer algorithms should
        implement this.

        Args:
            checkpoint_path: path of checkpoint
            writer: tensorboard writer object for logging to tensorboard
            checkpoint_index: index of cur checkpoint for logging

        Returns:
            None
        """
        raise NotImplementedError

    def save_checkpoint(self, file_name) -> None:
        raise NotImplementedError

    def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
        raise NotImplementedError
|
<filename>dreamwidget/boshclient_foo.py<gh_stars>0
# Original source https://friendpaste.com/1R4PCcqaSWiBsveoiq3HSy
# Copied at 2012-11-12
# Modified to suit our needs
import httplib, sys, random
from urlparse import urlparse
from xml.dom import minidom
class BOSHClient:
    """Minimal XMPP-over-BOSH (XEP-0124) client (Python 2).

    Opens a BOSH session and pre-authenticates it so the resulting
    sid/rid pair can be handed to another client (e.g. a browser).
    """

    def __init__(self, jid, password, bosh_service, debug=False):
        """
        Initialize the client.

        You must specify the Jabber ID, the corresponding password and the URL
        of the BOSH service to connect to.
        """
        self.debug = debug
        self.connection = None
        self.jid = jid
        self.sid = None  # session id, assigned by the server on session request
        self.rid = None  # request id; seeded randomly, sent with each request
        self.password = password
        self.bosh_service = urlparse(bosh_service)
        self.generate_rid()
        self.log('Init RID: %s' % self.rid)
        self.content_type = "text/xml; charset=utf-8"
        self.headers = {
            "Content-type": "text/plain; charset=UTF-8",
            "Accept": "text/xml",
        }
        self.server_auth = []

    def log(self, message):
        # Debug messages go to stdout only when debug=True.
        if self.debug:
            print '[DEBUG] ' + message

    def generate_rid(self):
        # Random initial request id, per XEP-0124 recommendations.
        self.rid = random.randint(0, 10000000)

    def init_connection(self):
        """Open the HTTP connection to the BOSH endpoint."""
        self.log('Initializing connection to %s' % (self.bosh_service.netloc))
        self.connection = httplib.HTTPConnection(self.bosh_service.netloc)
        self.log('Connection initialized')
        # TODO add exceptions handler there (URL not found etc)

    def close_connection(self):
        """Close the underlying HTTP connection."""
        self.log('Closing connection')
        self.connection.close()
        self.log('Connection closed')
        # TODO add execptions handler there

    def wrap_stanza_body(self, stanza, more_body=''):
        """Wrap the XMPP stanza with the <body> element (required for BOSH)"""
        return"<body rid='%s' sid='%s' %s xmlns='http://jabber.org/protocol/httpbind'>%s</body>" % (self.rid, self.sid, more_body, stanza)

    def send_request(self, xml_stanza):
        """
        Send a request to self.bosh_service.path using POST containing
        xml_stanza with self.headers.

        Returns the data contained in the response (only if status == 200)
        Returns False if status != 200
        """
        self.log('XML_STANZA:')
        self.log(xml_stanza)
        self.log('Sending the request')
        self.connection.request("POST", self.bosh_service.path, xml_stanza, self.headers)
        response = self.connection.getresponse()
        data = ''
        self.log('Response status code: %s' % response.status)
        if response.status == 200:
            data = response.read()
        else:
            self.log('Something wrong happened!')
            return False
        self.log('DATA:')
        self.log(data)
        return data

    def request_bosh_session(self):
        """
        Request a BOSH session according to
        http://xmpp.org/extensions/xep-0124.html#session-request

        Returns the new SID (str).

        This function also fill many fields of this BOSHClient object, such as:
        * sid
        * server_wait
        * server_auth_methods
        """
        self.log('Prepare to request BOSH session')
        # NOTE(review): the 'to' domain is hard-coded to msg.dreamschool.fi
        # rather than derived from self.jid - presumably intentional for this
        # deployment; confirm before reuse.
        xml_stanza = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' to='msg.dreamschool.fi' xml:lang='en' wait='60' hold='1' window='5' content='text/xml; charset=utf-8' ver='1.6' xmpp:version='1.0' xmlns:xmpp='urn:xmpp:xbosh'/>" % (self.rid)
        data = self.send_request(xml_stanza)
        if not data:
            return None
        # This is XML. response_body contains the <body/> element of the
        # response.
        response_body = minidom.parseString(data).documentElement
        # Get the remote Session ID
        self.sid = response_body.getAttribute('sid')
        self.log('sid = %s' % self.sid)
        # Get the longest time (s) that the XMPP server will wait before
        # responding to any request.
        self.server_wait = response_body.getAttribute('wait')
        self.log('wait = %s' % self.server_wait)
        # Get the authid
        self.authid = response_body.getAttribute('authid')
        # Get the allowed authentication methods
        stream_features = response_body.firstChild
        auth_list = []
        try:
            mechanisms = stream_features.getElementsByTagNameNS('urn:ietf:params:xml:ns:xmpp-sasl', 'mechanisms')[0]
            if mechanisms.hasChildNodes():
                for child in mechanisms.childNodes:
                    auth_method = child.firstChild.data
                    auth_list.append(auth_method)
                    self.log('New AUTH method: %s' % auth_method)
                self.server_auth = auth_list
            else:
                self.log('The server didn\'t send the allowed authentication methods')
        except AttributeError:
            self.log('The server didn\'t send the allowed authentication methods')
            # FIXME: BIG PROBLEM THERE! AUTH METHOD MUSTN'T BE GUEST!
            # NOTE(review): '<PASSWORD>' looks like a redacted placeholder
            # for a fallback auth method - restore the real value.
            auth_list = ['<PASSWORD>']
            self.server_auth = auth_list
        return self.sid

    def authenticate_xmpp(self):
        """
        Authenticate the user to the XMPP server via the BOSH connection.

        You MUST have the following settings set:
        * self.sid
        * self.jid
        * self.password
        * self.rid
        * self.server_auth

        Note also that the connection MUST be opened (see self.init_connection).
        """
        self.log('Prepare the XMPP authentication')
        self.send_request(self.wrap_stanza_body('='))
        # NOTE(review): only the initial <auth/> element is sent here; the
        # password is never used, so the DIGEST-MD5 challenge/response must
        # be completed elsewhere - TODO confirm.
        xml_stanza = "<body rid='%s' xmlns='http://jabber.org/protocol/httpbind' sid='%s'><auth xmlns='urn:ietf:params:xml:ns:xmpp-sasl' mechanism='DIGEST-MD5'/></body>" % (self.rid, self.sid)
        self.send_request(unicode(xml_stanza))
def request_bosh_session(username, password, service):
    """Open and pre-authenticate a BOSH session for *username*.

    Returns:
        dict with the 'jid', 'sid' and 'rid' an external XMPP client needs
        to attach to the established session.
    """
    c = BOSHClient(username, password, service, debug=True)
    c.init_connection()
    if c.request_bosh_session():
        c.authenticate_xmpp()
    c.close_connection()
    return {'jid': c.jid, 'sid': c.sid, 'rid': c.rid}


if __name__ == '__main__':
    # BUG FIX: this guard originally appeared BEFORE request_bosh_session
    # was defined, so running the module as a script raised NameError.
    # The function definition now precedes its use.
    USERNAME = sys.argv[1]
    PASSWORD = sys.argv[2]
    SERVICE = sys.argv[3]
    request_bosh_session(USERNAME, PASSWORD, SERVICE)
|
<gh_stars>1-10
from enum import Enum
import re, os
# Maximum number of (non-blank) lines a function may span before it is flagged.
MAX_FCN = 65
# Regex matching a C++ function-definition header line in a source file.
# NOTE(review): these patterns are non-raw strings containing regex escapes
# (e.g. "\s", "\w"); raw string literals (r"...") would be safer -- left
# unchanged here.
FUNCTION_REGEX_SOURCE = "\s*(inline|)\s*\w*(::\w*(<\w*>|)|)(&|)(\*|) \w*(::\w*(<\w*>|)|)([\[\]<>+=\-\*\/]*|)\([a-zA-Z0-9_\*<>&:, ]*\) (const|) {"
# "//" comments matching this (plain prose) are accepted; anything else on a
# comment line is treated as commented-out code.
ACCEPT_COMMENT_REGEX = "\s*\/\/[\s[A-Za-z0-9,\.-]*]*$"
# Regex matching a class-definition header line.
CLASS_FIND_REGEX = "class \w* {"
# Pragma strings: their presence in the file content relaxes individual checks.
ALLOW_NO_HEADER = "allow-no-header"
ALLOW_COMMENTS = "allow-comments"
ALLOW_NO_SOURCE = "allow-no-source"
ALLOW_CLASS_DEFS = "allow-class-definitions"
ALLOW_HEADER_IMPL = "allow-impl-in-header"
# Message prefixes; {file} is filled in via str.format.
ERROR_BASE = "Error: {file}"
WARNING_BASE = "Warning: {file}"
class Error(Enum):
    """Categories of style violations detected by this checker."""
    COMMENT = 1             # commented-out code inside a "//" comment
    CLASS_IN_SOURCE = 2     # class defined in a source file instead of a header
    FUNCTION_TOO_LONG = 3   # function spans more than MAX_FCN lines
    FUNCTION_IN_HEADER = 4  # function implemented in a header file
class ErrorText(Enum):
    """Printable message templates, one per violation kind.

    Each value is a str.format template; ``format`` below fills it in.
    """
    CODE_WITH_COMMENT = ERROR_BASE + ":(Line {line}) Comments containing code must be deleted"
    CLASS_DEF_IN_SOURCE = ERROR_BASE + ":(Line {line}) Class definitions should be in corresponding headers"
    FUNCTION_TOO_LONG = ERROR_BASE + """ Function spans too many lines. Recommended to be less than """ + str(MAX_FCN) + """ (currently {line_count})
\tLine {line} | {code} """
    FUNCTION_IN_HEADER = ERROR_BASE + ":(Line {line}) Function implementations should not be in header code"
    HEADER_FILE_MISSING = ERROR_BASE + " Source files must have corresponding header"
    SOURCE_FILE_MISSING = ERROR_BASE + " Header files must have corresponding source file"

    def format(text, **kwargs):
        # Shadows str.format; called as a bound method, so `text` is the
        # enum member itself (it plays the role of `self`).
        return text.value.format(**kwargs)

    def __str__(self):
        return self.value
# Recognized file extensions for header files and translation units.
header_endings = [".h", ".hpp", ".hpp.src"]
source_endings = [".c", ".cpp", ".cpp.src", ".cc"]
def add_error(errors, line, type, meta):
    # Record a violation for `line` in the `errors` dict and return the dict.
    #
    # NOTE(review): the two branches build inconsistent structures.  The
    # first error on a line is stored as a flat pair [type, meta]; a second
    # error on the same line APPENDS another [type, meta] pair as a third
    # element.  The consumer (launch_custom) only reads errors[e][0] and
    # errors[e][1], so any error after the first on a given line is silently
    # dropped -- confirm whether that is intended before changing either side.
    if (line in errors):
        errors[line].append([type, meta])
    else:
        errors[line] = [type, meta]
    return errors
def loop(file_data, source=True):
    """Scan *file_data* line by line and collect style violations.

    *source* is True for a translation unit, False for a header.  Returns
    ``(errors, fcn_data)`` where ``errors`` maps a 0-based line number to a
    ``[Error, payload]`` entry (see add_error) and ``fcn_data`` maps a
    function header string to its stats from identify_function.
    """
    line = 0
    fcn_data = {}
    errors = {}
    # Pragmas anywhere in the file content relax the corresponding checks.
    allow_comments = ALLOW_COMMENTS in file_data
    allow_class = ALLOW_CLASS_DEFS in file_data
    allow_header_impl = ALLOW_HEADER_IMPL in file_data
    allow_no_source = ALLOW_NO_SOURCE in file_data
    for l in file_data.split("\n"):
        # Flag "//" comments that look like commented-out code.
        if ("//" in l and "NOLINT" not in l and "namespace" not in l and not allow_comments):
            l_ = "//" + l.split("//")[1]
            if (not re.match(ACCEPT_COMMENT_REGEX, l_)):
                errors = add_error(errors, line, Error.COMMENT, [line, line])
        # Function definition header ("for " excludes range-for lines that
        # happen to match the pattern).
        if (re.match(FUNCTION_REGEX_SOURCE, l) and "for " not in l):
            header = l[:-2]  # strip the trailing " {"
            meta = identify_function(file_data, header, line)
            meta.append(l)
            # meta[2] is the non-blank line count of the function body.
            if (meta[2] > MAX_FCN):
                errors = add_error(errors, line, Error.FUNCTION_TOO_LONG, [header, meta])
            fcn_data[header] = meta
            if (not allow_no_source):
                # Implementations in headers are flagged unless allowed.
                if (not source and not allow_header_impl):
                    errors = add_error(errors, line, Error.FUNCTION_IN_HEADER, [header, meta])
        # Class definitions are only an error in source files.
        if (re.match(CLASS_FIND_REGEX, l) and (not allow_class and source)):
            if ("default" not in l):
                errors = add_error(errors, line, Error.CLASS_IN_SOURCE, [line])
        line += 1
    return errors, fcn_data
def identify_function(file_data, header, line):
    """Measure the function whose header line starts at *header*.

    *line* is the 0-based line number of the header within *file_data*.
    The end of the function is found by brace matching from the header
    line onward.

    Returns ``[start_line, end_line, non_blank_line_count]``.
    """
    start = file_data.find(header)
    start_l = line
    # Fix: the nesting counter was named `open`, shadowing the builtin;
    # also removed the dead locals `end` and the initial `stats = []`.
    depth = 0  # brace nesting depth ("{" opens, "}" closes)
    lc = 0     # number of non-blank lines spanned so far
    for li in file_data[start:].split("\n"):
        depth += li.count("{") - li.count("}")
        line += 1
        lc += 1 if (li != "") else 0
        if depth == 0:
            # Closing brace matched (or, as before, a header line without
            # any brace terminates immediately).
            break
    return [start_l, line, lc]
def identify_class_def_in_source(file_data):
    """Return the 0-based line numbers of class definitions in *file_data*.

    Bug fix: the original never incremented its line counter, so every
    match was reported as line 0.
    """
    class_defs = []
    for line, l in enumerate(file_data.split("\n")):
        if (re.match(CLASS_FIND_REGEX, l)):
            class_defs.append(line)
    return class_defs
def check_if_exists(files):
    """Return True as soon as any of the given paths exists on disk."""
    return any(os.path.exists(path) for path in files)
# def order_verify(source_file, header_file):
def launch_custom(file):
    """Run all style checks on *file*, print violations, return True if clean.

    Header files additionally require a sibling source file (and vice
    versa) unless the first line of the file carries the corresponding
    allow-* pragma.
    """
    # Note: the parameter name `file` shadows the Py2 builtin; kept as-is
    # because it is part of the function's public signature.
    with open(file) as f:
        file_data = f.read()
    file_ending = "." + file.split(".")[-1]
    file_base = file.split(".")[0]
    check_files = []
    if (file_ending in header_endings):
        source = False
        check_files = [file_base + c for c in source_endings]
        if (not check_if_exists(check_files) and ALLOW_NO_SOURCE not in file_data.split("\n")[0]):
            print (ErrorText.SOURCE_FILE_MISSING.format(file=file))
    else:
        source = True
        check_files = [file_base + c for c in header_endings]
        if (not check_if_exists(check_files) and ALLOW_NO_HEADER not in file_data.split("\n")[0]):
            print (ErrorText.HEADER_FILE_MISSING.format(file=file))
    errors, fc = loop(file_data, source)
    # if (source == False):
    #     print (fc)
    # Report violations in line order.  Each entry is [Error, payload]
    # (see add_error); only the first violation per line is stored flat.
    for e in sorted(list(errors.keys())):
        meta = errors[e]
        error_code = meta[0]  # Error enum member
        meta = meta[1]        # per-violation payload
        if (error_code == Error.COMMENT):
            print (ErrorText.CODE_WITH_COMMENT.format(file=file, line=meta[0]+1))
        elif (error_code == Error.FUNCTION_TOO_LONG):
            # print (meta)
            # payload is [header, stats]; stats = [start, end, count, code]
            line_data = meta[1]
            start = line_data[0]
            end = line_data[1]
            code = line_data[-1]
            count = line_data[2]
            print (ErrorText.FUNCTION_TOO_LONG.format(file=file, line_count=count, line=start+1, code=code))
        elif (error_code == Error.FUNCTION_IN_HEADER):
            # print (meta)
            line_data = meta[1]
            start = line_data[0]
            end = line_data[1]
            code = line_data[-1]
            count = line_data[2]
            print (ErrorText.FUNCTION_IN_HEADER.format(file=file, line_count=count, line=start+1, code=code))
        elif (error_code == Error.CLASS_IN_SOURCE):
            print (ErrorText.CLASS_DEF_IN_SOURCE.format(file=file, line=meta[0]))
    if (errors == {}):
        return True
    return False
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functionaltests.cli.base import CmdLineTestCase
from functionaltests.cli.v1.behaviors import acl_behaviors
from functionaltests.cli.v1.behaviors import container_behaviors
from functionaltests.cli.v1.behaviors import secret_behaviors
from functionaltests import utils
from testtools import testcase
# Dataset for @utils.parameterized_dataset: each test runs twice, once with
# long-form CLI arguments and once with their short-form equivalents.
ARGS_TYPE = {'short_arg_false': [False],
             'short_arg_true': [True]}
@utils.parameterized_test_case
class ACLTestCase(CmdLineTestCase):
    """CLI functional tests for barbican ACL operations (submit, add,
    remove, get, delete) on secrets and containers."""

    def setUp(self):
        super(ACLTestCase, self).setUp()
        self.secret_behaviors = secret_behaviors.SecretBehaviors()
        self.container_behaviors = container_behaviors.ContainerBehaviors()
        self.acl_behaviors = acl_behaviors.ACLBehaviors()

    def tearDown(self):
        super(ACLTestCase, self).tearDown()
        # Clean up in dependency order: ACLs, then containers, then secrets.
        self.acl_behaviors.delete_all_created_acls()
        self.container_behaviors.delete_all_created_containers()
        self.secret_behaviors.delete_all_created_secrets()

    @utils.parameterized_dataset(ARGS_TYPE)
    @testcase.attr('positive')
    def test_acl_submit(self, use_short_arg):
        """acl submit sets users independently on a secret and a container."""
        secret_ref = self.secret_behaviors.store_secret()
        container_ref = self.container_behaviors.create_container(
            secret_hrefs=[secret_ref])

        data = self.acl_behaviors.acl_submit(entity_ref=secret_ref,
                                             users=['u1', 'u2'],
                                             use_short_arg=use_short_arg)
        self.assertIsNotNone(data)
        self.assertIn('u1', data['Users'])
        self.assertIn('u2', data['Users'])
        self.assertEqual('True', data['Project Access'])
        self.assertIn(secret_ref, data['Secret ACL Ref'])

        data = self.acl_behaviors.acl_submit(entity_ref=container_ref,
                                             users=['u2', 'u3'],
                                             use_short_arg=use_short_arg)
        self.assertIsNotNone(data)
        self.assertIn('u3', data['Users'])
        self.assertNotIn('u1', data['Users'])
        self.assertEqual('True', data['Project Access'])
        self.assertIn(container_ref, data['Container ACL Ref'])

    @utils.parameterized_dataset(ARGS_TYPE)
    @testcase.attr('positive')
    def test_acl_submit_for_overwriting_existing_users(self, use_short_arg):
        """A second acl submit replaces the existing user list entirely."""
        secret_ref = self.secret_behaviors.store_secret()
        container_ref = self.container_behaviors.create_container(
            secret_hrefs=[secret_ref])

        data = self.acl_behaviors.acl_submit(entity_ref=secret_ref,
                                             users=['u1', 'u2'],
                                             project_access=False,
                                             use_short_arg=use_short_arg)
        self.assertIsNotNone(data)
        self.assertIn('u1', data['Users'])
        self.assertIn('u2', data['Users'])
        self.assertEqual('False', data['Project Access'])
        self.assertIn(secret_ref, data['Secret ACL Ref'])

        data = self.acl_behaviors.acl_submit(entity_ref=container_ref,
                                             users=[],
                                             project_access=True,
                                             use_short_arg=use_short_arg)
        self.assertIsNotNone(data)
        self.assertNotIn('u1', data['Users'])
        self.assertNotIn('u2', data['Users'])
        self.assertEqual('True', data['Project Access'])
        self.assertIn(container_ref, data['Container ACL Ref'])

    @utils.parameterized_dataset(ARGS_TYPE)
    @testcase.attr('positive')
    def test_acl_add(self, use_short_arg):
        """acl add merges new users into the existing ACL."""
        secret_ref = self.secret_behaviors.store_secret()

        data = self.acl_behaviors.acl_submit(entity_ref=secret_ref,
                                             project_access=False,
                                             users=['u1', 'u2'])
        self.assertIsNotNone(data)
        self.assertEqual('False', data['Project Access'])

        acls = self.acl_behaviors.acl_add(entity_ref=secret_ref,
                                          users=['u2', 'u3'],
                                          use_short_arg=use_short_arg)
        data = acls[0]  # get 'read' operation ACL data
        self.assertIsNotNone(data)
        self.assertIn('u1', data['Users'])
        self.assertIn('u3', data['Users'])
        self.assertEqual('False', data['Project Access'])
        self.assertIn(secret_ref, data['Secret ACL Ref'])

        # make sure there is no change in existing users with blank users add
        acls = self.acl_behaviors.acl_add(entity_ref=secret_ref,
                                          users=[], project_access=False,
                                          use_short_arg=use_short_arg)
        data = acls[0]  # get 'read' operation ACL data
        self.assertIsNotNone(data)
        self.assertIn('u1', data['Users'])
        self.assertIn('u2', data['Users'])
        self.assertIn('u3', data['Users'])

        acls = self.acl_behaviors.acl_add(entity_ref=secret_ref,
                                          users=None, project_access=True,
                                          use_short_arg=use_short_arg)
        data = acls[0]  # get 'read' operation ACL data
        self.assertIsNotNone(data)
        self.assertIn('u2', data['Users'])
        self.assertEqual('True', data['Project Access'])

    @utils.parameterized_dataset(ARGS_TYPE)
    @testcase.attr('positive')
    def test_acl_remove(self, use_short_arg):
        """acl remove drops listed users; unknown users are ignored."""
        secret_ref = self.secret_behaviors.store_secret()
        container_ref = self.container_behaviors.create_container(
            secret_hrefs=[secret_ref])

        data = self.acl_behaviors.acl_submit(entity_ref=container_ref,
                                             project_access=False,
                                             users=['u1', 'u2'])
        self.assertIsNotNone(data)
        self.assertEqual('False', data['Project Access'])

        acls = self.acl_behaviors.acl_remove(entity_ref=container_ref,
                                             users=['u2', 'u3'],
                                             use_short_arg=use_short_arg)
        data = acls[0]  # get 'read' operation ACL data
        self.assertIsNotNone(data)
        self.assertIn('u1', data['Users'])
        self.assertNotIn('u2', data['Users'])
        self.assertEqual('False', data['Project Access'])
        self.assertIn(container_ref, data['Container ACL Ref'])

        # make sure there is no change in existing users with blank users
        # remove
        acls = self.acl_behaviors.acl_remove(entity_ref=container_ref,
                                             users=[], project_access=False,
                                             use_short_arg=use_short_arg)
        data = acls[0]  # get 'read' operation ACL data
        self.assertIsNotNone(data)
        self.assertIn('u1', data['Users'])
        self.assertEqual('False', data['Project Access'])

    @testcase.attr('positive')
    def test_acl_get(self):
        """acl get returns the ACL; trailing slashes in the ref are tolerated."""
        secret_ref = self.secret_behaviors.store_secret()
        container_ref = self.container_behaviors.create_container(
            secret_hrefs=[secret_ref])

        data = self.acl_behaviors.acl_submit(entity_ref=secret_ref,
                                             users=['u1', 'u2'])
        self.assertIsNotNone(data)

        data = self.acl_behaviors.acl_get(entity_ref=secret_ref)
        self.assertIn('u2', data['Users'])
        self.assertEqual('True', data['Project Access'])
        self.assertEqual(secret_ref + "/acl", data['Secret ACL Ref'])

        data = self.acl_behaviors.acl_get(entity_ref=secret_ref + "///")
        self.assertIn('u2', data['Users'])
        self.assertEqual('True', data['Project Access'])
        self.assertEqual(secret_ref + "/acl", data['Secret ACL Ref'])

        data = self.acl_behaviors.acl_submit(entity_ref=container_ref,
                                             project_access=False,
                                             users=['u4', 'u5'])
        self.assertIsNotNone(data)

        data = self.acl_behaviors.acl_get(entity_ref=container_ref)
        self.assertIn('u4', data['Users'])
        self.assertIn('u5', data['Users'])
        self.assertEqual('False', data['Project Access'])
        self.assertEqual(container_ref + '/acl', data['Container ACL Ref'])

    @testcase.attr('positive')
    def test_acl_delete(self):
        """acl delete resets the ACL to defaults; deleting twice is safe."""
        secret_ref = self.secret_behaviors.store_secret()

        data = self.acl_behaviors.acl_submit(entity_ref=secret_ref,
                                             users=['u1', 'u2'])
        self.assertIsNotNone(data)

        self.acl_behaviors.acl_delete(entity_ref=secret_ref)

        data = self.acl_behaviors.acl_get(entity_ref=secret_ref)
        self.assertEqual('[]', data['Users'])
        self.assertEqual('True', data['Project Access'])
        self.assertEqual(secret_ref + "/acl", data['Secret ACL Ref'])

        # deleting again should be okay as secret or container always has
        # default ACL settings
        self.acl_behaviors.acl_delete(entity_ref=secret_ref + '////')

        data = self.acl_behaviors.acl_get(entity_ref=secret_ref)
        self.assertEqual('[]', data['Users'])
        self.assertEqual('True', data['Project Access'])

    @testcase.attr('negative')
    def test_acl_entity_ref_input_with_acl_uri(self):
        """Passing an ACL URI instead of an entity ref yields an error."""
        secret_ref = self.secret_behaviors.store_secret()
        container_ref = self.container_behaviors.create_container(
            secret_hrefs=[secret_ref])

        data = self.acl_behaviors.acl_submit(entity_ref=secret_ref,
                                             users=['u1', 'u2'])
        self.assertIsNotNone(data)

        err = self.acl_behaviors.acl_delete(entity_ref=container_ref + '/acl')
        # above container ACL ref is passed instead of expected container_ref
        self.assertIn('Container ACL URI', err)

        err = self.acl_behaviors.acl_delete(entity_ref=secret_ref + '/acl')
        # above secret ACL ref is passed instead of expected secret_ref
        self.assertIn('Secret ACL URI', err)
|
<reponame>khchine5/xl<filename>lino_xl/lib/cal/ui.py
# -*- coding: UTF-8 -*-
# Copyright 2011-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from __future__ import unicode_literals
from builtins import str
from collections import OrderedDict
from django.conf import settings
from django.db import models
from lino.api import dd, rt, _
from lino import mixins
from lino.core.roles import Explorer
from lino.utils.format_date import monthname
from lino.utils.format_date import day_and_month, day_and_weekday
from lino.modlib.users.mixins import My
from lino.modlib.office.roles import OfficeUser, OfficeStaff, OfficeOperator
from lino.utils import join_elems
from etgen.html import E
from .workflows import TaskStates
from .workflows import GuestStates
from .workflows import EntryStates
from .choicelists import AccessClasses
from .mixins import daterange_text
from .utils import when_text
from .roles import CalendarReader, GuestOperator
class RemoteCalendars(dd.Table):
    """Table of all ``cal.RemoteCalendar`` rows (office staff only)."""
    model = 'cal.RemoteCalendar'
    required_roles = dd.login_required(OfficeStaff)
class Rooms(dd.Table):
    """Table of ``cal.Room`` rows; the detail view lists the entries held
    in each room."""
    required_roles = dd.login_required(OfficeStaff)
    # required_roles = dd.login_required((OfficeStaff, CalendarReader))
    model = 'cal.Room'
    detail_layout = """
    id name
    company contact_person
    description
    cal.EntriesByRoom
    """
    insert_layout = """
    id name
    company
    contact_person
    """
    detail_html_template = "cal/Room/detail.html"
class AllRooms(Rooms):
    """Unfiltered variant of :class:`Rooms` for office staff."""
    required_roles = dd.login_required(OfficeStaff)
class Priorities(dd.Table):
    """Table of ``cal.Priority`` rows (office staff only)."""
    required_roles = dd.login_required(OfficeStaff)
    model = 'cal.Priority'
    column_names = 'name *'
class Calendars(dd.Table):
    """Table of ``cal.Calendar`` rows with their subscriptions."""
    required_roles = dd.login_required(OfficeStaff)
    model = 'cal.Calendar'

    insert_layout = """
    name
    color
    """
    detail_layout = """
    name color id
    description SubscriptionsByCalendar
    """
class Subscriptions(dd.Table):
    """Table of ``cal.Subscription`` rows, ordered by calendar name."""
    required_roles = dd.login_required(OfficeStaff)
    model = 'cal.Subscription'
    order_by = ['calendar__name']
    # insert_layout = """
    # label
    # event_type
    # """
    # detail_layout = """
    # label user color
    # event_type team other_user room
    # description
    # """


# class MySubscriptions(Subscriptions, ByUser):
#     pass

# class SubscriptionsByCalendar(Subscriptions):
#     master_key = 'calendar'
class SubscriptionsByUser(Subscriptions):
    """Subscriptions of a given user (slave table on ``user``)."""
    required_roles = dd.login_required(OfficeUser)
    master_key = 'user'
    auto_fit_column_widths = True
class SubscriptionsByCalendar(Subscriptions):
    """Subscriptions to a given calendar (slave table on ``calendar``)."""
    required_roles = dd.login_required(OfficeUser)
    master_key = 'calendar'
    auto_fit_column_widths = True
def check_subscription(user, calendar):
    "Check whether the given subscription exists. If not, create it."
    if calendar is None:
        return
    Subscription = rt.models.cal.Subscription
    try:
        Subscription.objects.get(user=user, calendar=calendar)
    except Subscription.DoesNotExist:
        # No subscription yet: validate and persist a fresh one.
        obj = Subscription(user=user, calendar=calendar)
        obj.full_clean()
        obj.save()
class UserDetailMixin(dd.DetailLayout):
    """Detail-layout fragment that adds a "Calendar" panel to the user
    detail window (visible to office users)."""
    cal_left = """
    event_type access_class
    calendar
    cal.SubscriptionsByUser
    # cal.MembershipsByUser
    """
    cal = dd.Panel(
        """
        cal_left:30 cal.TasksByUser:60
        """,
        label=dd.plugins.cal.verbose_name,
        required_roles=dd.login_required(OfficeUser))
class Tasks(dd.Table):
    """Table of ``cal.Task`` rows, filterable by user, project, state and
    date range."""
    model = 'cal.Task'
    required_roles = dd.login_required(OfficeStaff)
    stay_in_grid = True
    column_names = 'start_date summary workflow_buttons *'
    order_by = ["start_date", "start_time"]

    detail_layout = """
    start_date due_date id workflow_buttons
    summary
    user project
    #event_type owner created:20 modified:20
    description #notes.NotesByTask
    """
    insert_layout = dd.InsertLayout("""
    summary
    user project
    """, window_size=(50, 'auto'))

    params_panel_hidden = True
    parameters = mixins.ObservedDateRange(
        user=dd.ForeignKey(settings.SITE.user_model,
                           verbose_name=_("Managed by"),
                           blank=True, null=True,
                           help_text=_("Only rows managed by this user.")),
        project=dd.ForeignKey(settings.SITE.project_model,
                              blank=True, null=True),
        state=TaskStates.field(blank=True,
                               help_text=_("Only rows having this state.")),
    )
    params_layout = """
    start_date end_date user state project
    """

    @classmethod
    def get_request_queryset(self, ar, **kwargs):
        """Apply the actor parameters as queryset filters."""
        # logger.info("20121010 Clients.get_request_queryset %s",ar.param_values)
        qs = super(Tasks, self).get_request_queryset(ar, **kwargs)
        if ar.param_values.user:
            qs = qs.filter(user=ar.param_values.user)
        if settings.SITE.project_model is not None and ar.param_values.project:
            qs = qs.filter(project=ar.param_values.project)
        if ar.param_values.state:
            qs = qs.filter(state=ar.param_values.state)
        # Date range filters apply to start_date only.
        if ar.param_values.start_date:
            qs = qs.filter(start_date__gte=ar.param_values.start_date)
        if ar.param_values.end_date:
            qs = qs.filter(start_date__lte=ar.param_values.end_date)
        return qs

    @classmethod
    def get_title_tags(self, ar):
        """Yield extra title fragments describing the active filters."""
        for t in super(Tasks, self).get_title_tags(ar):
            yield t
        if ar.param_values.start_date or ar.param_values.end_date:
            yield str(_("Dates %(min)s to %(max)s") % dict(
                min=ar.param_values.start_date or'...',
                max=ar.param_values.end_date or '...'))
        if ar.param_values.state:
            yield str(ar.param_values.state)
        # if ar.param_values.user:
        #     yield str(ar.param_values.user)
        if settings.SITE.project_model is not None and ar.param_values.project:
            yield str(ar.param_values.project)

    @classmethod
    def apply_cell_format(self, ar, row, col, recno, td):
        """
        Enhance today by making background color a bit darker.
        """
        if row.start_date == settings.SITE.today():
            td.set('bgcolor', "gold")
class TasksByController(Tasks):
    """Tasks controlled by a given database object (slave on ``owner``)."""
    master_key = 'owner'
    required_roles = dd.login_required(OfficeUser)
    column_names = 'start_date summary workflow_buttons id'
    # hidden_columns = set('owner_id owner_type'.split())
    auto_fit_column_widths = True
class TasksByUser(Tasks):
    """Tasks managed by a given user (slave on ``user``)."""
    master_key = 'user'
    required_roles = dd.login_required(OfficeUser)
class MyTasks(Tasks):
    """The requesting user's to-do tasks for the coming weeks."""
    label = _("My tasks")
    required_roles = dd.login_required(OfficeUser)
    column_names = 'start_date summary workflow_buttons project'
    params_panel_hidden = True

    default_end_date_offset = 30
    """Number of days to go into the future. The default value for
    :attr:`end_date` will be :meth:`today
    <lino.core.site.Site.today>` + that number of days.
    """

    @classmethod
    def param_defaults(self, ar, **kw):
        """Default to the requesting user's todo tasks from today until
        today + default_end_date_offset days."""
        kw = super(MyTasks, self).param_defaults(ar, **kw)
        kw.update(user=ar.get_user())
        kw.update(state=TaskStates.todo)
        kw.update(start_date=settings.SITE.today())
        kw.update(end_date=settings.SITE.today(
            self.default_end_date_offset))
        return kw
#if settings.SITE.project_model:
class TasksByProject(Tasks):
    """Tasks of a given project (slave on ``project``)."""
    required_roles = dd.login_required((OfficeUser, OfficeOperator))
    master_key = 'project'
    column_names = 'start_date user summary workflow_buttons *'
class GuestRoles(dd.Table):
    """Table of ``cal.GuestRole`` rows with the guests per role."""
    model = 'cal.GuestRole'
    required_roles = dd.login_required(dd.SiteStaff, OfficeUser)
    detail_layout = """
    id name
    #build_method #template #email_template #attach_to_email
    cal.GuestsByRole
    """
class Guests(dd.Table):
    """Table of ``cal.Guest`` rows (presences of partners at calendar
    entries), filterable by user, project, partner, event state and guest
    state."""
    model = 'cal.Guest'
    # required_roles = dd.login_required((OfficeUser, OfficeOperator))
    required_roles = dd.login_required(GuestOperator)
    column_names = 'partner role workflow_buttons remark event *'
    order_by = ['event__start_date', 'event__start_time']
    stay_in_grid = True

    detail_layout = """
    event partner role
    state remark workflow_buttons
    # outbox.MailsByController
    """
    insert_layout = dd.InsertLayout("""
    event
    partner
    role
    """, window_size=(60, 'auto'))

    parameters = mixins.ObservedDateRange(
        user=dd.ForeignKey(settings.SITE.user_model,
                           verbose_name=_("Responsible user"),
                           blank=True, null=True,
                           help_text=_("Only rows managed by this user.")),
        project=dd.ForeignKey(settings.SITE.project_model,
                              blank=True, null=True),
        partner=dd.ForeignKey(dd.plugins.cal.partner_model,
                              blank=True, null=True),
        event_state=EntryStates.field(
            blank=True,
            verbose_name=_("Event state"),
            help_text=_("Only rows having this event state.")),
        guest_state=GuestStates.field(
            blank=True,
            verbose_name=_("Guest state"),
            help_text=_("Only rows having this guest state.")),
    )
    params_layout = """start_date end_date user event_state guest_state
    project partner"""

    @classmethod
    def get_request_queryset(self, ar, **kwargs):
        """Apply the actor parameters as queryset filters."""
        qs = super(Guests, self).get_request_queryset(ar, **kwargs)
        if isinstance(qs, list):
            # Upstream may return a plain list; nothing to filter then.
            return qs
        pv = ar.param_values
        if pv.user:
            qs = qs.filter(event__user=pv.user)
        if settings.SITE.project_model is not None and pv.project:
            qs = qs.filter(event__project=pv.project)
        if pv.event_state:
            qs = qs.filter(event__state=pv.event_state)
        if pv.guest_state:
            qs = qs.filter(state=pv.guest_state)
        if pv.partner:
            qs = qs.filter(partner=pv.partner)
        # we test whether the *start_date* of event is within the
        # given range. Filtering guests by the end_date of their event
        # is currently not supported.
        if pv.start_date:
            qs = qs.filter(event__start_date__gte=pv.start_date)
        if pv.end_date:
            qs = qs.filter(event__start_date__lte=pv.end_date)
        return qs

    @classmethod
    def get_title_tags(self, ar):
        """Yield extra title fragments describing the active filters."""
        for t in super(Guests, self).get_title_tags(ar):
            yield t
        pv = ar.param_values
        if pv.start_date or pv.end_date:
            yield str(_("Dates %(min)s to %(max)s") % dict(
                min=pv.start_date or'...',
                max=pv.end_date or '...'))
        if pv.event_state:
            yield str(pv.event_state)
        if pv.partner:
            yield str(pv.partner)
        if pv.guest_state:
            yield str(pv.guest_state)
        # if pv.user:
        #     yield str(pv.user)
        if settings.SITE.project_model is not None and pv.project:
            yield str(pv.project)
class AllGuests(Guests):
    """All guest records, for explorer users."""
    required_roles = dd.login_required(GuestOperator, Explorer)
class GuestsByEvent(Guests):
    """Guests of a given calendar entry (slave on ``event``)."""
    master_key = 'event'
    required_roles = dd.login_required(GuestOperator)
    # required_roles = dd.login_required(OfficeUser)
    auto_fit_column_widths = True
    column_names = 'partner role workflow_buttons remark *'
    order_by = ['partner__name', 'partner__id']
class GuestsByRole(Guests):
    """Guests having a given role (slave on ``role``)."""
    master_key = 'role'
    required_roles = dd.login_required(GuestOperator)
    # required_roles = dd.login_required(OfficeUser)
class GuestsByPartner(Guests):
    """Presences of a given partner (slave on ``partner``), shown as a
    summary of links to the calendar entries."""
    label = _("Presences")
    master_key = 'partner'
    required_roles = dd.login_required(GuestOperator)
    # required_roles = dd.login_required(OfficeUser)
    column_names = 'event__when_text workflow_buttons'
    auto_fit_column_widths = True
    display_mode = "summary"
    order_by = ['event__start_date', 'event__start_time']

    @classmethod
    def param_defaults(self, ar, **kw):
        """Default to showing presences up to one week into the future."""
        kw = super(GuestsByPartner, self).param_defaults(ar, **kw)
        # kw.update(event_state=EntryStates.took_place)
        kw.update(end_date=dd.today(7))
        return kw

    @classmethod
    def get_table_summary(self, obj, ar):
        """Render this partner's entries as a comma-separated list of links."""
        if ar is None:
            return ''
        sar = self.request_from(ar, master_instance=obj)

        elems = []
        fmt = rt.models.cal.EventGenerator.get_cal_entry_renderer(
            day_and_month)

        for guest in sar:
            if len(elems):
                elems.append(', ')
            elems.extend(fmt(guest.event, ar))
        # lbl = fmt(guest.event.start_date)
        # if guest.state.button_text:
        #     lbl = "{0}{1}".format(lbl, guest.state.button_text)
        # elems.append(ar.obj2html(guest.event, lbl))
        # elems = join_elems(elems, sep=', ')
        return ar.html_text(E.div(*elems))
        # return E.div(class_="htmlText", *elems)
class MyPresences(Guests):
    """Presences of the requesting user's partner."""
    required_roles = dd.login_required(OfficeUser)
    order_by = ['event__start_date', 'event__start_time']
    label = _("My presences")
    column_names = 'event__start_date event__start_time event_summary role workflow_buttons remark *'
    params_panel_hidden = True

    @classmethod
    def get_request_queryset(self, ar, **kwargs):
        """Refuse the request for users that have no partner."""
        # logger.info("20130809 MyPresences")
        if ar.get_user().partner is None:
            raise Warning("Action not available for users without partner")
        return super(MyPresences, self).get_request_queryset(ar, **kwargs)

    @classmethod
    def get_row_permission(cls, obj, ar, state, ba):
        """No row is accessible for users without a partner."""
        if ar.get_user().partner is None:
            return False
        return super(MyPresences, cls).get_row_permission(
            obj, ar, state, ba)

    @classmethod
    def param_defaults(self, ar, **kw):
        """Default the partner filter to the requesting user's partner."""
        kw = super(MyPresences, self).param_defaults(ar, **kw)
        u = ar.get_user()
        if u is not None:
            kw.update(partner=u.partner)
        # kw.update(guest_state=GuestStates.invited)
        # kw.update(start_date=settings.SITE.today())
        return kw

    # @classmethod
    # def get_request_queryset(self,ar):
    #     ar.master_instance = ar.get_user().partner
    #     return super(MyPresences,self).get_request_queryset(ar)
# class MyPendingInvitations(Guests):
class MyPendingPresences(MyPresences):
    """Invitations of the requesting user that are still pending (state
    "invited", from today onwards)."""
    label = _("My pending invitations")
    # filter = models.Q(state=GuestStates.invited)
    column_names = 'event__when_text role workflow_buttons remark'
    params_panel_hidden = True

    @classmethod
    def param_defaults(self, ar, **kw):
        kw = super(MyPendingPresences, self).param_defaults(ar, **kw)
        # kw.update(partner=ar.get_user().partner)
        # kw.update(user=None)
        kw.update(guest_state=GuestStates.invited)
        kw.update(start_date=settings.SITE.today())
        return kw
class MyGuests(Guests):
    """Guests invited by the requesting user, from today onwards."""
    label = _("My guests")
    required_roles = dd.login_required(OfficeUser)
    order_by = ['event__start_date', 'event__start_time']
    column_names = ("event__start_date event__start_time "
                    "event_summary role workflow_buttons remark *")

    @classmethod
    def param_defaults(self, ar, **kw):
        kw = super(MyGuests, self).param_defaults(ar, **kw)
        kw.update(user=ar.get_user())
        kw.update(guest_state=GuestStates.invited)
        kw.update(start_date=settings.SITE.today())
        return kw
class EventTypes(dd.Table):
    """Table of ``cal.EventType`` rows (office staff only)."""
    required_roles = dd.login_required(OfficeStaff)
    model = 'cal.EventType'
    column_names = "name *"

    detail_layout = """
    name
    event_label
    # description
    start_date max_days id
    # type url_template username password
    #build_method #template email_template attach_to_email
    is_appointment all_rooms locks_user transparent max_conflicting planner_column
    EntriesByType
    """

    insert_layout = dd.InsertLayout("""
    name
    event_label
    """, window_size=(60, 'auto'))
class RecurrentEvents(dd.Table):
    """Table of ``cal.RecurrentEvent`` rows (recurring event generators)."""
    model = 'cal.RecurrentEvent'
    required_roles = dd.login_required(OfficeStaff)
    column_names = "start_date end_date name every_unit event_type *"
    auto_fit_column_widths = True
    order_by = ['start_date']

    insert_layout = """
    name
    start_date end_date every_unit event_type
    """
    insert_layout_width = 80

    detail_layout = """
    name
    id user event_type
    start_date start_time end_date end_time
    every_unit every max_events
    monday tuesday wednesday thursday friday saturday sunday
    description cal.EntriesByController
    """
# ~ from lino_xl.lib.workflows import models as workflows # Workflowable
# class Components(dd.Table):
# ~ # class Components(dd.Table,workflows.Workflowable):
# workflow_owner_field = 'user'
# workflow_state_field = 'state'
# def disable_editing(self,request):
# def get_row_permission(cls,row,user,action):
# if row.rset: return False
# @classmethod
# def get_row_permission(cls,action,user,row):
# if not action.readonly:
# if row.user != user and user.level < UserLevel.manager:
# return False
# if not super(Components,cls).get_row_permission(action,user,row):
# return False
# return True
class EventDetail(dd.DetailLayout):
    """Detail layout for a calendar entry (used by :class:`Events`)."""
    start = "start_date start_time"
    end = "end_date end_time"
    main = """
    event_type summary user
    start end #all_day assigned_to #duration #state
    room project owner workflow_buttons
    # owner created:20 modified:20
    # description
    GuestsByEvent #outbox.MailsByController
    """
class EventEvents(dd.ChoiceList):
    """Choicelist for the "Observed event" filter parameter of Events."""
    verbose_name = _("Observed event")
    verbose_name_plural = _("Observed events")
# Populate the EventEvents choicelist.
add = EventEvents.add_item
add('10', _("Stable"), 'stable')
add('20', _("Unstable"), 'pending')
class Events(dd.Table):
model = 'cal.Event'
required_roles = dd.login_required(OfficeStaff)
column_names = 'when_text:20 user summary event_type id *'
# hidden_columns = """
# priority access_class transparent
# owner created modified
# description
# sequence auto_type build_time owner owner_id owner_type
# end_date end_time
# """
order_by = ["start_date", "start_time", "id"]
detail_layout = EventDetail()
insert_layout = """
start_date start_time end_date end_time
summary
# room priority access_class transparent
"""
detail_html_template = "cal/Event/detail.html"
params_panel_hidden = True
parameters = mixins.ObservedDateRange(
user=dd.ForeignKey(settings.SITE.user_model,
verbose_name=_("Managed by"),
blank=True, null=True,
help_text=_("Only rows managed by this user.")),
project=dd.ForeignKey(settings.SITE.project_model,
blank=True, null=True),
event_type=dd.ForeignKey('cal.EventType', blank=True, null=True),
room=dd.ForeignKey('cal.Room', blank=True, null=True),
assigned_to=dd.ForeignKey(settings.SITE.user_model,
verbose_name=_("Assigned to"),
blank=True, null=True,
help_text=_(
"Only events assigned to this user.")),
state=EntryStates.field(blank=True,
help_text=_("Only rows having this state.")),
# unclear = models.BooleanField(_("Unclear events"))
observed_event=EventEvents.field(blank=True),
show_appointments=dd.YesNo.field(_("Appointments"), blank=True),
)
params_layout = """
start_date end_date observed_event state
user assigned_to project event_type room show_appointments
"""
# ~ next = NextDateAction() # doesn't yet work. 20121203
# fixed_states = set(EntryStates.filter(fixed=True))
# pending_states = set([es for es in EntryStates if not es.fixed])
# pending_states = set(EntryStates.filter(fixed=False))
@classmethod
def get_request_queryset(self, ar, **kwargs):
# logger.info("20121010 Clients.get_request_queryset %s",ar.param_values)
qs = super(Events, self).get_request_queryset(ar, **kwargs)
pv = ar.param_values
if pv.user:
qs = qs.filter(user=pv.user)
if pv.assigned_to:
qs = qs.filter(assigned_to=pv.assigned_to)
if settings.SITE.project_model is not None and pv.project:
qs = qs.filter(project=pv.project)
if pv.event_type:
qs = qs.filter(event_type=pv.event_type)
else:
if pv.show_appointments == dd.YesNo.yes:
qs = qs.filter(event_type__is_appointment=True)
elif pv.show_appointments == dd.YesNo.no:
qs = qs.filter(event_type__is_appointment=False)
if pv.state:
qs = qs.filter(state=pv.state)
if pv.room:
qs = qs.filter(room=pv.room)
if pv.observed_event == EventEvents.stable:
qs = qs.filter(state__in=set(EntryStates.filter(fixed=True)))
elif pv.observed_event == EventEvents.pending:
qs = qs.filter(state__in=set(EntryStates.filter(fixed=False)))
if pv.start_date:
qs = qs.filter(start_date__gte=pv.start_date)
if pv.end_date:
qs = qs.filter(start_date__lte=pv.end_date)
return qs
@classmethod
def get_title_tags(self, ar):
for t in super(Events, self).get_title_tags(ar):
yield t
pv = ar.param_values
if pv.start_date or pv.end_date:
yield daterange_text(
pv.start_date,
pv.end_date)
if pv.state:
yield str(pv.state)
if pv.event_type:
yield str(pv.event_type)
# if pv.user:
# yield str(pv.user)
if pv.room:
yield str(pv.room)
if settings.SITE.project_model is not None and pv.project:
yield str(pv.project)
if pv.assigned_to:
yield str(self.parameters['assigned_to'].verbose_name) \
+ ' ' + str(pv.assigned_to)
@classmethod
def apply_cell_format(self, ar, row, col, recno, td):
    """Give the row for today's entry a slightly darker background."""
    is_today = row.start_date == settings.SITE.today()
    if is_today:
        td.set('bgcolor', "#bbbbbb")
@classmethod
def param_defaults(self, ar, **kw):
    """Default parameter values: hide entries that start before the
    site-wide ``hide_events_before`` date."""
    kw = super(Events, self).param_defaults(ar, **kw)
    kw.update(start_date=settings.SITE.site_config.hide_events_before)
    return kw
class AllEntries(Events):
    """All calendar entries without restriction; requires the Explorer role."""
    required_roles = dd.login_required(Explorer)
class EntriesByType(Events):
    """Calendar entries of a given calendar entry type (the master)."""
    master_key = 'event_type'
class ConflictingEvents(Events):
    """Slave table listing the entries that conflict with the master
    entry, as reported by its ``get_conflicting_events()`` method."""
    label = ' ⚔ '  # 2694
    help_text = _("Show events conflicting with this one.")
    master = 'cal.Event'
    column_names = 'start_date start_time end_time project room user *'

    @classmethod
    def get_request_queryset(self, ar, **kwargs):
        qs = ar.master_instance.get_conflicting_events()
        if qs is None:
            # No conflicts reported: show an empty table.
            return rt.models.cal.Event.objects.none()
        return qs
class PublicEntries(Events):
    """Calendar entries whose access class is public; visible to any
    user with the CalendarReader role."""
    required_roles = dd.login_required(CalendarReader)
    column_names = 'overview room event_type *'
    filter = models.Q(access_class=AccessClasses.public)

    @classmethod
    def param_defaults(self, ar, **kw):
        """Default to showing entries starting today or later."""
        kw = super(PublicEntries, self).param_defaults(ar, **kw)
        # kw.update(show_appointments=dd.YesNo.yes)
        kw.update(start_date=settings.SITE.today())
        # kw.update(end_date=settings.SITE.today())
        return kw
class EntriesByDay(Events):
    """The appointments of a single day ("Appointments today")."""
    required_roles = dd.login_required((OfficeOperator, OfficeUser))
    label = _("Appointments today")
    column_names = 'start_time end_time duration room event_type summary owner workflow_buttons *'
    auto_fit_column_widths = True
    params_panel_hidden = False

    @classmethod
    def param_defaults(self, ar, **kw):
        """Show only appointments, limited to a single day (today)."""
        kw = super(EntriesByDay, self).param_defaults(ar, **kw)
        kw.update(show_appointments=dd.YesNo.yes)
        kw.update(start_date=settings.SITE.today())
        kw.update(end_date=settings.SITE.today())
        return kw

    @classmethod
    def create_instance(self, ar, **kw):
        """New entries default to the currently selected day."""
        kw.update(start_date=ar.param_values.start_date)
        return super(EntriesByDay, self).create_instance(ar, **kw)

    @classmethod
    def get_title_base(self, ar):
        """Use the human-readable day name as the table title."""
        return when_text(ar.param_values.start_date)

    @classmethod
    def as_link(cls, ar, today, txt=None):
        """Return an HTML link opening this table for *today*.

        Falls back to the site's today when *today* is None; returns ''
        when *ar* is None.
        """
        if ar is None:
            return ''
        if today is None:
            today = settings.SITE.today()
        if txt is None:
            txt = when_text(today)
        pv = dict(start_date=today)
        # TODO: what to do with events that span multiple days?
        pv.update(end_date=today)
        target = ar.spawn(cls, param_values=pv)
        return ar.href_to_request(target, txt)
# class EntriesByType(Events):
# master_key = 'type'
# class EntriesByPartner(Events):
# required = dd.login_required(user_groups='office')
# master_key = 'user'
class EntriesByRoom(Events):
    """Calendar entries taking place in a given room (the master)."""
    master_key = 'room'
# from etgen.html import Table, tostring
class Year(object):
    """One year's worth of collected calendar entries, bucketed by month."""

    def __init__(self, year):
        self.year = year
        # Twelve independent buckets, index 0 = January ... 11 = December.
        self.months = [list() for _ in range(12)]
# Rendering modes chosen by CalendarRenderer.analyze_view().
PLAIN_MODE = 0  # comma-separated dates in a single paragraph
UL_MODE = 1     # one bullet item per non-empty month
TABLE_MODE = 2  # table with years as rows and months as columns
class CalendarRenderer(object):
    """Collect calendar entries by year and month and render them as
    HTML, picking a layout (plain / list / table) that fits the data."""

    def __init__(self):
        self.years = OrderedDict()  # year number -> Year bucket
        self.mode = PLAIN_MODE

    def collect(self, d, evt):
        """Register entry *evt* under date *d* (needs .year and .month)."""
        if d.year in self.years:
            y = self.years[d.year]
        else:
            y = Year(d.year)
            self.years[d.year] = y
        y.months[d.month-1].append(evt)

    def analyze_view(self, max_months=6):
        """Choose the rendering mode from how entries spread over months.

        count1 counts non-empty months, count2 counts months with more
        than one entry.  Few busy months -> UL_MODE; many months with
        multiple entries -> TABLE_MODE; otherwise PLAIN_MODE.
        """
        count1 = count2 = 0
        nyears = 0
        for y in self.years.values():
            nmonths = 0
            for m in y.months:
                if len(m):
                    nmonths += 1
                    count1 += 1
                    if len(m) > 1:
                        count2 += 1
            if nmonths:
                nyears += 1
        if count1 <= max_months:
            self.mode = UL_MODE
        elif count2:
            self.mode = TABLE_MODE
        else:
            self.mode = PLAIN_MODE

    def to_html(self, ar):
        """Render the collected entries as an HTML element for *ar*."""
        self.analyze_view()
        get_rnd = rt.models.cal.EventGenerator.get_cal_entry_renderer
        # Pick the separator and per-entry formatter for the mode.
        if self.mode == TABLE_MODE:
            sep = ' '
            fmt = get_rnd(day_and_weekday)
        elif self.mode == UL_MODE:
            sep = ' '
            fmt = get_rnd(day_and_weekday)
        elif self.mode == PLAIN_MODE:
            sep = ', '
            fmt = get_rnd(dd.fds)

        def xxx(list_of_entries):
            # Render a list of entries, joined by *sep*.
            elems = []
            for e in list_of_entries:
                if len(elems):
                    elems.append(sep)
                elems.extend(fmt(e, ar))
            return elems

        if self.mode == TABLE_MODE:
            rows = []
            cells = [E.th("")] + [E.th(monthname(m+1)) for m in range(12)]
            # print(''.join([tostring(c) for c in cells]))
            rows.append(E.tr(*cells))
            for y in self.years.values():
                cells = [E.td(str(y.year), width="4%")]
                for m in y.months:
                    # every m is a list of etree elems
                    cells.append(E.td(*xxx(m), width="8%", **ar.renderer.cellattrs))
                # print(str(y.year) +":" + ''.join([tostring(c) for c in cells]))
                rows.append(E.tr(*cells))
            return E.table(*rows, **ar.renderer.tableattrs)
        if self.mode == UL_MODE:
            items = []
            for y in self.years.values():
                for m, lst in enumerate(y.months):
                    if len(lst):
                        items.append(E.li(
                            monthname(m+1), " ", str(y.year), ": ", *xxx(lst)))
            return E.ul(*items)
        if self.mode == PLAIN_MODE:
            elems = []
            for y in self.years.values():
                for lst in y.months:
                    if len(lst):
                        if len(elems):
                            elems.append(sep)
                        elems.extend(xxx(lst))
            return E.p(*elems)
        raise Exception("20180720")
class EntriesByController(Events):
    """Entries controlled by a given database object (the master).

    The summary display shows a condensed calendar, per-state counters
    and a toolbar with the permitted actions.
    """
    required_roles = dd.login_required((OfficeOperator, OfficeUser))
    # required_roles = dd.login_required(OfficeUser)
    master_key = 'owner'
    column_names = 'when_text summary workflow_buttons auto_type user event_type *'
    # column_names = 'when_text:20 when_html summary workflow_buttons *'
    auto_fit_column_widths = True
    display_mode = "summary"
    order_by = ["start_date", "start_time", "auto_type", "id"]
    # order_by = ['seqno']

    @classmethod
    def get_table_summary(self, obj, ar):
        """Return the HTML summary for master *obj*: a calendar overview,
        one counter per entry state and (when permitted) buttons for the
        update-events and insert actions."""
        if ar is None:
            return ''
        sar = self.request_from(ar, master_instance=obj)
        state_coll = {}  # EntryState -> number of entries in that state
        cal = CalendarRenderer()
        for evt in sar:
            # if len(elems) > 0:
            #     elems.append(', ')
            if evt.state in state_coll:
                state_coll[evt.state] += 1
            else:
                state_coll[evt.state] = 1
            cal.collect(evt.start_date, evt)
        elems = [cal.to_html(ar)]
        ul = []
        for st in EntryStates.get_list_items():
            ul.append(_("{} : {}").format(st, state_coll.get(st, 0)))
        toolbar = []
        toolbar += join_elems(ul, sep=', ')
        # elems = join_elems(ul, sep=E.br)
        ar1 = obj.do_update_events.request_from(sar)
        if ar1.get_permission():
            btn = ar1.ar2button(obj)
            toolbar.append(btn)
        ar2 = self.insert_action.request_from(sar)
        if ar2.get_permission():
            btn = ar2.ar2button()
            toolbar.append(btn)
        if len(toolbar):
            toolbar = join_elems(toolbar, sep=' ')
            elems.append(E.p(*toolbar))
        return ar.html_text(E.div(*elems))
# EntriesByProject exists only when this site defines a project model.
if settings.SITE.project_model:

    class EntriesByProject(Events):
        """Calendar entries related to a given project (the master)."""
        required_roles = dd.login_required((OfficeUser, OfficeOperator))
        master_key = 'project'
        auto_fit_column_widths = True
        stay_in_grid = True
        column_names = 'when_text user summary workflow_buttons'
        # column_names = 'when_text user summary workflow_buttons'
        insert_layout = """
start_date start_time end_time
summary
event_type
"""
class OneEvent(Events):
    """Detail view of a single calendar entry, without the detail
    navigator; readable also by plain calendar readers."""
    show_detail_navigator = False
    use_as_default_table = False
    required_roles = dd.login_required(
        (OfficeOperator, OfficeUser, CalendarReader))
    # required_roles = dd.login_required(OfficeUser)
class MyEntries(Events):
    """The requesting user's appointments, from today onwards."""
    label = _("My appointments")
    required_roles = dd.login_required(OfficeUser)
    column_names = 'overview project #event_type #summary workflow_buttons *'
    auto_fit_column_widths = True

    @classmethod
    def param_defaults(self, ar, **kw):
        """Default filter: my appointments starting today or later."""
        kw = super(MyEntries, self).param_defaults(ar, **kw)
        kw.update(user=ar.get_user())
        kw.update(show_appointments=dd.YesNo.yes)
        # kw.update(assigned_to=ar.get_user())
        # logger.info("20130807 %s %s",self,kw)
        kw.update(start_date=dd.today())
        # kw.update(end_date=settings.SITE.today(14))
        return kw

    @classmethod
    def create_instance(self, ar, **kw):
        """New entries default to the selected start date."""
        kw.update(start_date=ar.param_values.start_date)
        return super(MyEntries, self).create_instance(ar, **kw)
class MyEntriesToday(MyEntries):
    """My appointments restricted to today."""
    label = _("My appointments today")
    column_names = 'start_time end_time project event_type '\
        'summary workflow_buttons *'

    @classmethod
    def param_defaults(self, ar, **kw):
        """Also set end_date so that only today's entries are listed."""
        kw = super(MyEntriesToday, self).param_defaults(ar, **kw)
        kw.update(end_date=dd.today())
        return kw
class MyAssignedEvents(MyEntries):
    """Entries assigned to (not owned by) the requesting user.

    Also contributes a welcome message when such entries exist.
    """
    label = _("Events assigned to me")
    required_roles = dd.login_required(OfficeUser)

    @classmethod
    def param_defaults(self, ar, **kw):
        """Filter on assigned_to instead of user."""
        kw = super(MyAssignedEvents, self).param_defaults(ar, **kw)
        kw.update(user=None)
        kw.update(assigned_to=ar.get_user())
        return kw

    @classmethod
    def get_welcome_messages(cls, ar, **kw):
        """Yield a link to this table when entries are assigned to me."""
        sar = ar.spawn(cls)
        count = sar.get_total_count()
        if count > 0:
            txt = _("%d events have been assigned to you.") % count
            yield ar.href_to_request(sar, txt)
class OverdueAppointments(Events):
    """Appointments in a pending state whose date lies in the past."""
    required_roles = dd.login_required(OfficeStaff)
    label = _("Overdue appointments")
    column_names = 'when_text user project owner event_type summary workflow_buttons *'
    auto_fit_column_widths = True
    params_panel_hidden = False

    @classmethod
    def param_defaults(self, ar, **kw):
        """Pending appointments up to and including today."""
        kw = super(OverdueAppointments, self).param_defaults(ar, **kw)
        kw.update(observed_event=EventEvents.pending)
        kw.update(end_date=settings.SITE.today())
        kw.update(show_appointments=dd.YesNo.yes)
        return kw
class MyOverdueAppointments(My, OverdueAppointments):
    """Overdue appointments restricted to the requesting user."""
    label = _("My overdue appointments")
    required_roles = dd.login_required(OfficeUser)
    column_names = 'overview owner event_type workflow_buttons *'
class MyUnconfirmedAppointments(MyEntries):
    """My suggested or draft appointments within two weeks around today."""
    required_roles = dd.login_required(OfficeUser)
    label = _("Unconfirmed appointments")
    column_names = 'when_text project summary workflow_buttons *'
    auto_fit_column_widths = True
    params_panel_hidden = False
    filter = models.Q(state__in=(EntryStates.suggested, EntryStates.draft))

    @classmethod
    def param_defaults(self, ar, **kw):
        """Window of two weeks back to two weeks ahead."""
        kw = super(MyUnconfirmedAppointments, self).param_defaults(ar, **kw)
        # kw.update(observed_event=EventEvents.pending)
        # kw.update(state=EntryStates.draft)
        kw.update(start_date=settings.SITE.today(-14))
        kw.update(end_date=settings.SITE.today(14))
        # kw.update(show_appointments=dd.YesNo.yes)
        return kw
class EventPolicies(dd.Table):
    """Configuration table of recurrence policies (cal.EventPolicy)."""
    required_roles = dd.login_required(OfficeStaff)
    model = 'cal.EventPolicy'
    column_names = 'name event_type max_events every every_unit monday tuesday wednesday thursday friday saturday sunday *'
    # detail_layout = """
    # id name
    # max_events every every_unit event_type
    # monday tuesday wednesday thursday friday saturday sunday
    # """
|
<filename>scripts/supervised/.~c9_invoke_vRCWi.py
"""
Author: <NAME>
Note that this script only applies to Box3dReachPixel environments
"""
from sandbox.rocky.tf.envs.base import TfEnv
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc import logger
from railrl.predictors.dynamics_model import NoEncoder, FullyConnectedEncoder, ConvEncoder, InverseModel, ForwardModel
import argparse
import tensorflow as tf
from os import listdir
import os.path as osp
import joblib
def read_and_decode(filename_queue, obs_shape, action_shape, batch_size=128, num_threads=12, queue_capacity=20000):
    """Build a TF1 queue-based input pipeline over TFRecord transitions.

    Each record holds an observation image and the next observation
    image as raw uint8 bytes plus a 4-dim float32 action.  Images are
    reshaped to *obs_shape* and rescaled from [0, 255] to [-0.5, 0.5].

    Returns a tuple of batch tensors (obs, next_obs, action).

    NOTE(review): *action_shape* is accepted but unused -- the action
    length is hard-coded to 4 in the feature spec below.
    """
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'next_image_raw': tf.FixedLenFeature([], tf.string),
            'action': tf.FixedLenFeature([4], tf.float32)
        })
    obs = tf.decode_raw(features['image_raw'], tf.uint8)
    next_obs = tf.decode_raw(features['next_image_raw'], tf.uint8)
    obs = tf.reshape(obs, obs_shape)
    next_obs = tf.reshape(next_obs, obs_shape)
    # Rescale pixel values from [0, 255] to [-0.5, 0.5].
    obs = tf.cast(obs, tf.float32) * (1. / 255) - 0.5
    next_obs = tf.cast(next_obs, tf.float32) * (1. / 255) - 0.5
    action = tf.cast(features['action'], tf.float32)
    obs_batch, next_obs_batch, action_batch = tf.train.batch(
        [obs, next_obs, action],
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=queue_capacity,
        enqueue_many=False,
    )
    return obs_batch, next_obs_batch, action_batch
def save_snapshot(encoder, inverse_model, forward_model, tfmodel_path):
    """Persist the three ICM sub-models to *tfmodel_path* via joblib."""
    snapshot = {
        'encoder': encoder,
        'inverse_model': inverse_model,
        'forward_model': forward_model,
    }
    joblib.dump(snapshot, tfmodel_path, compress=3)
    logger.log("Saved ICM model to {}".format(tfmodel_path))
def cos_loss(A, B):
    """Mean cosine distance (1 - cosine similarity) between rows of A and B."""
    a_unit = tf.nn.l2_normalize(A, 1)
    b_unit = tf.nn.l2_normalize(B, 1)
    cosine = tf.reduce_sum(tf.multiply(a_unit, b_unit), axis=1)
    return 1 - tf.reduce_mean(cosine)
def main():
    """Train an ICM (encoder + inverse + forward model) on transitions.

    Reads TFRecord transition files from ``dataset_path``, splits them
    into train/validation file lists, builds the three sub-models and
    minimizes a weighted sum of the forward and inverse losses with
    Adam, periodically logging summaries and saving joblib snapshots.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str,
                        help="name of gym env")
    parser.add_argument('dataset_path', type=str,
                        help="path of training and validation dataset")
    parser.add_argument('--tfboard_path', type=str, default='/tmp/tfboard')
    parser.add_argument('--tfmodel_path', type=str, default='/tmp/tfmodels')
    # Training parameters
    parser.add_argument('--val_ratio', type=float, default=0.1,
                        help="ratio of validation sets")
    parser.add_argument('--num_itr', type=int, default=10000000)
    parser.add_argument('--val_freq', type=int, default=1000)
    parser.add_argument('--log_freq', type=int, default=200)
    parser.add_argument('--save_freq', type=int, default=5000)
    # ICM parameters
    parser.add_argument('--init_lr', type=float, default=1e-4)
    parser.add_argument('--forward_weight', type=float, default=0.8,
                        help="the ratio of forward loss vs inverse loss")
    parser.add_argument('--cos_forward', action='store_true',
                        help="whether to use cosine forward loss")
    # parser.add_argument('--norm_input', action='store_true',
    #                     help="whether to normalize observation input")
    args = parser.parse_args()
    # The env is only used for its observation/action space shapes.
    env = TfEnv(normalize(env=GymEnv(args.env_name,record_video=False, \
        log_dir='/tmp/gym_test',record_log=False)))
    # Get dataset: every file in dataset_path, split by val_ratio.
    # NOTE(review): listdir order is arbitrary, so the split is not a
    # deterministic partition across runs.
    dataset_names = list(map(lambda file_name: osp.join(args.dataset_path, file_name), listdir(args.dataset_path)))
    val_set_names = dataset_names[:int(len(dataset_names)*args.val_ratio)]
    train_set_names = dataset_names[int(len(dataset_names)*args.val_ratio):]
    train_queue = tf.train.string_input_producer(train_set_names, num_epochs=None)
    val_queue = tf.train.string_input_producer(val_set_names, num_epochs=None)
    train_obs, train_next_obs, train_action = read_and_decode(train_queue, env.observation_space.shape, env.action_space.shape)
    val_obs, val_next_obs, val_action = read_and_decode(val_queue, env.observation_space.shape, env.action_space.shape)
    # Build ICM model
    # if args.norm_input:
    #     train_obs = train_obs * (1./255) - 0.5
    #     train_next_obs = train_next_obs *(1./255) - 0.5
    #     val_obs = val_obs * (1./255) - 0.5
    #     val_next_obs = val_next_obs * (1./255) - 0.5
    #     train_obs = tf.cast(train_obs, tf.float32) / 255.0 - 0.5
    #     train_next_obs = tf.cast(train_next_obs, tf.float32) / 255.0 - 0.5
    #     val_obs = tf.cast(val_obs, tf.float32) / 255.0 - 0.5
    #     val_next_obs = tf.cast(val_next_obs, tf.float32) / 255.0 - 0.5
    # else:
    #     train_obs = tf.cast(train_obs, tf.float32)
    #     train_next_obs = tf.cast(train_next_obs, tf.float32)
    #     val_obs = tf.cast(val_obs, tf.float32)
    #     val_next_obs = tf.cast(val_next_obs, tf.float32)
    _encoder = ConvEncoder(
        feature_dim=256,
        input_shape=env.observation_space.shape,
        conv_filters=(64, 64, 64, 32),
        conv_filter_sizes=((5,5), (5,5), (5,5), (3,3)),
        conv_strides=(3, 2, 2, 2),
        conv_pads=('SAME', 'SAME', 'SAME', 'SAME'),
        hidden_sizes=(256,),
        hidden_activation=tf.nn.elu,
    )
    _inverse_model = InverseModel(
        feature_dim=256,
        env_spec=env.spec,
        hidden_sizes=(256,),
        hidden_activation=tf.nn.tanh,
        output_activation=tf.nn.tanh,
    )
    _forward_model = ForwardModel(
        feature_dim=256,
        env_spec=env.spec,
        hidden_sizes=(256,),
        hidden_activation=tf.nn.elu,
    )
    sess = tf.Session()
    _encoder.sess = sess
    _inverse_model.sess = sess
    _forward_model.sess = sess
    with sess.as_default():
        # Initialize variables for get_copy to work
        # (tf.initialize_all_variables is the deprecated TF1 spelling of
        # tf.global_variables_initializer).
        sess.run(tf.initialize_all_variables())
        # Weight-tied copies wire the shared parameters to the train and
        # validation input tensors respectively.
        train_encoder1 = _encoder.get_weight_tied_copy(observation_input=train_obs)
        train_encoder2 = _encoder.get_weight_tied_copy(observation_input=train_next_obs)
        train_inverse_model = _inverse_model.get_weight_tied_copy(feature_input1=train_encoder1.output, feature_input2=train_encoder2.output)
        train_forward_model = _forward_model.get_weight_tied_copy(feature_input=train_encoder1.output, action_input=train_action)
        val_encoder1 = _encoder.get_weight_tied_copy(observation_input=val_obs)
        val_encoder2 = _encoder.get_weight_tied_copy(observation_input=val_next_obs)
        val_inverse_model = _inverse_model.get_weight_tied_copy(feature_input1=val_encoder1.output, feature_input2=val_encoder2.output)
        val_forward_model = _forward_model.get_weight_tied_copy(feature_input=val_encoder1.output, action_input=val_action)
        # Forward loss: predicted next feature vs actual next feature.
        if args.cos_forward:
            train_forward_loss = cos_loss(train_encoder2.output, train_forward_model.output)
            val_forward_loss = cos_loss(val_encoder2.output, val_forward_model.output)
        else:
            train_forward_loss = tf.reduce_mean(tf.square(train_encoder2.output - train_forward_model.output))
            val_forward_loss = tf.reduce_mean(tf.square(val_encoder2.output - val_forward_model.output))
        # Inverse loss: predicted action vs actual action (MSE).
        train_inverse_loss = tf.reduce_mean(tf.square(train_action - train_inverse_model.output))
        val_inverse_loss = tf.reduce_mean(tf.square(val_action - val_inverse_model.output))
        train_total_loss = args.forward_weight * train_forward_loss + (1. - args.forward_weight) * train_inverse_loss
        val_total_loss = args.forward_weight * val_forward_loss + (1. - args.forward_weight) * val_inverse_loss
        icm_opt = tf.train.AdamOptimizer(args.init_lr).minimize(train_total_loss)
        # Setup summaries
        summary_writer = tf.summary.FileWriter(args.tfboard_path, graph=tf.get_default_graph())
        train_inverse_loss_summ = tf.summary.scalar("train/icm_inverse_loss", train_inverse_loss)
        train_forward_loss_summ = tf.summary.scalar("train/icm_forward_loss", train_forward_loss)
        train_total_loss_summ = tf.summary.scalar("train/icm_total_loss", train_total_loss)
        val_inverse_loss_summ = tf.summary.scalar("val/icm_inverse_loss", val_inverse_loss)
        val_forward_loss_summ = tf.summary.scalar("val/icm_forward_loss", val_forward_loss)
        val_total_loss_summ = tf.summary.scalar("val/icm_total_loss", val_total_loss)
        train_summary_op = tf.summary.merge(
            [train_inverse_loss_summ,
             train_forward_loss_summ,
             train_total_loss_summ])
        val_summary_op = tf.summary.merge(
            [val_inverse_loss_summ,
             val_forward_loss_summ,
             val_total_loss_summ])
        logger.log("Finished creating ICM model")
        # Re-initialize so the optimizer's slot variables are included.
        sess.run(tf.initialize_all_variables())
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for timestep in range(args.num_itr):
                # Only fetch the (more expensive) summaries on log steps.
                if timestep % args.log_freq == 0:
                    logger.log("Start itr {}".format(timestep))
                    _, train_summary = sess.run(
                        [icm_opt, train_summary_op]
                    )
                else:
                    sess.run(icm_opt)
                if timestep % args.log_freq == 0:
                    summary_writer.add_summary(train_summary, timestep)
                if timestep % args.save_freq == 0:
                    save_snapshot(_encoder, _inverse_model, _forward_model, args.tfmodel_path)
                if timestep % args.val_freq == 0:
                    val_summary = sess.run(
                        val_summary_op
                    )
                    summary_writer.add_summary(val_summary, timestep)
        except KeyboardInterrupt:
            # Ctrl-C ends training gracefully so queues can be joined.
            print ("End training...")
            pass
        coord.join(threads)
        sess.close()
if __name__ == "__main__":
    # Script entry point: parse CLI args and start ICM training.
    main()
    # test_img()
|
import numpy as np
import torch
from PIL import Image
from random import random
from scipy.ndimage import geometric_transform
from numpy import *
from torchvision import transforms
from mobius_transformation import Mobius
from mobius_mask import Mobius_mask
from util.cutout import Cutout
#np.random.seed(0)
class super_transformation(object):
    """Composite train-time transform used to compare augmentation policies.

    Depending on ``augmentation_type``, an input PIL image is augmented by a
    Mobius warp, the standard crop+flip pipeline, plain resizing, a Mobius
    occlusion mask, or a random mix of those, then converted to a tensor and
    normalized.  Cutout is optionally applied last.
    """

    # Two-way mixes: Mobius with probability len(values)/den, otherwise the
    # regular crop+flip pipeline.  type -> (den, raffle values -> mobius).
    _TWO_WAY = {
        'mix0': (3, frozenset([0])),            # 33% mobius
        'mix2': (2, frozenset([0])),            # 50% mobius
        'mix3': (10, frozenset([0, 1])),        # 20% mobius
        'mix4': (10, frozenset([0])),           # 10% mobius
        'mix5': (10, frozenset([0, 1, 2])),     # 30% mobius
        'mix6': (10, frozenset([0, 1, 2, 3])),  # 40% mobius
        'mix7': (20, frozenset([0])),           # 5% mobius
    }

    # Three-way mixes: mobius / regular / plain resize ("noaug").
    # type -> (den, mobius raffle values, regular raffle values); any
    # remaining raffle value falls through to plain resize.
    _THREE_WAY = {
        'halfmobius': (2, frozenset([0]), frozenset()),
        'noaug_mix1': (6, frozenset([0, 1]), frozenset([2, 3, 4])),
        'mix': (3, frozenset([0]), frozenset([1])),
        'noaug_mix2': (3, frozenset([0]), frozenset([1])),
        # NOTE: in the original code raffles 2, 4 and 5 all fell through
        # to noaug here (only raffle == 3 was "regular"); kept as-is.
        'noaug_mix3': (6, frozenset([0, 1]), frozenset([3])),
        'noaug_mix4': (5, frozenset([0]), frozenset([1, 2, 3])),
        'noaug_mix5': (5, frozenset([0]), frozenset([1, 2])),
        'noaug_mix6': (5, frozenset([0]), frozenset([1])),
    }

    # Number of occlusion masks applied by each pure-mask policy.
    _MASK_COUNT = {'mask': 1, 'two_mask': 2, 'three_mask': 3}

    def __init__(self, rand, interpolation, augmentation_type, normalization,
                 dataset, n_holes, length, ifcutout, ifmask, mask_length,
                 std, madmissable, M):
        """Build all sub-transforms for *dataset*.

        rand/interpolation/std/madmissable/M are forwarded to Mobius;
        normalization is a ready-made transform applied after ToTensor;
        n_holes/length configure Cutout; mask_length configures the
        Mobius occlusion mask; ifcutout/ifmask toggle those two steps.
        """
        sizes = {'cifar10': 32, 'cifar100': 32, 'svhn': 32, 'imagenet': 32,
                 'tiny': 64, 'stl10': 96, 'pet': 224}
        if dataset not in sizes:
            # The original code silently skipped unknown datasets and
            # crashed later with AttributeError; fail fast instead.
            raise ValueError("unknown dataset: {!r}".format(dataset))
        self.h = self.w = sizes[dataset]
        self.augmentation_type = augmentation_type
        self.mobius = Mobius(rand, interpolation, dataset, std, madmissable, M)
        self.mobius_mask = Mobius_mask(dataset, mask_length=mask_length)
        self.flip = transforms.RandomHorizontalFlip()
        self.totensor = transforms.ToTensor()
        self.cutout = Cutout(n_holes=n_holes, length=length)
        self.normalize = normalization
        if dataset == 'pet':
            self.crop = transforms.CenterCrop(224)
            # pet images are first shrunk to 256, then centre-cropped.
            self.resize = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224)])
        else:
            pad = 12 if dataset == 'stl10' else 4
            self.crop = transforms.RandomCrop(self.h, padding=pad)
            self.resize = transforms.Resize((self.h, self.w))
        self.ifcutout = ifcutout
        self.ifmask = ifmask

    def _mobius(self, image):
        """Resize, then apply the Mobius warp."""
        return self.mobius(self.resize(image))

    def _regular(self, image):
        """Standard augmentation: random crop, then random horizontal flip."""
        return self.flip(self.crop(image))

    def __call__(self, image):
        """Transform one PIL image into a normalized tensor."""
        # np.random.randint is used for every raffle.  The original code
        # mixed `random.randint` and `np.random.randint`, which only
        # worked because `from numpy import *` shadowed the stdlib
        # `random` with numpy's random module -- both names hit the same
        # numpy global generator, so this is behaviourally equivalent
        # and no longer depends on star-import shadowing.
        t = self.augmentation_type
        if t == 'noaug':
            image = self.crop(image)  # no mask in this mode
        elif t == 'onlymobius':
            image = self.add_mask(self._mobius(image), self.ifmask)
        elif t == 'regular':
            image = self.add_mask(self._regular(image), self.ifmask)
        elif t == 'layer20':
            # 20%: mobius layered under crop+flip; otherwise crop+flip only.
            if np.random.randint(10) < 2:
                image = self._regular(self._mobius(image))
            else:
                image = self._regular(image)
            image = self.add_mask(image, self.ifmask)
        elif t in self._TWO_WAY:
            den, mob = self._TWO_WAY[t]
            if np.random.randint(den) in mob:
                image = self._mobius(image)
            else:
                image = self._regular(image)
            image = self.add_mask(image, self.ifmask)
        elif t in self._THREE_WAY:
            den, mob, reg = self._THREE_WAY[t]
            raffle = np.random.randint(den)
            if raffle in mob:
                image = self._mobius(image)
            elif raffle in reg:
                image = self._regular(image)
            else:
                image = self.resize(image)  # "noaug" fallback
            image = self.add_mask(image, self.ifmask)
        elif t in self._MASK_COUNT:
            for _ in range(self._MASK_COUNT[t]):
                image = self.mobius_mask(image)
        else:
            # Unknown policy: keep the original behaviour (warn and skip
            # tensor conversion) so misconfiguration stays visible.
            print("no aug type")
            if self.ifcutout:
                image = self.cutout(image)
            return image
        image = self.normalize(self.totensor(image))
        if self.ifcutout:
            image = self.cutout(image)
        return image

    def add_mask(self, image, ifmask):
        """Return *image* with the Mobius occlusion mask applied when
        *ifmask* is true, otherwise unchanged.

        BUG FIX: the original wrote ``image == self.mobius_mask(image)``
        (a comparison whose result was discarded), so the mask was never
        actually applied.
        """
        if not ifmask:
            return image
        return self.mobius_mask(image)
|
<reponame>matthewmichihara/shiritori
from collections import namedtuple
from flask import Flask
from flask import request
from flask_cors import CORS
from google.cloud import datastore
from word import entity_to_word
from word import pick_your_word
from word import fetch_opponent_word
from word import Word
from romaji_normalizer import normalize
import random
import responses
import romkan
from opencensus.trace.ext.flask.flask_middleware import FlaskMiddleware
from opencensus.trace.exporters import stackdriver_exporter
from opencensus.trace import tracer as tracer_module
from opencensus.trace.exporters.transports.background_thread import BackgroundThreadTransport
from opencensus.trace.tracers import noop_tracer
from opencensus.trace import config_integration
# Toggle Stackdriver request tracing; get_tracer() returns a no-op
# tracer when this is False.
SHOULD_TRACE = True
app = Flask(__name__)
# Export OpenCensus traces to Stackdriver from a background thread so
# request handling is not blocked by trace uploads.
exporter = stackdriver_exporter.StackdriverExporter(
    project_id='redmond-211121',
    transport=BackgroundThreadTransport)
# Allow cross-origin requests to the API endpoints only.
CORS(app, resources={r"/api/*": {"origins": "*"}})
# https://gcloud-python.readthedocs.io/en/latest/datastore/usage.html#
client = datastore.Client()
@app.route('/api/playword', methods=['POST'])
def play_word():
    """Play one shiritori turn.

    Request JSON: 'input_word' (the player's word), optional
    'should_match' (kana the word must start with; absent/empty on the
    first move) and 'used_ids' (ids of words already played).
    Validates the word against the Word3 datastore kind, enforces the
    chaining and no-repeat rules, then picks an opponent word and
    returns the updated game state.
    """
    tracer = get_tracer(SHOULD_TRACE)
    with tracer.span(name='/api/playword') as play_word_span:
        input_json = request.get_json()
        raw_input_word = input_json['input_word']
        if raw_input_word:
            raw_input_word = raw_input_word.lower()
        print('raw_input_word: {}'.format(raw_input_word))
        # If this is None, match anything (first move).
        should_match = input_json.get('should_match')
        if not should_match:
            should_match = None
        print('should_match: {}'.format(should_match))
        used_ids = set(input_json.get('used_ids', []))
        print('used_ids: {}'.format(used_ids))
        # Convert the input to both romaji and kana spellings.
        word_roma = romkan.to_roma(raw_input_word)
        word_kana = romkan.to_kana(raw_input_word)
        should_match_roma = None
        if should_match is not None:
            should_match_roma = romkan.to_roma(should_match)
        print('word_roma: {} word_kana: {} should_match_roma: {}'.format(word_roma, word_kana, should_match_roma))
        first_kana = normalize(word_kana[0])
        last_kana = normalize(word_kana[-1])
        print('first_kana: {} last_kana: {}'.format(first_kana, last_kana))
        # Back to romaji. Using this as a tokenizer.
        first_roma = romkan.to_roma(first_kana)
        last_roma = romkan.to_roma(last_kana)
        print('first_roma: {} last_roma: {}'.format(first_roma, last_roma))
        your_word_entities = []
        with play_word_span.span(name='fetch your word by romaji') as query_your_word_span:
            # Check that input word is a valid Japanese word.
            query = client.query(kind='Word3')
            query.add_filter('romaji', '=', word_roma)
            your_word_results = list(query.fetch())
            your_word_entities = [entity_to_word(word) for word in your_word_results]
            print('num your_word_entities: {}'.format(len(your_word_entities)))
        if not your_word_entities:
            return responses.word_not_found_response(
                raw_input_word,
                should_match_roma,
                used_ids
            )
        your_word = pick_your_word(your_word_entities)
        # Check that the word beginning matches the previous word ending.
        word_does_not_match = should_match_roma is not None and should_match_roma != first_roma
        if word_does_not_match:
            return responses.word_does_not_match_previous_ending_response(
                raw_input_word,
                should_match_roma,
                used_ids,
                your_word
            )
        # Check that the word has not already been used.
        if your_word.id in used_ids:
            return responses.word_already_used_response(
                raw_input_word,
                should_match_roma,
                used_ids,
                your_word
            )
        opponent_word = fetch_opponent_word(last_roma, used_ids, play_word_span, client)
        if not opponent_word:
            return responses.no_more_words_response(
                raw_input_word,
                should_match_roma,
                used_ids,
                your_word
            )
        # The next player must start with the opponent word's last kana.
        new_should_match_roma = opponent_word.last_romaji
        used_ids.add(your_word.id)
        used_ids.add(opponent_word.id)
        return responses.success_response(
            raw_input_word,
            new_should_match_roma,
            used_ids,
            your_word,
            opponent_word
        )
def get_tracer(should_trace):
    """Return a Stackdriver-exporting tracer when *should_trace* is
    truthy, otherwise a no-op tracer."""
    if not should_trace:
        return noop_tracer.NoopTracer()
    return tracer_module.Tracer(exporter=exporter)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
|
<filename>UIWindows/UIDataloaderWindow.py
''' Copyright [2020] Hahn-Schickard-Gesellschaft für angewandte Forschung e.V., <NAME> + <NAME>
Copyright [2021] Karlsruhe Institute of Technology, <NAME>
SPDX-License-Identifier: Apache-2.0
============================================================================================================'''
import os
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class UIDataloaderWindow(QWidget):
    """Select a dataloader for optimization.

    This GUI window has a dropdown menu to choose if your training
    data is in directories or a file. Next you can search the training
    data with a browse window.
    """

    def __init__(self, WINDOW_WIDTH, WINDOW_HEIGHT, FONT_STYLE, parent=None):
        """Create and arrange all widgets of the dataloader selection screen.

        Args:
            WINDOW_WIDTH: window width in pixels; widget sizes scale with it.
            WINDOW_HEIGHT: window height in pixels; widget and font sizes
                scale with it.
            FONT_STYLE: font family name inserted into the generated
                stylesheets.
            parent: optional Qt parent widget.
        """
        super(UIDataloaderWindow, self).__init__(parent)
        self.WINDOW_WIDTH = WINDOW_WIDTH
        self.WINDOW_HEIGHT = WINDOW_HEIGHT
        self.FONT_STYLE = FONT_STYLE

        # Headline label.
        self.data_label = QLabel("Training data")
        self.data_label.setStyleSheet("font: " + str(int(0.035*self.WINDOW_HEIGHT)) + "px " + FONT_STYLE)
        self.data_label.setAlignment(Qt.AlignCenter)

        # Decorative database image.
        self.Datapng = QLabel(self)
        self.Datapng.setFixedWidth(0.25*self.WINDOW_HEIGHT)
        self.Datapng.setFixedHeight(0.25*self.WINDOW_HEIGHT)
        Dataimg = QPixmap(os.path.join('Images', 'Database.png'))
        self.Datapng.setPixmap(Dataimg)
        self.Datapng.setScaledContents(True)

        # Wizard progress bar image (step 3).
        self.step = QLabel(self)
        self.step.setFixedHeight(0.025*self.WINDOW_HEIGHT)
        self.step.setFixedWidth(0.35*self.WINDOW_WIDTH)
        step_img = QPixmap(os.path.join('Images', 'GUI_progress_bar_Demonstrator', 'GUI_demonstrator_step_3.png'))
        self.step.setPixmap(step_img)
        self.step.setAlignment(Qt.AlignCenter)
        self.step.setScaledContents(True)

        # Label that later displays the chosen data path (empty initially).
        self.data_path = QLabel("")
        self.data_path.setFixedWidth(0.7*self.WINDOW_WIDTH)
        self.data_path.setStyleSheet("font: " + str(int(0.032*self.WINDOW_HEIGHT)) + "px " + FONT_STYLE)
        self.data_path.setAlignment(Qt.AlignCenter)

        # Dropdown: training data as a directory tree or as a single file.
        self.dataloader_list = QComboBox()
        self.dataloader_list.setFixedWidth(0.21*self.WINDOW_WIDTH)
        self.dataloader_list.addItems(["Select PATH with data", "Select FILE with data"])
        self.dataloader_list.setStyleSheet("font: " + str(int(0.023*self.WINDOW_HEIGHT)) + "px " + FONT_STYLE)

        # Browse button with an explanatory tooltip of the supported formats.
        self.select_data_browse = QPushButton(" Select Data... ", self)
        self.select_data_browse.setFixedWidth(0.2*self.WINDOW_WIDTH)
        self.select_data_browse.setFixedHeight(0.05*self.WINDOW_HEIGHT)
        self.select_data_browse.setToolTip('Select the training data which the neural network requires for the optimization.\n'
                                           'The data can be transferred in different ways:\n'
                                           '- PATH (folder):\tImages are to be used as training data. In the given path there are\n'
                                           '\t\tsubfolders containing the name of the different classes of the neural\n'
                                           '\t\tnetwork and the corresponding images.\n'
                                           '- FILE (*.csv):\tThe data is stored in a CSV file.\n'
                                           '- FILE (*.py):\tThe data is loaded and returned in a Python script. It\n'
                                           '\t\tis important that the Python script contains the function\n'
                                           '\t\tget_data() with the return values x_train, y_train, x_test,\n'
                                           '\t\ty_test (training data, training label, test data, test label).\n'
                                           '\t\tThe return values here are Numpy arrays.')
        self.select_data_browse.setStyleSheet("""QPushButton {
                                              font: """ + str(int(0.035*self.WINDOW_HEIGHT)) + """px """ + FONT_STYLE + """}
                                              QPushButton::hover {
                                              background-color : rgb(10, 100, 200)}
                                              QToolTip {
                                              font: """ + str(int(0.025*self.WINDOW_HEIGHT)) + """px """ + FONT_STYLE + """;
                                              background-color : rgb(53, 53, 53);
                                              color: white;
                                              border: black solid 1px}""")

        # Navigation buttons (back / next).
        self.Back = QPushButton(self)
        self.Back.setIcon(QIcon(os.path.join('Images', 'back_arrow.png')))
        self.Back.setIconSize(QSize(0.04*self.WINDOW_HEIGHT, 0.04*self.WINDOW_HEIGHT))
        self.Back.setFixedHeight(0.05*self.WINDOW_HEIGHT)

        self.Next = QPushButton(self)
        self.Next.setIcon(QIcon(os.path.join('Images', 'next_arrow.png')))
        self.Next.setIconSize(QSize(0.04*self.WINDOW_HEIGHT, 0.04*self.WINDOW_HEIGHT))
        self.Next.setFixedHeight(0.05*self.WINDOW_HEIGHT)

        # Stack horizontal rows top-to-bottom: title, spacer, image, spacer,
        # path label, dropdown, browse button, spacer, navigation bar.
        self.horizontal_box = []

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[0].addWidget(self.data_label)
        self.horizontal_box[0].setAlignment(Qt.AlignTop)

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[1].addItem(QSpacerItem(0.2*self.WINDOW_WIDTH, 0.2*self.WINDOW_HEIGHT))

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[2].addStretch()
        self.horizontal_box[2].addWidget(self.Datapng)
        self.horizontal_box[2].addStretch()

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[3].addItem(QSpacerItem(0.03*self.WINDOW_WIDTH, 0.03*self.WINDOW_HEIGHT))

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[4].addStretch()
        self.horizontal_box[4].addWidget(self.data_path)
        self.horizontal_box[4].addStretch()

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[5].addStretch()
        self.horizontal_box[5].addWidget(self.dataloader_list)
        self.horizontal_box[5].addStretch()

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[6].addStretch()
        self.horizontal_box[6].addWidget(self.select_data_browse)
        self.horizontal_box[6].addStretch()

        self.horizontal_box.append(QHBoxLayout())
        self.horizontal_box[7].addItem(QSpacerItem(0.2*self.WINDOW_WIDTH, 0.2*self.WINDOW_HEIGHT))

        # Bottom navigation row: Back | progress image | Next.
        self.horizontal_box.append(QHBoxLayout())
        sublayout = QGridLayout()
        sublayout.addWidget(self.Back, 0, 0, Qt.AlignLeft)
        sublayout.addWidget(self.step, 0, 1)
        sublayout.addWidget(self.Next, 0, 2, Qt.AlignRight)
        self.horizontal_box[8].addLayout(sublayout)
        self.horizontal_box[8].setAlignment(Qt.AlignBottom)

        self.vertical_box = QVBoxLayout()
        for i in range(0, len(self.horizontal_box)):
            self.vertical_box.addLayout(self.horizontal_box[i])
        self.setLayout(self.vertical_box)
<reponame>heynemann/ea
import random
import math
from deap import base, tools
SMART_ALGO = False
def drawBoard(board):
    """Return a printable 3x3 rendering of *board*.

    ``board`` is a list of nine cells; indices 6-8 form the top row,
    3-5 the middle row and 0-2 the bottom row.  Falsy cells (``None``
    or ``''``) are rendered as blanks.
    """
    cells = [cell or ' ' for cell in board]
    rows = [' %s | %s | %s' % (cells[a], cells[b], cells[c])
            for a, b, c in ((6, 7, 8), (3, 4, 5), (0, 1, 2))]
    return '\n-----------\n'.join(rows) + '\n'
def get_game_board(game_choices):
    """Play out one tic-tac-toe game described by an evolved individual.

    ``game_choices`` is a list-like of numeric move genes with a
    ``player_starts`` attribute.  The evolved player is "O"; the scripted
    AI (``getComputerMove``) plays "X".

    Returns:
        ``(board, number_of_moves)`` where ``number_of_moves`` counts only
        the "O" moves actually placed.
    """
    board = [None] * 9
    # "O" moves first when the individual says the evolved player starts.
    player = game_choices.player_starts and "O" or "X"
    iteration = 0
    number_of_moves = 0
    while True:
        # Stop when the individual has no more genes to consume.
        if iteration > len(game_choices) - 2:
            break
        if player == "X":
            move = getComputerMove(board)
            board[move] = player
            player = "O"
        else:
            iteration += 1
            # Clamp the raw (possibly float) gene into a valid cell 0..8.
            move = max(0, min(int(math.floor(game_choices[iteration])), 8))
            # Skip occupied cells by consuming further genes.
            # NOTE(review): the same gene index is re-read before being
            # advanced, so one gene can be evaluated twice in a row —
            # confirm this is intended.
            while board[move] is not None and not isBoardFull(board) and iteration <= len(game_choices) - 2:
                move = max(0, min(int(math.floor(game_choices[iteration])), 8))
                iteration += 1
            if board[move] is None:
                board[move] = player
                number_of_moves += 1
            player = "X"
        if isWinner(board, "O") or isWinner(board, "X"):
            break
        if isBoardFull(board):
            break
    return board, number_of_moves
def isBoardFull(board):
    """Return True when none of the nine cells is free."""
    return all(board[i] is not None for i in range(9))
def getBoardCopy(board):
    """Return a shallow copy of *board*."""
    return list(board)
def isSpaceFree(board, move):
    """Return True when cell *move* on *board* has not been claimed yet."""
    taken = board[move] is not None
    return not taken
def makeMove(board, letter, move):
    """Place *letter* onto *board* at index *move* (mutates *board* in place)."""
    board[move] = letter
def isWinner(bo, le):
    """Return True when player *le* holds any complete row, column or diagonal.

    Index layout: 6-8 top row, 3-5 middle row, 0-2 bottom row.
    """
    win_lines = (
        (6, 7, 8), (3, 4, 5), (0, 1, 2),   # rows (top, middle, bottom)
        (6, 3, 0), (7, 4, 1), (8, 5, 2),   # columns (left, middle, right)
        (6, 4, 2), (8, 4, 0),              # diagonals
    )
    return any(bo[a] == le and bo[b] == le and bo[c] == le
               for a, b, c in win_lines)
def chooseRandomMoveFromList(board, movesList):
    """Return a random move from *movesList* whose cell is free on *board*.

    Returns None when no listed move is available.
    """
    open_moves = [m for m in movesList if board[m] is None]
    if not open_moves:
        return None
    return random.choice(open_moves)
def getComputerMove(board):  # NOQA
    """Choose a move (0-8) for the computer ("X") on *board*.

    Strategy, in priority order:
      1. win this turn if possible;
      2. block an immediate "O" win;
      3. take a corner (random when SMART_ALGO, else the first free one);
      4. take the center;
      5. take a side (random when SMART_ALGO, else the first free one).

    Returns None only when the board is completely full.
    """
    computerLetter = 'X'
    playerLetter = 'O'
    # First, check if we can win in the next move.
    for i in range(0, 9):
        copy = getBoardCopy(board)
        if isSpaceFree(copy, i):
            makeMove(copy, computerLetter, i)
            if isWinner(copy, computerLetter):
                return i
    # Check if the player could win on his next move, and block them.
    for i in range(0, 9):
        copy = getBoardCopy(board)
        if isSpaceFree(copy, i):
            makeMove(copy, playerLetter, i)
            if isWinner(copy, playerLetter):
                return i
    # Try to take one of the corners, if they are free.
    if SMART_ALGO:
        move = chooseRandomMoveFromList(board, [0, 2, 6, 8])
        if move is not None:
            return move
    else:
        # BUG FIX: the original returned the first corner in the list
        # without checking occupancy ("if move is not None" is always
        # true for a literal), overwriting taken cells and making the
        # center/side fallbacks unreachable.
        for corner in (0, 2, 6, 8):
            if isSpaceFree(board, corner):
                return corner
    # Try to take the center, if it is free.
    if isSpaceFree(board, 4):
        return 4
    # Fall back to a side move.
    if SMART_ALGO:
        return chooseRandomMoveFromList(board, [1, 3, 5, 7])
    for side in (1, 3, 5, 7):
        if isSpaceFree(board, side):
            return side
class Fitness(float):
    """A float fitness score that also records the game it came from.

    The numeric value is fixed by ``float.__new__``; ``board`` holds the
    final game board and ``iterations`` the number of evolved-player moves.
    """

    def __init__(self, *args, **kw):
        # BUG FIX: forwarding the constructor arguments to the superclass
        # (``super().__init__(*args, **kw)``) raises TypeError on Python 3,
        # where float no longer defines __init__ — the value is already set
        # by __new__, so no arguments must be forwarded.
        super(Fitness, self).__init__()
        # Extra bookkeeping attached to the numeric score.
        self.board = None
        self.iterations = 0
class Individual(list):
    """A candidate solution: a list of move genes plus game metadata."""

    def __init__(self, *args, **kw):
        super(Individual, self).__init__(*args, **kw)
        # Filled in by the solver after evaluation.
        self.fitness = None
        # Whether the evolved player ("O") makes the first move.
        self.player_starts = True

    def __str__(self):
        """Render the evaluated game board, or a placeholder if unevaluated."""
        if self.fitness is not None and self.fitness.board is not None:
            return drawBoard(self.fitness.board)
        return "No game to be found for this individual"
class GameSolver:
IND_SIZE = 10
CXPB, MUTPB = 0.5, 0.0
MUTATION_POSITIONS = 1
MUTATION_RANGE = 2
def __init__(self):
self.initialize_toolbox()
def initialize_toolbox(self):
self.toolbox = base.Toolbox()
self.toolbox.register("individual", self.get_random_game)
self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
self.toolbox.register("mate", self.mate_games)
self.toolbox.register("mutate", self.mutate_game)
self.toolbox.register("select", self.select_parents)
self.toolbox.register("evaluate", self.determine_fitness)
def mate_games(self, parent1, parent2):
choices = []
choices2 = []
for index in range(len(parent1)):
parent = random.choice([parent1, parent2])
choices.append(parent[index])
for index in range(len(parent1)):
parent = random.choice([parent1, parent2])
choices2.append(parent[index])
starts = random.choice([parent1, parent2]).player_starts
starts2 = random.choice([parent1, parent2]).player_starts
ind = Individual(choices)
ind.player_starts = starts
fitness = self.determine_fitness(ind)
if fitness > parent1.fitness:
parent1[:] = choices
parent1.player_starts = starts
ind = Individual(choices2)
ind.player_starts = starts2
fitness = self.determine_fitness(ind)
if fitness > parent2.fitness:
parent2[:] = choices2
parent2.player_starts = starts2
def mutate_game(self, game):
for i in range(self.MUTATION_POSITIONS):
position = random.randint(0, 8)
choice = game[position]
increment = ((random.random() * 2) - 1) * self.MUTATION_RANGE
new_choice = int(choice + increment)
if new_choice > 8:
new_choice -= 9
if new_choice < 0:
new_choice = 9 + new_choice
game[position] = new_choice
def select_parents(self, population, individuals):
return list(reversed(sorted(population, key=lambda item: item.fitness)))[:individuals]
def determine_fitness(self, game_choices):
board, iterations = get_game_board(game_choices)
distance = 5 - iterations
won = isWinner(board, "O") and 95.0 or 0.0
fit = Fitness((distance + won) / 100.0)
fit.board = board
fit.iterations = iterations
return fit
def get_random_game(self):
choices = [random.randint(0, 8) for i in range(50)]
ind = Individual(choices)
ind.player_starts = [random.randint(0, 1)] == 0
return ind
def get_top_solutions(self, population_size, generations):
pop = self.toolbox.population(n=population_size)
# Evaluate the entire population
fitnesses = map(self.toolbox.evaluate, pop)
for ind, fit in zip(pop, fitnesses):
ind.fitness = fit
for g in xrange(generations):
pop = list(reversed(sorted(pop)))
print "============= GENERATION %d (top: %.2f%%) ==============" % (g, (pop[0].fitness * 100))
offspring = self.toolbox.select(pop, len(pop))
offspring = map(self.toolbox.clone, offspring)
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < self.CXPB:
self.toolbox.mate(child1, child2)
child1.fitness = None
child2.fitness = None
for mutant in offspring:
if random.random() < self.MUTPB:
self.toolbox.mutate(mutant)
mutant.fitness = None
invalid_ind = [ind for ind in offspring if ind.fitness is None]
fitnesses = map(self.toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness = fit
pop[:] = offspring
return pop[:10]
def main():
    """Run the solver and print the ten best evolved games (Python 2 script)."""
    solver = GameSolver()
    for game in solver.get_top_solutions(population_size=100, generations=1000):
        # Two blank lines between boards, then the rendered game.
        print
        print
        print game
if __name__ == "__main__":
main()
|
import configparser

# CONFIG
# Read cluster/S3/IAM settings from dwh.cfg next to this script.
config = configparser.ConfigParser()
config.read('dwh.cfg')

# IAM role the Redshift cluster assumes to read from S3.
dwh_role_arn = config.get("IAM_ROLE", "ARN")
# S3 locations of the raw event/song data and the JSONPaths mapping file.
log_data = config['S3']['LOG_DATA']
log_jsonpath = config['S3']['LOG_JSONPATH']
song_data = config['S3']['SONG_DATA']
# DROP TABLES
# Staging tables first, then the star-schema tables.  "users" is quoted
# because it is a reserved word in some SQL dialects.
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS \"users\";"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_events (
artist TEXT,
auth TEXT,
firstName TEXT,
gender char,
itemInSession INT,
lastName TEXT,
length FLOAT8,
level TEXT,
location TEXT,
method varchar(3),
page TEXT,
registration BIGINT,
sessionId INT,
song TEXT,
status INT,
ts BIGINT,
userAgent TEXT,
userId int
);
""")
staging_songs_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_songs (
num_songs INT,
artist_id TEXT,
artist_latitude FLOAT8,
artist_longitude FLOAT8,
artist_location TEXT,
artist_Name TEXT,
song_id TEXT,
title TEXT,
duration FLOAT4,
year INT
);
""")
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id INT IDENTITY (1, 1),
start_time TIMESTAMP,
user_id INT,
level TEXT,
song_id TEXT,
artist_id TEXT,
session_id INT,
location TEXT,
user_agent TEXT,
PRIMARY KEY (songplay_id),
FOREIGN KEY (start_time) REFERENCES time(start_time),
FOREIGN KEY (user_id) REFERENCES users(user_id),
FOREIGN KEY (song_id) REFERENCES songs(song_id),
FOREIGN KEY (artist_id) REFERENCES artists(artist_id)
);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id INT,
first_name TEXT,
last_name TEXT,
gender TEXT,
level TEXT,
PRIMARY KEY (user_id)
) DISTSTYLE ALL;
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id TEXT,
title TEXT,
artist_id TEXT,
year SMALLINT,
duration NUMERIC,
PRIMARY KEY (song_id)
) DISTSTYLE ALL;
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id TEXT,
name TEXT,
location TEXT,
latitude FLOAT8,
longitude FLOAT8,
PRIMARY KEY (artist_id)
) DISTSTYLE ALL;
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
start_time TIMESTAMP,
hour SMALLINT,
day SMALLINT,
week SMALLINT,
month SMALLINT,
year SMALLINT,
weekday TEXT,
PRIMARY KEY (start_time)
) DISTSTYLE ALL;
""")
# STAGING TABLES

# NOTE(review): the {} placeholders are substituted unquoted, so the cfg
# values for LOG_DATA / SONG_DATA / LOG_JSONPATH / ARN must already carry
# their surrounding quotes — confirm against dwh.cfg.
# Events use an explicit JSONPaths file because the JSON keys do not match
# the column names exactly; songs use 'auto' mapping.
staging_events_copy = ("""
COPY staging_events
FROM {}
iam_role {}
FORMAT AS json {};
""").format(log_data, dwh_role_arn, log_jsonpath)

staging_songs_copy = ("""
COPY staging_songs
FROM {}
iam_role {}
FORMAT AS json 'auto';
""").format(song_data, dwh_role_arn)
# FINAL TABLES
songplay_table_insert = ("""
INSERT INTO songplays (start_time, user_id, level, song_id, artist_id, session_id, location, user_agent)
SELECT
TIMESTAMP 'epoch' + (se.ts / 1000) * INTERVAL '1 second' as start_time,
se.userId,
se.level,
ss.song_id,
ss.artist_id,
se.sessionId,
se.location,
se.userAgent
FROM staging_songs ss
JOIN staging_events se ON se.artist = ss.artist_name AND ss.title = se.song
WHERE se.page = 'NextSong';
""")
user_table_insert = ("""
INSERT INTO users (user_id, first_name, last_name, gender, level)
SELECT
DISTINCT userId,
firstName,
lastName,
gender,
level
FROM
staging_events
WHERE userId IS NOT NULL
AND page = 'NextSong';
""")
song_table_insert = ("""
INSERT INTO songs (song_id, title, artist_id, year, duration)
SELECT
DISTINCT song_id,
title,
artist_id,
year,
duration
FROM staging_songs
WHERE song_id IS NOT NULL;
""")
artist_table_insert = ("""
INSERT INTO artists (artist_id, name, location, latitude, longitude)
SELECT
DISTINCT artist_id,
artist_name,
artist_location,
artist_latitude,
artist_longitude
FROM staging_songs
WHERE artist_id IS NOT NULL;
""")
time_table_insert = ("""
INSERT INTO "time" (start_time, hour, day, week, month, year, weekday)
SELECT DISTINCT TIMESTAMP 'epoch' + (ts/1000) * INTERVAL '1 second' as start_time,
EXTRACT(HOUR FROM start_time) AS hour,
EXTRACT(DAY FROM start_time) AS day,
EXTRACT(WEEKS FROM start_time) AS week,
EXTRACT(MONTH FROM start_time) AS month,
EXTRACT(YEAR FROM start_time) AS year,
to_char(start_time, 'Day') AS weekday
FROM staging_events;
""")
# QUERY LISTS
# songplays is created last because its foreign keys reference the
# dimension tables created before it; drops run in the reverse-safe order.
create_table_queries = [staging_events_table_create, staging_songs_table_create, user_table_create, song_table_create,
                        artist_table_create, time_table_create, songplay_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop,
                      song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert,
                        time_table_insert]
|
<filename>tests/test_checkParamTableModel.py
from unittest import TestCase
import logging
import os
import systemcheck.models as models
from systemcheck.checks.models.checks import Check
from systemcheck.gui.models import GenericTreeModel
from systemcheck.models.meta.base import engine_from_config, scoped_session, sessionmaker
from systemcheck.systems.ABAP.plugins.actions.check_abap_count_table_entries import CheckAbapCountTableEntries, \
CheckAbapCountTableEntries__params
from PyQt5 import QtCore, QtWidgets, QtGui
import systemcheck_tools
from systemcheck.checks.gui.widgets.check_parameterEditor_widget import CheckParameterTableModel
class TestCheckParamTableModel(TestCase):
    """Unit tests for CheckParameterTableModel backed by a throwaway SQLite DB."""

    # On-disk SQLite file; recreated for every test and removed afterwards.
    PATH = r'TestCheckParamTableModel.sqlite'

    def setUp(self):
        """Create a fresh schema and make sure a single RootNode check exists."""
        self.logger = logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
        self.dbconfig = {'sqlalchemy.echo': False,
                         'sqlalchemy.url': 'sqlite:///' + self.PATH, }
        if os.path.exists(self.PATH):
            os.remove(self.PATH)
        self.engine = engine_from_config(self.dbconfig)
        models.meta.base.Base.metadata.create_all(self.engine)
        self.session_factory = sessionmaker(bind=self.engine)
        self.session = scoped_session(self.session_factory)
        # A parentless Check is the tree root; create it once per fresh DB.
        if self.session.query(Check).filter(Check.parent_id == None).count() == 0:
            self.session.add(Check(name='RootNode'))
            self.session.commit()

    def tearDown(self):
        """Close the session and delete the SQLite file."""
        self.session.close()
        if os.path.exists(self.PATH):
            os.remove(self.PATH)

    def populateTree(self):
        """Load the shared checks fixture and fetch the check under test."""
        systemcheck_tools.populateChecksTree(self.session)
        self.check = self.session.query(CheckAbapCountTableEntries).filter_by(name='Clients 001 and 066 removed').one()

    def test_rowCount(self):
        """The fixture check carries exactly two parameter sets."""
        self.populateTree()
        model = CheckParameterTableModel(self.check)
        self.assertEqual(model.rowCount(), 2)

    def test_columnCount(self):
        """The parameter table exposes a single column."""
        self.populateTree()
        model = CheckParameterTableModel(self.check)
        self.assertEqual(model.columnCount(), 1)

    def test_data(self):
        """data() returns the parameter names of the fixture check."""
        test_data = [['Client 001'],
                     ['Client 066']]
        self.populateTree()
        model = CheckParameterTableModel(self.check)
        for rownr in range(model.rowCount()):
            for colnr in range(model.columnCount()):
                idx = model.index(rownr, colnr)
                self.assertEqual(model.data(idx), test_data[rownr][colnr])

    def test_setData(self):
        """setData() persists edits that are then visible through data()."""
        test_data = [['Client 001_modified'],
                     ['Client 066_modified']]
        self.populateTree()
        model = CheckParameterTableModel(self.check)
        for rownr in range(model.rowCount()):
            for colnr in range(model.columnCount()):
                idx = model.index(rownr, colnr)
                model.setData(idx, test_data[rownr][colnr], QtCore.Qt.EditRole)
        for rownr in range(model.rowCount()):
            for colnr in range(model.columnCount()):
                idx = model.index(rownr, colnr)
                self.assertEqual(model.data(idx), test_data[rownr][colnr])

    def test_insertRows(self):
        """insertRows() grows the row count by the requested amount."""
        # NOTE(review): new_data is unused in this test — confirm whether the
        # inserted rows were meant to be populated with it.
        new_data = ['999', 'MANDT999', "MANDT EQ '001' 999", 0, 'EQ999']
        self.populateTree()
        model = CheckParameterTableModel(self.check)
        self.assertEqual(model.rowCount(), 2)
        model.insertRows(0, 10)
        self.assertEqual(model.rowCount(), 12)

    def test_removeRows(self):
        """Exercise removeRows().

        NOTE(review): removeRows(1, 0) removes zero rows and no assertion
        follows — this test currently verifies nothing; confirm the intended
        arguments and add an assertion on the resulting rowCount.
        """
        new_data = ['999', 'MANDT999', "MANDT EQ '001' 999", 0, 'EQ999']
        self.populateTree()
        model = CheckParameterTableModel(self.check)
        self.assertEqual(model.rowCount(), 2)
        model.removeRows(1, 0)
from django.views.generic import View
from games.models import GameTime
from django.contrib.auth import get_user_model
from base_user.tools.common import game_session_generate
from games.models import LeaderBoard
import facebook
User = get_user_model()
class UserGameSessionIdCheck(View):
    """
    Check user session id for game
    """

    def get_context_data(self, **kwargs):
        """Close out finished games for the current user's game session.

        Marks every GameTime row tied to the user's current game_session as
        done, then issues the player a fresh session id.  Always returns an
        empty context dict, even on error.
        """
        context = {}
        try:
            user = User.objects.get(id=self.request.user.id)
            if user.game_session:
                game_list = GameTime.objects.filter(game_session=user.game_session)
                if game_list:
                    for game in game_list:
                        game.done = True
                        game.last_done = True
                        game.save()
                    # NOTE(review): only the player of the *last* game in the
                    # queryset gets a regenerated session id — confirm all
                    # games in a session share one player, and whether this
                    # was meant to run inside the loop.
                    game.player.game_session = game_session_generate()
                    game.player.save()
                else:
                    pass
            else:
                pass
        except:
            # NOTE(review): bare except silently swallows every error,
            # including programming errors — consider narrowing to
            # User.DoesNotExist.
            pass
        return context
# def CheckLeaderBorad(game):
# try:
# if not game.player.hide_leaderboard:
# leader = LeaderBoard.objects.filter(games__player=game.player).last()
# count = LeaderBoard.objects.filter(games__player=game.player).count()
# if count > 1:
# for obj in LeaderBoard.objects.filter(games__player=game.player)[:count-1]:
# obj.delete()
# if leader.games.answer_count > game.answer_count:
# pass
# else:
# if leader.games.duration > game.duration:
# leader.games = game
# leader.save()
# else:
# pass
# else:
# pass
# except:
# if not game.player.hide_leaderboard:
# if game.answer_count > 7:
# leader = LeaderBoard(games=game)
# leader.save()
# else:
# pass
# else:
# pass
def CheckLeaderBorad(game):
    """Accumulate a finished game into the player's LeaderBoard row.

    Only games with more than 7 correct answers count, and only for players
    who have not opted out via ``hide_leaderboard``.

    NOTE(review): the function name misspells "Board"; renaming would break
    callers, so it is left as is.
    """
    try:
        if not game.player.hide_leaderboard:
            leader = LeaderBoard.objects.filter(player=game.player).last()
            count = LeaderBoard.objects.filter(player=game.player).count()
            # Keep only the most recent leaderboard row for this player.
            if count > 1:
                for obj in LeaderBoard.objects.filter(player=game.player)[:count-1]:
                    obj.delete()
            if game.answer_count > 7:
                leader.duration += game.duration
                leader.score += game.answer_count
                leader.save()
            else:
                pass
        else:
            pass
    except:
        # Reached when the player has no leaderboard row yet (``leader`` is
        # None above).  NOTE(review): the bare except also hides unrelated
        # database errors — consider catching AttributeError explicitly.
        if not game.player.hide_leaderboard:
            if game.answer_count > 7:
                leader = LeaderBoard(
                    player=game.player,
                    duration=game.duration,
                    score=game.answer_count
                )
                leader.save()
            else:
                pass
        else:
            pass
def increase_point(user):
    """Recompute and persist the user's point totals.

    ``user.count_point()`` is expected to return a sequence whose first
    three entries are the all-time, monthly and weekly point totals; these
    are stored on the user, which is then saved.
    """
    points = user.count_point()
    user.point, user.point_month, user.point_week = points[0], points[1], points[2]
    user.save()
"""
Check whether the user's Facebook friends also use the app
from django.contrib.auth import get_user_model
import facebook
User = get_user_model()
player = User.objects.filter(id=user_id)
facebook_user = player.social_auth.get(provider='facebook')
access_token = facebook_user.get_access_token("facebook") # get user access token
facebook_uuid = facebook_user.uid
graph = facebook.GraphAPI(access_token)
resp = graph.get_object(facebook_uuid + '/friends') # get friends data
data = resp['data']
"""
class FacebookFriendsList(object):
    """Fetch the Facebook friends of a user via the Graph API."""

    def __init__(self, user):
        # Django user whose linked Facebook account will be queried.
        self.user = user

    def get_user_friends(self):
        """Return the raw friends list for the wrapped user.

        Returns None when the user has no linked Facebook account or any
        Graph API call fails (errors are intentionally swallowed — this is
        a best-effort lookup).
        """
        try:
            social_account = self.user.social_auth.get(provider='facebook')
            token = social_account.get_access_token("facebook")  # get user access token
            fb_uuid = social_account.uid
            graph = facebook.GraphAPI(token)
            friends_response = graph.get_object(fb_uuid + '/friends')  # get friends data
            return friends_response['data']
        except:
            return None
import pytest
import responses
from django.forms import model_to_dict
from django.test import Client
from model_bakery import baker
from pollaris.app.models import SearchLog
from pollaris.app.tests.test_utils import (
GOOGLE_GEOCODE_URL_GENERIC,
SMARTY_URL_GENERIC,
check_search_log,
get_string_search,
google_geocode_bad_response,
google_geocode_response,
google_geocode_response_no_zip9,
google_geocode_response_nv,
post_string_search,
smarty_streets_response,
)
ADDRESS_STRING = "123 North Main ST boston MA"
SAMPLE_ZIP9 = "021293533"
@pytest.fixture
def address_string_request():
return {"search_string": ADDRESS_STRING}
# Tests for shortcode/QC API that takes address string, parses address, and looks up polling location for address


@pytest.mark.django_db
@responses.activate
def test_search_by_string_success(address_string_request, google_geocode_response):
    """Search by string; Google returns zip9; find match"""
    # Wire up zip9 -> precinct -> polling location so a zip9 hit resolves.
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", precinct=precinct, zip9=SAMPLE_ZIP9)
    pl = baker.make("app.PollingLocation")
    ppl = baker.make("app.PrecinctToPollingLocation", location=pl, precinct=precinct)
    # Stub the Google geocoder with a response that includes a zip9.
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_response,
        status=200,
    )
    response = post_string_search(address_string_request)
    assert response.status_code == 200
    json_body = response.json()
    assert json_body["match_type"] == "MATCH_ZIP9"
    check_search_log(True)
    assert json_body["home_address"]
    assert json_body["result_url"]
@pytest.mark.django_db
@responses.activate
def test_search_by_string_ss_fallback_success(
    address_string_request, smarty_streets_response, google_geocode_response_no_zip9
):
    """Search by string; Google doesn't return zip9; no address match; smartystreets fallback finds zip9; find match"""
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", precinct=precinct, zip9=SAMPLE_ZIP9)
    pl = baker.make("app.PollingLocation")
    ppl = baker.make("app.PrecinctToPollingLocation", location=pl, precinct=precinct)
    # Google resolves the address but without a zip9 ...
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_response_no_zip9,
        status=200,
    )
    # ... so the SmartyStreets fallback must supply it.
    responses.add(
        responses.GET, SMARTY_URL_GENERIC, json=smarty_streets_response, status=200
    )
    response = post_string_search(address_string_request)
    assert response.status_code == 200
    json_body = response.json()
    assert json_body["match_type"] == "MATCH_ZIP9"
    check_search_log(True)
    assert json_body["home_address"]
    assert json_body["result_url"]
@pytest.mark.django_db
@responses.activate
def test_search_by_string_failure(google_geocode_bad_response):
    """An unparseable address string yields a 400 and a failed search log."""
    address_str = "123 alkdfjaskdfjjk st"
    request = {"search_string": address_str}
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_bad_response,
        status=200,
    )
    response = post_string_search(request)
    assert response.status_code == 400
    json_body = response.json()
    assert json_body["error_message"] == "Could not parse address"
    check_search_log(False)
@pytest.mark.django_db
@responses.activate
def test_search_metadata(google_geocode_response):
    """Make sure metadata from GET request gets into search log correctly"""
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", precinct=precinct, zip9=SAMPLE_ZIP9)
    pl = baker.make("app.PollingLocation")
    ppl = baker.make("app.PrecinctToPollingLocation", location=pl, precinct=precinct)
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_response,
        status=200,
    )
    # Known query params land in dedicated SearchLog columns; unknown ones
    # are collected into other_data.
    url = "/api/v1/search/string?search_string=fake%20address&phone_number=1112223333&email=<EMAIL>&source=mc&asdf=1234"
    response = Client().get(url)
    assert response.status_code == 200
    search_log = check_search_log(True)
    assert search_log.search_string == "fake address"
    # NOTE(review): heap_id is populated from the phone_number param here —
    # confirm that is intended.
    assert search_log.heap_id == "1112223333"
    assert search_log.source == "mc"
    other_data = search_log.other_data
    assert other_data["email"] == "<EMAIL>"
    assert other_data["phone_number"] == "1112223333"
    assert other_data["asdf"] == "1234"
@pytest.mark.django_db
@responses.activate
def test_get_search_by_string_success(address_string_request, google_geocode_response):
    """Successful search with a GET request"""
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", precinct=precinct, zip9=SAMPLE_ZIP9)
    pl = baker.make("app.PollingLocation")
    ppl = baker.make("app.PrecinctToPollingLocation", location=pl, precinct=precinct)
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_response,
        status=200,
    )
    response = get_string_search(address_string_request)
    assert response.status_code == 200
    json_body = response.json()
    assert json_body["match_type"] == "MATCH_ZIP9"
    check_search_log(True)
    assert json_body["home_address"]
    assert json_body["result_url"]


@pytest.mark.django_db
@responses.activate
def test_get_search_by_string_invalid_address(google_geocode_bad_response):
    """Failed search with a GET request"""
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", precinct=precinct, zip9=SAMPLE_ZIP9)
    pl = baker.make("app.PollingLocation")
    ppl = baker.make("app.PrecinctToPollingLocation", location=pl, precinct=precinct)
    # Geocoder cannot parse the garbage string, so the API must 400.
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_bad_response,
        status=200,
    )
    address_str = "alkdfjaskdfjjkljdflsdkj"
    request = {"search_string": address_str}
    response = get_string_search(request)
    assert response.status_code == 400
# Early vote and regular location search -- from search string


@pytest.mark.django_db
@responses.activate
def test_search_string_success(google_geocode_response_nv):
    """Both early-vote and regular polling locations come back sorted by
    distance from the searched (NV) address."""
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", zip9="891455373", precinct=precinct)
    # Four early-vote locations at increasing distances from the address.
    el0 = baker.make("app.EarlyVoteLocation", latitude="35", longitude="-115")
    el1 = baker.make("app.EarlyVoteLocation", latitude="37", longitude="-117")
    el2 = baker.make("app.EarlyVoteLocation", latitude="34", longitude="-114")
    el3 = baker.make("app.EarlyVoteLocation", latitude="36", longitude="-116")
    baker.make("app.PrecinctToEVLocation", location=el0, precinct=precinct)
    baker.make("app.PrecinctToEVLocation", location=el1, precinct=precinct)
    baker.make("app.PrecinctToEVLocation", location=el2, precinct=precinct)
    baker.make("app.PrecinctToEVLocation", location=el3, precinct=precinct)
    # Four regular polling locations at the same coordinates.
    pl0 = baker.make("app.PollingLocation", latitude="35", longitude="-115")
    pl1 = baker.make("app.PollingLocation", latitude="37", longitude="-117")
    pl2 = baker.make("app.PollingLocation", latitude="34", longitude="-114")
    pl3 = baker.make("app.PollingLocation", latitude="36", longitude="-116")
    baker.make("app.PrecinctToPollingLocation", location=pl0, precinct=precinct)
    baker.make("app.PrecinctToPollingLocation", location=pl1, precinct=precinct)
    baker.make("app.PrecinctToPollingLocation", location=pl2, precinct=precinct)
    baker.make("app.PrecinctToPollingLocation", location=pl3, precinct=precinct)
    responses.add(
        responses.GET,
        GOOGLE_GEOCODE_URL_GENERIC,
        json=google_geocode_response_nv,
        status=200,
    )
    addr_string = "425 warmside dr, las vegas nv 89147"
    response = get_string_search({"search_string": addr_string})
    assert response.status_code == 200
    json_body = response.json()
    assert len(json_body["errors"]) == 0
    check_search_log(success=True)
    assert json_body["home_address"]
    assert json_body["result_url"]
    # Check that early vote locations returned are in correct sorted order
    assert len(json_body["early_vote_locations"]) == 4
    early_locations = json_body["early_vote_locations"]
    assert float(early_locations[0].get("latitude")) == 34
    assert float(early_locations[1].get("latitude")) == 35
    assert float(early_locations[2].get("latitude")) == 36
    assert float(early_locations[3].get("latitude")) == 37
    # Check regular polling locations
    assert len(json_body["polling_locations"]) == 4
    polling_locations = json_body["polling_locations"]
    assert float(polling_locations[0].get("latitude")) == 34
    assert float(polling_locations[1].get("latitude")) == 35
    assert float(polling_locations[2].get("latitude")) == 36
    assert float(polling_locations[3].get("latitude")) == 37
@pytest.mark.django_db
@responses.activate
def test_get_search_google_down(address_string_request, smarty_streets_response):
    """Google erroring out falls back to SmartyStreets and still matches."""
    precinct = baker.make("app.Precinct")
    ztp = baker.make("app.Zip9ToPrecinct", precinct=precinct, zip9=SAMPLE_ZIP9)
    pl = baker.make("app.PollingLocation")
    ppl = baker.make("app.PrecinctToPollingLocation", location=pl, precinct=precinct)
    # Google throws a 400 error, which only happens if something is wrong
    responses.add(responses.GET, GOOGLE_GEOCODE_URL_GENERIC, status=400)
    # Should fall back to this SS backup
    responses.add(
        responses.GET, SMARTY_URL_GENERIC, json=smarty_streets_response, status=200
    )
    response = post_string_search(address_string_request)
    assert response.status_code == 200
    json_body = response.json()
    assert json_body["match_type"] == "MATCH_ZIP9"
    check_search_log(True)
    assert json_body["home_address"]
    assert json_body["result_url"]
|
"""
FIXME module add docstrings
"""
from unittest import TestCase, main
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from easy_gscv.models import GSCV
# pylint: disable=C0103
# 'x' and 'y' are very descriptive in the realm of datascience.
# pylint: disable=W0212
# Method call required for testing
class TestProperties(TestCase):
    """
    Make sure all model properties work as they should.
    """

    def setUp(self) -> None:
        """Load the iris data, build a GSCV model, and record the expected classifier map."""
        from sklearn import datasets
        iris = datasets.load_iris()
        self.x = iris['data']
        self.y = iris['target']
        self.clf = KNeighborsClassifier()
        self.model = GSCV(self.clf, self.x, self.y)
        # Expected classifier-name -> instance mapping exposed by GSCV.
        self.model_dict = {
            'KNeighborsClassifier': KNeighborsClassifier(),
            'RandomForestClassifier': RandomForestClassifier(),
            'GradientBoostingClassifier': GradientBoostingClassifier(),
            'MLPClassifier': MLPClassifier(),
            'LogisticRegression': LogisticRegression(),
            'SVC': SVC()
        }

    def test_classifiers(self) -> None:
        """
        Test that the 'classifiers' property returns
        a list of valid classifiers
        """
        # list(dict) yields keys in insertion order; the original iterated
        # .items() and discarded every value (perflint PERF102).
        self.assertEqual(self.model.classifiers, list(self.model_dict))
class TestCLFTypes(TestCase):
    """Verify that the clf argument accepts multiple types."""

    def setUp(self) -> None:
        """Load the iris dataset used by every test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']

    def test_sklearn_classifier(self) -> None:
        """An sklearn estimator instance is accepted and resolved to itself."""
        estimator = KNeighborsClassifier()
        gscv = GSCV(estimator, self.x, self.y)
        self.assertEqual(
            type(gscv._get_model(estimator)), type(KNeighborsClassifier())
        )

    def test_string(self) -> None:
        """A classifier name given as a string is resolved to the matching estimator."""
        name = 'KNeighborsClassifier'
        gscv = GSCV(name, self.x, self.y)
        self.assertEqual(
            type(gscv._get_model(name)), type(KNeighborsClassifier())
        )
class TestExceptions(TestCase):
    """Make sure that the exceptions trigger when they should."""

    def setUp(self) -> None:
        """Load the iris data and prepare valid and invalid classifier candidates."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']

        class Nothing:
            """Nonsense object that should not pass"""
            def __init__(self, a):
                self.a = a

        self.wrong_object = Nothing(a='horse')
        self.wrong_clf = DecisionTreeClassifier()
        self.valid_clf = KNeighborsClassifier()

    def test_check_model_not_a_model(self) -> None:
        """An unknown classifier name must raise ValueError."""
        self.assertRaises(ValueError, GSCV, 'doesnotexist', self.x, self.y)

    def test_check_model_not_a_sklearn_model(self) -> None:
        """A non-sklearn object must raise TypeError."""
        self.assertRaises(TypeError, GSCV, self.wrong_object, self.x, self.y)

    def test_check_wrong_scikit_model(self) -> None:
        """An unsupported sklearn estimator must raise ValueError."""
        self.assertRaises(ValueError, GSCV, self.wrong_clf, self.x, self.y)
class TestKNeighborsClassifier(TestCase):
    """Exercise model creation and the public methods for KNeighborsClassifier."""

    def setUp(self) -> None:
        """Load the iris data and the classifier under test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']
        self.valid_clf = KNeighborsClassifier()

    def _make_model(self, **kwargs):
        """Build a GSCV wrapper around the classifier under test."""
        return GSCV(self.valid_clf, self.x, self.y, **kwargs)

    def test_default_params(self) -> None:
        """The correct default parameter grid is selected for the model."""
        expected = {
            'n_neighbors': [3, 5, 8, 10, 15],
            'weights': ['uniform', 'distance'],
        }
        self.assertEqual(self._make_model().params, expected)

    def test_custom_params(self) -> None:
        """Custom params override the default ones."""
        model = self._make_model(params={
            'n_neighbors': [3, 15],
            'weights': ['uniform'],
        })
        self.assertEqual(
            model.params,
            {'n_neighbors': [3, 15], 'weights': ['uniform']},
        )

    def test_create(self) -> None:
        """The create method returns a value."""
        self.assertIsNotNone(self._make_model().create())

    def test_score(self) -> None:
        """The score method returns a value in [0, 1]."""
        score = self._make_model().score()
        self.assertIsNotNone(score)
        self.assertTrue(0 <= score <= 1)

    def test_get_best_estimator(self) -> None:
        """The 'get_best_estimator' method returns a value."""
        self.assertIsNotNone(self._make_model().get_best_estimator())

    def test_get_fit_details(self) -> None:
        """The 'get_fit_details' method returns a value."""
        self.assertIsNotNone(self._make_model().get_fit_details())
class TestLogisticRegression(TestCase):
    """Exercise model creation and the public methods for LogisticRegression."""

    def setUp(self) -> None:
        """Load the iris data and the classifier under test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']
        self.valid_clf = LogisticRegression()

    def _make_model(self, **kwargs):
        """Build a GSCV wrapper around the classifier under test."""
        return GSCV(self.valid_clf, self.x, self.y, **kwargs)

    def test_default_params(self) -> None:
        """The correct default parameter grid is selected for the model."""
        expected = {
            'C': [0.01, 1, 100],
            'penalty': ['l1', 'l2'],
        }
        self.assertEqual(self._make_model().params, expected)

    def test_custom_params(self) -> None:
        """Custom params override the default ones."""
        model = self._make_model(params={
            'C': [1, 100],
            'penalty': ['l2'],
        })
        self.assertEqual(model.params, {'C': [1, 100], 'penalty': ['l2']})

    def test_create(self) -> None:
        """The create method returns a value."""
        self.assertIsNotNone(self._make_model().create())

    def test_score(self) -> None:
        """The score method returns a value in [0, 1]."""
        score = self._make_model().score()
        self.assertIsNotNone(score)
        self.assertTrue(0 <= score <= 1)

    def test_get_best_estimator(self) -> None:
        """The 'get_best_estimator' method returns a value."""
        self.assertIsNotNone(self._make_model().get_best_estimator())

    def test_get_fit_details(self) -> None:
        """The 'get_fit_details' method returns a value."""
        self.assertIsNotNone(self._make_model().get_fit_details())
class TestMLPClassifier(TestCase):
    """Exercise model creation and the public methods for MLPClassifier."""

    # Reduced grid used by the heavier tests to keep runtime manageable.
    _LIGHT_PARAMS = {'alpha': [0.0001, 0.01, 0.1, 1]}

    def setUp(self) -> None:
        """Load the iris data and the classifier under test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']
        self.valid_clf = MLPClassifier()

    def _make_model(self, **kwargs):
        """Build a GSCV wrapper around the classifier under test."""
        return GSCV(self.valid_clf, self.x, self.y, **kwargs)

    def _make_light_model(self):
        """Build a GSCV wrapper using the reduced parameter grid."""
        return self._make_model(params=dict(self._LIGHT_PARAMS))

    def test_default_params(self) -> None:
        """The correct default parameter grid is selected for the model."""
        expected = {
            'hidden_layer_sizes': [
                (10,),
                (10, 10),
                (10, 10, 10)
            ],
            'alpha': [0.0001, 0.01, 0.1, 1],
            'solver': ['lbfgs'],
        }
        self.assertEqual(self._make_model().params, expected)

    def test_custom_params(self) -> None:
        """Custom params override the default ones."""
        model = self._make_model(params=dict(self._LIGHT_PARAMS))
        self.assertEqual(model.params, self._LIGHT_PARAMS)

    def test_create(self) -> None:
        """The create method returns a value."""
        self.assertIsNotNone(self._make_light_model().create())

    def test_score(self) -> None:
        """The score method returns a value in [0, 1]."""
        score = self._make_light_model().score()
        self.assertIsNotNone(score)
        self.assertTrue(0 <= score <= 1)

    def test_get_best_estimator(self) -> None:
        """The 'get_best_estimator' method returns a value."""
        self.assertIsNotNone(self._make_light_model().get_best_estimator())

    def test_get_fit_details(self) -> None:
        """The 'get_fit_details' method returns a value."""
        self.assertIsNotNone(self._make_light_model().get_fit_details())
class TestRandomForestClassifier(TestCase):
    """Exercise model creation and the public methods for RandomForestClassifier."""

    # Reduced grid used by every test to keep runtime manageable.
    # NOTE: the default-params test is intentionally absent — the full default
    # grid is too expensive to fit in the test suite.
    _LIGHT_PARAMS = {
        'n_estimators': [100],
        'max_features': ['sqrt'],
        'max_depth': [3],
    }

    def setUp(self) -> None:
        """Load the iris data and the classifier under test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']
        self.valid_clf = RandomForestClassifier()

    def _make_light_model(self):
        """Build a GSCV wrapper (all cores) with the reduced parameter grid."""
        return GSCV(self.valid_clf, self.x, self.y, n_jobs=-1,
                    params=dict(self._LIGHT_PARAMS))

    def test_custom_params(self) -> None:
        """Custom params override the default ones."""
        self.assertEqual(self._make_light_model().params, self._LIGHT_PARAMS)

    def test_create(self) -> None:
        """The create method returns a value."""
        self.assertIsNotNone(self._make_light_model().create())

    def test_score(self) -> None:
        """The score method returns a value in [0, 1]."""
        score = self._make_light_model().score()
        self.assertIsNotNone(score)
        self.assertTrue(0 <= score <= 1)

    def test_get_best_estimator(self) -> None:
        """The 'get_best_estimator' method returns a value."""
        self.assertIsNotNone(self._make_light_model().get_best_estimator())

    def test_get_fit_details(self) -> None:
        """The 'get_fit_details' method returns a value."""
        self.assertIsNotNone(self._make_light_model().get_fit_details())
class TestGradientBoostingClassifier(TestCase):
    """Exercise model creation and the public methods for GradientBoostingClassifier."""

    # Reduced grid used by the heavier tests to keep runtime manageable.
    # NOTE: the default-params test is intentionally absent — the full default
    # grid is too expensive to fit in the test suite.
    _LIGHT_PARAMS = {
        'n_estimators': [100],
        'learning_rate': [0.1],
        'max_depth': [1],
        'max_features': ['sqrt'],
    }

    def setUp(self) -> None:
        """Load the iris data and the classifier under test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']
        self.valid_clf = GradientBoostingClassifier()

    def _make_model(self, **kwargs):
        """Build a GSCV wrapper (all cores) around the classifier under test."""
        return GSCV(self.valid_clf, self.x, self.y, n_jobs=-1, **kwargs)

    def _make_light_model(self):
        """Build a GSCV wrapper using the reduced parameter grid."""
        return self._make_model(params=dict(self._LIGHT_PARAMS))

    def test_custom_params(self) -> None:
        """Custom params override the default ones."""
        model = self._make_model(params={
            'n_estimators': [100],
            'learning_rate': [0.1, 0.5],
            'max_depth': [1, 3],
            'max_features': ['sqrt'],
        })
        self.assertEqual(
            model.params,
            {
                'n_estimators': [100],
                'learning_rate': [0.1, 0.5],
                'max_depth': [1, 3],
                'max_features': ['sqrt'],
            },
        )

    def test_create(self) -> None:
        """The create method returns a value."""
        self.assertIsNotNone(self._make_light_model().create())

    def test_score(self) -> None:
        """The score method returns a value in [0, 1]."""
        score = self._make_light_model().score()
        self.assertIsNotNone(score)
        self.assertTrue(0 <= score <= 1)

    def test_get_best_estimator(self) -> None:
        """The 'get_best_estimator' method returns a value."""
        self.assertIsNotNone(self._make_light_model().get_best_estimator())

    def test_get_fit_details(self) -> None:
        """The 'get_fit_details' method returns a value."""
        self.assertIsNotNone(self._make_light_model().get_fit_details())
class TestSVMClassifier(TestCase):
    """Exercise model creation and the public methods for SVC."""

    def setUp(self) -> None:
        """Load the iris data and the classifier under test."""
        from sklearn import datasets
        data = datasets.load_iris()
        self.x, self.y = data['data'], data['target']
        self.valid_clf = SVC()

    def _make_model(self, **kwargs):
        """Build a GSCV wrapper (all cores) around the classifier under test."""
        return GSCV(self.valid_clf, self.x, self.y, n_jobs=-1, **kwargs)

    def test_default_params(self) -> None:
        """The correct default grid (rbf and poly kernel sub-grids) is selected."""
        expected = [
            {
                'kernel': ['rbf'],
                'C': [0.1, 1, 100, 1000],
                'gamma': [0.01, 0.1, 1, 10, 'auto'],
            },
            {
                'kernel': ['poly'],
                'degree': [1, 2, 3, 4],
                'coef0': [0.0, 1],
                'C': [0.1, 1, 100, 1000],
                'gamma': [0.01, 0.1, 1, 10, 'auto'],
            },
        ]
        self.assertEqual(self._make_model().params, expected)

    def test_custom_params(self) -> None:
        """Custom params override the default ones."""
        model = self._make_model(params=[
            {'kernel': ['rbf'], 'C': [100, 1000], 'gamma': [0.01, 'auto']},
        ])
        self.assertEqual(
            model.params,
            [{'kernel': ['rbf'], 'C': [100, 1000], 'gamma': [0.01, 'auto']}],
        )

    def test_create(self) -> None:
        """The create method returns a value."""
        self.assertIsNotNone(self._make_model().create())

    def test_score(self) -> None:
        """The score method returns a value in [0, 1]."""
        score = self._make_model().score()
        self.assertIsNotNone(score)
        self.assertTrue(0 <= score <= 1)

    def test_get_best_estimator(self) -> None:
        """The 'get_best_estimator' method returns a value."""
        self.assertIsNotNone(self._make_model().get_best_estimator())

    def test_get_fit_details(self) -> None:
        """The 'get_fit_details' method returns a value."""
        self.assertIsNotNone(self._make_model().get_fit_details())
if __name__ == '__main__':
    print('\n\n')
    print("Running tests!")
    # Fixed wording of the warning: "test small" -> "small test", "relativly"
    # -> "relatively", missing "expensive", and a missing space between the
    # concatenated string literals after "trained."
    print(
        'Warning: Despite the use of a (relatively) small test dataset '
        'these tests can take up to 2 minutes to complete on a relatively '
        'modern computer due to the number of models being trained. '
        'The models have been set up to utilize all available cpu cores '
        'to speed up the process and the more computationally expensive models '
        'have their "test_default_params" method tests disabled, while their '
        'other tests use lighter-than-default params.'
        '\n\nResetting these values to their default parameters '
        'will increase the runtime significantly'
    )
    print('\n\n')
    main()
|
<filename>trRosetta/features.py<gh_stars>10-100
import numpy as np
import tensorflow as tf
# reweight MSA based on cutoff
def reweight(msa1hot, cutoff):
    """Per-sequence weights: 1 / (number of sequences whose pairwise identity exceeds cutoff * length)."""
    with tf.name_scope('reweight'):
        seqlen = tf.cast(tf.shape(msa1hot)[1], tf.float32)
        # Pairwise identity counts between every pair of sequences.
        identity = tf.tensordot(msa1hot, msa1hot, [[1, 2], [1, 2]])
        similar = tf.cast(identity > seqlen * cutoff, dtype=tf.float32)
        return 1.0 / tf.reduce_sum(similar, -1)
# 1d features
def get_features1d(msa1hot, insertions, w):
    """Per-position 1-D features from a one-hot MSA.

    Parameters
    ----------
    msa1hot : one-hot encoded MSA, shape (nseq, ncol, 21) — 20 amino acids + gap
        (depth 21 is set by the tf.one_hot call in get_features).
    insertions : per-sequence insertion counts  # assumes shape (nseq, ncol) — TODO confirm
    w : per-sequence weights from reweight(), shape (nseq,)

    Returns
    -------
    Tensor of shape (1, ncol, 44): query one-hot (20) + weighted AA
    frequencies (21) + positional entropy (1) + log non-gap counts (1)
    + insertion fraction (1).
    """
    nc = tf.shape(msa1hot)[1]
    # Effective number of sequences (sum of reweighting weights).
    beff = tf.reduce_sum(w)
    # Weight each sequence's one-hot rows.
    msw = w[:, None, None] * msa1hot
    # Weighted state frequencies per column; epsilon avoids log(0) below.
    f_i = tf.reduce_sum(msw, axis=0) / beff + 1e-9
    # Positional (Shannon) entropy over the 21 states.
    h_i = tf.reduce_sum(-f_i * tf.math.log(f_i), axis=1)
    # Log of weighted counts over the first 20 states (amino acids, gap excluded).
    n_i = tf.math.log(tf.reduce_sum(msw[:, :, :20], axis=[0, 2]))
    # Fraction of sequences with an insertion at each position.
    ins = tf.reduce_mean(tf.cast(insertions > 0, dtype=tf.float32), axis=0)
    # 20 + 21 + 1 + 1 + 1 = 44 features per column; leading batch dim of 1.
    f1d = tf.concat([msa1hot[0, :, :20], f_i, h_i[:, None], n_i[:, None], ins[:, None]], axis=1)
    f1d = tf.reshape(f1d, [1, nc, 44])
    return f1d
# 2d features
def get_features2d(msa1hot, weights, penalty):
    """Pairwise 2-D features from the regularized inverse covariance of the MSA.

    Parameters
    ----------
    msa1hot : one-hot encoded MSA, shape (nseq, ncol, 21)
    weights : per-sequence weights from reweight(), shape (nseq,)
    penalty : ridge-regularization strength applied before matrix inversion

    Returns
    -------
    Tensor of shape (1, ncol, ncol, 444): 441 inverse-covariance couplings
    (21*21 per pair) + APC-corrected contact score (1) + pairwise entropy (1)
    + pairwise gap-gap frequency (1).
    """
    nr = tf.shape(msa1hot)[0]  # number of sequences
    nc = tf.shape(msa1hot)[1]  # number of columns
    ns = tf.shape(msa1hot)[2]  # number of states (21)
    neff = tf.reduce_sum(weights)  # effective number of sequences
    with tf.name_scope('covariance'):
        # Flatten (column, state) so the covariance is over nc*ns variables.
        x = tf.reshape(msa1hot, (nr, nc * ns))
        num_points = tf.reduce_sum(weights) - tf.sqrt(tf.reduce_mean(weights))
        # Weighted mean, centering, and weighted covariance.
        mean = tf.reduce_sum(x * weights[:, None], axis=0, keepdims=True) / num_points
        x = (x - mean) * tf.sqrt(weights[:, None])
        cov = tf.matmul(tf.transpose(x), x) / num_points
    with tf.name_scope('inv_convariance'):
        # Ridge-regularize before inversion; the penalty shrinks with sqrt(neff).
        cov_reg = cov + tf.eye(nc * ns) * penalty / tf.sqrt(neff)
        inv_cov = tf.linalg.inv(cov_reg)
        x1 = tf.reshape(inv_cov, (nc, ns, nc, ns))
        x2 = tf.transpose(x1, [0, 2, 1, 3])
        features = tf.reshape(x2, (nc, nc, ns * ns))
        # Coupling strength: Frobenius norm over the state block with the last
        # state (index 20) excluded; the diagonal is zeroed.
        x3 = tf.sqrt(tf.reduce_sum(tf.square(x1[:, :-1, :, :-1]), (1, 3))) * (1 - tf.eye(nc))
        # Average-product correction (APC) of the coupling matrix.
        apc = tf.reduce_sum(x3, 0, keepdims=True) * tf.reduce_sum(x3, 1, keepdims=True) / tf.reduce_sum(x3)
        contacts = (x3 - apc) * (1 - tf.eye(nc))
    with tf.name_scope('misc_2d'):
        # Weighted pairwise joint state frequencies; epsilon avoids log(0).
        f_ij = tf.tensordot(weights[:, None, None] * msa1hot, msa1hot, [[0], [0]]) / neff + 1e-9
        # Pairwise gap-gap frequency (state index 20).
        gaps = f_ij[:, 20, :, 20]
        # Pairwise (joint) entropy over the 21x21 state pairs.
        h_ij = tf.reduce_sum(-f_ij * tf.math.log(f_ij), axis=[1, 3])
    # 441 + 1 + 1 + 1 = 444 channels, with a leading batch dim of 1.
    f2d = tf.concat([features, contacts[:, :, None], h_ij[:, :, None], gaps[:, :, None]], axis=2)
    f2d = tf.reshape(f2d, [1, nc, nc, 444])
    return f2d
# network inputs
def get_features(msa, ins, idx, params):
    """Assemble the network input tensor from an integer-encoded MSA.

    Parameters
    ----------
    msa : integer-encoded MSA, shape (nrow, ncol) — values presumably in
        [0, 20] since they are one-hot encoded with depth 21 below.
    ins : insertion counts forwarded to get_features1d.
    idx : residue indices, shape (ncol,), used for the sequence-separation feature.
    params : dict with 'WMIN' (reweighting cutoff) and 'DCAREG' (ridge penalty).

    Returns
    -------
    Tensor of shape (1, ncol, ncol, 2*44 + 444 + 1): 1-D features tiled for
    both residues of every pair, 2-D features, and log sequence separation.
    """
    nrow = tf.shape(msa)[0]
    ncol = tf.shape(msa)[1]
    # convert msa to 1-hot (21 states)
    msa1hot = tf.one_hot(msa, 21, dtype=tf.float32)
    # extract 1d and 2d features from the msa
    w = reweight(msa1hot, params['WMIN'])
    f1d = get_features1d(msa1hot, ins, w)
    # 2-D (covariance-based) features need more than one sequence; with a
    # single sequence the 444-channel block is all zeros.
    f2d = tf.cond(nrow > 1, lambda: get_features2d(msa1hot, w, params['DCAREG']),
                  lambda: tf.zeros([1, ncol, ncol, 444], tf.float32))
    # get sequence separation: log(|i - j| + 1)
    seqsep = tf.abs(idx[:, None] - idx[None, :]) + 1
    seqsep = tf.math.log(tf.cast(seqsep, dtype=tf.float32))
    # stack all features together: f1d tiled along both pair axes, then f2d, then seqsep
    inputs = tf.concat([tf.tile(f1d[:, :, None, :], [1, 1, ncol, 1]),
                        tf.tile(f1d[:, None, :, :], [1, ncol, 1, 1]),
                        f2d,
                        seqsep[None, :, :, None]], axis=-1)
    inputs = tf.reshape(inputs, [1, ncol, ncol, 2*44+444+1])
    return inputs
|
import os, ntpath
from treelib import Tree
from gensim.models import KeyedVectors
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-paper')
class ParentChildEvaluate:
    """
    Class to perform intrinsic evaluation of embeddings using the hierarchical relation of parent/child domains

    1) parse ParendChildTreeFile.txt from interpro
    2) for each child of root
        nn = ask embeddings model to give M nearest neighbors
        calculate_precision_atM(child.descendants, nn)
        calculate_recall_atN(child.descendants, nn)
    3) plot histogram of precision and recall

    Credits: https://medium.com/@m_n_malaeb/recall-and-precision-at-k-for-recommender-systems-618483226c54
    """

    def __init__(self, data_path):
        """
        ParentChildEvaluate class init

        Parameters
        ----------
        data_path : str
            full data path

        Returns
        -------
        None
        """
        print("ParentChildEvaluate")
        self.data_path = data_path
        # Parsed InterPro hierarchy, rooted at the synthetic "INTERPRO" node.
        self.tree = Tree()

    def get_model_name(self):
        """
        Get embedding model name (the basename of the loaded model file)

        Returns
        -------
        str
            embedding model name
        """
        return ntpath.basename(self.model_file)

    def load_emb_model(self, model_file, is_model_binary):
        """
        Load embedding model

        Parameters
        ----------
        model_file : str
            model file name
        is_model_binary : bool
            model is saved in binary format (True), otherwise (False)

        Returns
        -------
        None
        """
        self.model_file = model_file
        self.emb_model = KeyedVectors.load_word2vec_format(model_file, binary=is_model_binary)

    def parse_parent_child_file(self, parent_child_file_name, out_path, output_file_name, save_parsed_tree=False):
        """
        Parse the parent child file into self.tree

        Parameters
        ----------
        parent_child_file_name : str
            parent child file name
        out_path : str
            output data path
        output_file_name : str
            output file name
        save_parsed_tree : bool
            after parsing save parsed tree (True), otherwise (False)

        Returns
        -------
        None
        """
        previous_num_minus_signs = 0
        last_interpro_id = None
        self.tree.create_node("INTERPRO", "INTERPRO")
        current_parent = "INTERPRO"
        with open(parent_child_file_name, 'r') as parent_child_file:
            for line in parent_child_file:
                line = line.strip()
                # Depth in the hierarchy is encoded by the number of leading "--" pairs.
                current_num_minus_signs = line[0:line.find("IPR")].count("--")
                double_colon_split = line.strip("--").split("::")
                interpro_id = double_colon_split[0]
                assert interpro_id[
                    0:3] == "IPR", "AssertionError: {} \n interpro id should start with IPR and has length of 9.".format(
                    interpro_id)
                if current_num_minus_signs == 0:
                    # top-level domain -> direct child of the root
                    current_parent = "INTERPRO"
                    self.tree.create_node(interpro_id, interpro_id, parent=current_parent)
                else:
                    # check if you are still with current parent or you need to create a new one
                    if current_num_minus_signs == previous_num_minus_signs:  # same level as last parent
                        self.tree.create_node(interpro_id, interpro_id, parent=current_parent)
                    elif current_num_minus_signs > previous_num_minus_signs:  # one level down -> last id becomes the new parent
                        current_parent = last_interpro_id
                        self.tree.create_node(interpro_id, interpro_id, parent=current_parent)
                    else:  # one level up from last parent -> get parent of the current parent
                        # NOTE(review): this moves up exactly one level; assumes the
                        # file never jumps up more than one level at a time — TODO confirm.
                        if current_parent == "INTERPRO":  # if one level up is the root then your papa is the root
                            papa = "INTERPRO"
                        else:  # if one level up is not the root then get the parent of your parent (papa)
                            papa = self.tree[current_parent].bpointer
                        self.tree.create_node(interpro_id, interpro_id, parent=papa)
                        current_parent = papa
                previous_num_minus_signs = current_num_minus_signs
                last_interpro_id = interpro_id
        if save_parsed_tree:
            self.tree.save2file(filename=os.path.join(out_path, output_file_name))

    def get_nn_calculate_precision_recall_atN(self, N, plot_histograms, save_diagnostics):
        """
        Get nearest domain vectors for each root child and calculate precision/recall
        against the ground truth (parsed tree)

        Parameters
        ----------
        N : int
            number of nearest domain vectors;
            if N == 100 then retrieve as many as the children of a domain in the parsed tree
        plot_histograms : bool
            plot histograms for performance metrics (True), otherwise (False)
        save_diagnostics : bool
            save diagnostic plots for domains with recall 0

        Returns
        -------
        None
        """
        print("Get NN and calculate precision and recall at {}".format(N))
        recalls_n = []
        precisions_n = []
        interpros_recall0 = []
        interpros_num_children_recall0 = []
        # N == 100 is a sentinel meaning "retrieve as many neighbors as descendants".
        retrieve_all_children = N == 100
        for interpro_node in self.tree.children("INTERPRO"):
            recall_n = 0.0
            precision_n = 0.0
            all_children = self.tree.subtree(interpro_node.identifier).all_nodes()
            assert interpro_node in all_children, "AssertionError: parent {} is not in the set of all children.".format(
                interpro_node.identifier)
            all_children.remove(interpro_node)
            if retrieve_all_children:
                N = len(all_children)
            # Idiomatic membership test (the original called __contains__ directly).
            if interpro_node.identifier in self.emb_model:
                nearest_neighbor_ids = {
                    nn[0] for nn in self.emb_model.most_similar(positive=interpro_node.identifier, topn=N)}
            else:
                print("Model does not contain this id.")
                continue
            true_positives = {child.identifier for child in all_children}.intersection(nearest_neighbor_ids)
            assert len(all_children) > 0 and len(
                nearest_neighbor_ids) == N, "AssertionError: For parent {} all children should be > 0 and nearest neighbors should be equal to N.".format(
                interpro_node.identifier)
            recall_n = len(true_positives) / len(all_children)
            precision_n = len(true_positives) / len(nearest_neighbor_ids)
            assert 0.0 <= recall_n <= 1.0 and 0.0 <= precision_n <= 1.0, "AssertionError: For parent {} recall or precision is not at (0,1]".format(
                interpro_node.identifier)
            recalls_n.append(recall_n)
            precisions_n.append(precision_n)
            if recall_n == 0.0:
                interpros_recall0.append(interpro_node.identifier)
                interpros_num_children_recall0.append(len(all_children))
        if retrieve_all_children:  # for printing in title
            N = 100
        if plot_histograms:
            if retrieve_all_children:
                self.plot_histogram(recalls_n, "Recall", "Recall", "Number of Interpro domains", "recall")
            else:
                self.plot_histogram(recalls_n, "Recall@{}".format(N), "Recall", "Number of Interpro domains",
                                    "recall_{}".format(N))
            self.plot_histogram(precisions_n, "Precision@{}".format(N), "Precision", "Number of Interpro domains",
                                "precision_{}".format(N))
        if retrieve_all_children:
            avg_recall = sum(recalls_n) / len(recalls_n)
            print("Average recall at 100: {:.3f}".format(avg_recall))
        if save_diagnostics:
            self.save_diagnostics_recall0(interpros_recall0, interpros_num_children_recall0)

    def save_diagnostics_recall0(self, interpros_recall0, interpros_num_children_recall0):
        """
        Save diagnostics for domains with recall of 0

        Parameters
        ----------
        interpros_recall0 : list of str
            interpro ids with recall 0
        interpros_num_children_recall0 : list of int
            number of children (from the parsed tree) of each interpro id with recall 0

        Returns
        -------
        None
        """
        print("Saving diagnostics for intepro domains with recall 0")
        with open(os.path.join(self.data_path, self.get_model_name() + "_interpros_recall0" + ".txt"),
                  "w") as interpros_recall0_file:
            # write file with names of interpro having recall 0
            interpros_recall0_file.write("\n".join(interpros_recall0))
        # plot histogram of number of children for interpro parents with recall 0
        self.plot_histogram(interpros_num_children_recall0, None,
                            "Number of Intepro domains", "Number of children", "hist")

    def plot_histogram(self, performance_N, title, xlabel, ylabel, out_suffix):
        """
        Plot histogram for a performance metric (or the number of children)

        Parameters
        ----------
        performance_N : list of float
            performance metric value per parent domain
        title : str
            histogram title (skipped when None)
        xlabel : str
            label x
        ylabel : str
            label y
        out_suffix : str
            histogram output file name suffix

        Returns
        -------
        None
        """
        fig = plt.figure()
        plt.hist(performance_N, color='g', align='left', edgecolor='k', alpha=0.8)
        plt.xlabel(xlabel, fontsize=14)
        plt.ylabel(ylabel, fontsize=14)
        if title is not None:
            plt.title(title, fontsize=14)
        plt.xticks(np.arange(0, 1.1, 0.1))
        hist_name = self.get_model_name() + "_" + out_suffix + ".png"
        fig.savefig(os.path.join(self.data_path, hist_name), bbox_inches='tight', dpi=600)
|
import itertools
import datetime
import pandas as pd
import numpy as np
import requests
import math
import os
import warnings
import torch
import multiprocessing
import GPUtil
from torch import nn
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import horovod.torch as hvd
def read_file_from_aws():
    """Download data_es.csv from S3 into the working directory, streaming 1 KiB chunks.

    Fixes over the original: the output file handle is now closed (context
    manager), the response is released, and a bad HTTP status raises instead
    of silently writing an error page to disk.
    """
    url = "https://medium-post-data.s3.amazonaws.com/data_es.csv"
    response = requests.get(url, stream=True)
    try:
        response.raise_for_status()
        with open("data_es.csv", "wb") as text_file:
            for chunk in response.iter_content(chunk_size=1024):
                text_file.write(chunk)
    finally:
        # Release the underlying connection even if the download fails.
        response.close()
# Download the dataset once at import time so the CSV is available locally.
read_file_from_aws()

# Feature columns: close/ask/bid plus ten levels of market-depth ask/bid quotes.
x_cols = ['close', 'ask', 'bid', 'md_0_ask', 'md_0_bid',
          'md_1_ask', 'md_1_bid', 'md_2_ask', 'md_2_bid', 'md_3_ask', 'md_3_bid',
          'md_4_ask', 'md_4_bid', 'md_5_ask', 'md_5_bid', 'md_6_ask', 'md_6_bid',
          'md_7_ask', 'md_7_bid', 'md_8_ask', 'md_8_bid', 'md_9_ask', 'md_9_bid']
# Target column: the close price is what the model predicts.
y_cols = ['close']
def reshape_and_scale_data_for_training(data, window_size, x_cols, y_cols, y_len=1, scale=True, scaler=MinMaxScaler, test_size=0.2, backend='keras'):
    """Reshape a ``pandas.DataFrame`` into sliding windows and (optionally) scale it for training.

    Parameters
    ----------
    data : pandas.DataFrame
        Source time-series data.
    window_size : int
        Number of consecutive rows per input window.
    x_cols, y_cols : list of str
        Column names of the features and the targets.
    y_len : int
        Number of future rows predicted per window.
    scale : bool
        When True, fit ``scaler`` on the selected columns and transform them.
    scaler : type
        Scaler class (e.g. MinMaxScaler, StandardScaler); instantiated only when scaling.
    test_size : float
        Fraction held out by train_test_split (shuffle=False keeps time order).
    backend : str
        'keras' returns numpy arrays, 'torch' returns torch.Tensors.

    Returns
    -------
    (x_train, x_test, y_train, y_test, fitted_scaler_or_None)

    Raises
    ------
    ValueError
        If ``backend`` is neither 'keras' nor 'torch' (the original silently
        returned None in that case).
    """
    def cols_to_indices(frame, cols):
        # Map column names to their positional indices in the original frame.
        return [list(frame.columns).index(c) for c in cols]

    def get_new_pos(i, *indices):
        # After subsetting to the union of x/y columns, positions shift;
        # find the new position of original column index `i`.
        originals = sorted(set(itertools.chain(*indices)))
        return originals.index(i)

    n_rows = len(data)
    x_windows = []
    y_windows = []
    x_indices = cols_to_indices(data, x_cols)
    y_indices = cols_to_indices(data, y_cols)
    s = None
    if scale:
        s = scaler()
        data = data.iloc[:, list(set(x_indices + y_indices))]
        data = s.fit_transform(data)
    else:
        # convert this to an np array to conform to the index access pattern below
        data = np.array(data.iloc[:, list(set(x_indices + y_indices))])
    xs = np.array(data[:, [get_new_pos(i, x_indices, y_indices) for i in x_indices]])
    ys = np.array(data[:, [get_new_pos(i, x_indices, y_indices) for i in y_indices]])
    # Sliding windows: each x window is followed by the next y_len target rows.
    for i in range(0, n_rows - window_size):
        x_windows.append(xs[i:i + window_size])
        y_windows.append(ys[i + window_size:((i + window_size) + y_len)])
    x_train, x_test, y_train, y_test = train_test_split(x_windows, y_windows, test_size=test_size, shuffle=False)
    if backend == 'keras':
        return np.array(x_train), np.array(x_test), np.array(y_train), np.array(y_test), s
    if backend == 'torch':
        return torch.Tensor(x_train), torch.Tensor(x_test), torch.Tensor(y_train), torch.Tensor(y_test), s
    raise ValueError("backend must be 'keras' or 'torch', got {!r}".format(backend))
class TimeSeriesDataSet(Dataset):
    """
    Minimal dataset pairing each input window X[i] with its target Y[i].

    Kept deliberately simple so what's happening is clear without getting
    bogged down by the preprocessing.
    """

    def __init__(self, X, Y):
        """Store the paired sequences.

        Raises
        ------
        ValueError
            If len(X) != len(Y). ValueError is a subclass of Exception, so
            callers that caught the original bare Exception keep working.
        """
        self.X = X
        self.Y = Y
        if len(self.X) != len(self.Y):
            raise ValueError("The length of X does not match the length of Y")

    def __len__(self):
        # Number of (x, y) samples.
        return len(self.X)

    def __getitem__(self, index):
        # note that this isn't randomly selecting. It's a simple get of a
        # single item that represents an x and y pair.
        _x = self.X[index]
        _y = self.Y[index]
        return _x, _y
class LSTM(nn.Module):
"""implements an lstm - a single/multilayer uni/bi directional lstm"""
def __init__(self, n_features, window_size,
output_size, h_size, n_layers=1,
bidirectional=False, device=torch.device('cpu'), initializers=[]):
super().__init__()
self.n_features = n_features
self.window_size = window_size
self.output_size = output_size
self.h_size = h_size
self.n_layers = n_layers
self.directions = 2 if bidirectional else 1
self.device = device
self.lstm = nn.LSTM(input_size=n_features, hidden_size=h_size,
num_layers=n_layers, bidirectional=bidirectional, batch_first=True)
self.hidden = None
self.linear = nn.Linear(self.h_size * self.directions, self.h_size)
self.linear2 = nn.Linear(self.h_size, 64)
self.linear3 = nn.Linear(64, 1)
self.layers = [self.lstm, self.linear, self.linear2, self.linear3]
self.initializers = initializers
self._initialize_all_layers()
self.tensors = {
"float": torch.cuda.FloatTensor if str(self.device) == "cuda" else torch.FloatTensor,
"long": torch.cuda.LongTensor if str(self.device) == "cuda" else torch.LongTensor
}
def _initialize_all_layers(self):
"""
If the user provides initializers, initializes each layer[i] with initializer[i]
If no initializers provided, moves the layers to the device specified
"""
if all([self.initializers, len(self.initializers) != self.layers,
len(self.initializers) == 1]):
warnings.warn("only one initializer: {} was provided for {} layers, the initializer will be used for all layers"\
.format(len(self.layers)))
if len(self.initializers) == 1:
[self._initialize_layer(self.initializers[0],x) for x in self.layers]
else:
[self._initialize_layer(self.initializers[i],x) for i, x in enumerate(self.layers)]
elif all([self.initializers,
len(self.initializers) != self.layers]):
raise Exception("{} initializers were provided for {} layers, need to provide an initializer for each layer"\
.format(len(self.initializers), len(self.layers)))
else:
# uses default initialization, but moves layer to device
[self._initialize_layer(None, x) for x in self.layers]
def _initialize_layer(self, initializer, layer):
    """Apply `initializer` to `layer` (not implemented yet) and move the
    layer onto the configured device."""
    if initializer:
        # TODO: add some dynamic initialization methods
        pass
    layer.to(self.device)
def _make_tensor(self, tensor_type, *args, **kwargs):
    """
    Build a tensor of the requested kind on the model's device.

    :param tensor_type: supported ['long', 'float']
    :returns: a torch.Tensor
    """
    factory = self.tensors[tensor_type]
    return factory(*args, **kwargs).to(self.device)
def init_hidden(self, batch_size):
    """Return a freshly sampled (h_0, c_0) state pair for `batch_size` samples."""
    state_shape = (self.n_layers * self.directions, batch_size, self.h_size)
    h_0 = Variable(torch.randn(*state_shape).to(self.device))
    c_0 = Variable(torch.randn(*state_shape).to(self.device))
    return (h_0, c_0)
def forward(self, input):
    """
    Run the LSTM over `input` and project the last timestep's hidden state
    through the three linear layers.

    :param input: tensor shaped (batch, window_size, n_features)
        (batch_first LSTM) -- assumed; confirm against callers
    :returns: predictions, one value per sample
    """
    batch_size = input.size(0)
    self.hidden = self.init_hidden(batch_size)
    lstm_output, self.hidden = self.lstm(input, self.hidden)
    # Select only the final timestep of the sequence output.
    last_index = self._make_tensor("long", ([self.window_size - 1]))
    last_hidden_states = torch.index_select(lstm_output, 1, index=last_index)
    projected = self.linear(last_hidden_states)
    projected = self.linear2(projected)
    predictions = self.linear3(projected)
    return predictions
if __name__ == "__main__":
# Initialize horovod (one process per GPU).
hvd.init()
# to handle dynamically updating GPUs
# Set CUDA_DEVICE_ORDER so the IDs assigned by CUDA match those from nvidia-smi
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
gpus = [gpu for gpu in GPUtil.getGPUs()]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join([str(g.id) for g in gpus])
# hvd.rank() doubles as the "am I the main process" flag (rank 0 logs/times).
device_number = hvd.rank()
if not torch.cuda.is_available():
print("Needs a GPU to run!")
exit()
epochs = 100
# Horovod: adjust number of epochs based on number of GPUs.
epochs = int(math.ceil(epochs / hvd.size()))
window_length = 10
# Pin GPU to be used to process local rank (one GPU per process)
if torch.cuda.is_available():
torch.cuda.set_device(hvd.local_rank())
_DEVICE = torch.device("cuda:{}".format(str(torch.cuda.current_device())))
if device_number==0:
print("horovod has distributed to the following devices: {}"\
.format(["{}, device_id: cuda:{}"\
.format(gpu.name, gpu.id) for gpu in GPUtil.getGPUs()]), flush=True)
print(f"this process is using device - {_DEVICE}", flush=True)
df = pd.read_csv("data_es.csv")
# NOTE(review): x_cols / y_cols are not defined in this file -- presumably
# module-level globals defined elsewhere; confirm.
x_train, x_test, y_train, y_test, scaler = reshape_and_scale_data_for_training(df, window_length, x_cols, y_cols, y_len=1, scale=True,backend='torch')
# Distributed sampler shards the training set across horovod ranks.
# NOTE(review): train_sampler.set_epoch(epoch) is never called, so the
# shuffle order is identical every epoch -- confirm this is intended.
train_sampler = torch.utils.data.distributed.DistributedSampler(
TimeSeriesDataSet(x_train, y_train), num_replicas=hvd.size(), rank=hvd.rank())
train_loader = DataLoader(TimeSeriesDataSet(x_train, y_train), batch_size=32, pin_memory=True, num_workers=4, sampler=train_sampler)
test_loader = DataLoader(TimeSeriesDataSet(x_test, y_test), batch_size=len(x_test), shuffle=True, pin_memory=True, num_workers=4)
model = LSTM(n_features=23, window_size=window_length, output_size=1, h_size=256, device=_DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-6)
# Wrap the optimizer so gradients are averaged across ranks.
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
model.to(_DEVICE)
# NOTE(review): `reduce=` is deprecated in torch in favor of `reduction`
# alone -- presumably harmless here, but worth cleaning up.
loss_fn = nn.MSELoss(reduce=True, reduction="mean")
train_times = []
# Make every rank start from rank 0's weights.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
def train(epoch, device):
# One pass over the (sharded) training set.
for i, data in enumerate(train_loader):
# move x and y to the gpu
inputs, labels = data[0].to(_DEVICE), data[1].to(_DEVICE)
pred = model(inputs)
# Compute MSE loss between predictions and targets.
loss = loss_fn(pred, labels)
# Getting gradients w.r.t. parameters
loss.backward()
# Updating parameters
optimizer.step()
optimizer.zero_grad()
# write stats if running on main
if device == 0:
print(f"epoch: {epoch}, train_loss: {loss}", flush=True)
def validate(epoch,device):
# NOTE(review): runs without model.eval()/torch.no_grad(), so dropout and
# autograd bookkeeping stay active during validation -- confirm intended.
for i, test_data in enumerate(test_loader):
test_inputs, test_labels = test_data[0].to(_DEVICE), test_data[1].to(_DEVICE)
test_pred = model(test_inputs)
test_loss = loss_fn(test_pred, test_labels)
if device == 0:
print(f"epoch: {epoch}, test_loss: {test_loss}", flush=True)
#get statistics on the main node
if device_number == 0:
start_time = datetime.datetime.now()
for epoch in range(epochs):
epoch_start = datetime.datetime.now()
train(epoch, device_number)
validate(epoch, device_number)
epoch_end = datetime.datetime.now()
epoch_time = (epoch_end - epoch_start).total_seconds()
train_times.append(epoch_time)
print(f"device: {hvd.rank()}, avg_time_per_epoch:{np.mean(train_times)}")
if device_number == 0:
end_time = datetime.datetime.now()
total_time = (end_time - start_time).total_seconds() / 60
print(f"total training time in minutes: {total_time}")
|
<gh_stars>0
# Ignoring some linting rules in tests
# pylint: disable=redefined-outer-name
# pylint: disable=missing-docstring
import numpy as np
import pytest
from bingo.symbolic_regression.agraph.operator_definitions import *
from bingo.symbolic_regression.agraph.evaluation_backend \
import evaluation_backend as py_eval_backend
try:
from bingocpp import evaluation_backend as cpp_eval_backend
except ImportError:
cpp_eval_backend = None
# Parametrize marker for the C++ backend; the skipif makes tests skip
# cleanly when bingocpp failed to import above.
CPP_PARAM = pytest.param("Cpp",
marks=pytest.mark.skipif(not cpp_eval_backend,
reason='BingoCpp import '
'failure'))
# Operators exercised by the all-funcs fixtures below.
OPERATOR_LIST = [INTEGER, VARIABLE, CONSTANT, ADDITION, SUBTRACTION,
MULTIPLICATION, DIVISION, SIN, COS, EXPONENTIAL, LOGARITHM,
POWER, ABS, SQRT]
@pytest.fixture(params=["Python", CPP_PARAM])
def engine(request):
# Backend identifier for the current parametrized run: "Python" or "Cpp".
return request.param
@pytest.fixture
def eval_backend(engine):
    """Resolve the parametrized engine name to the matching backend module."""
    return py_eval_backend if engine == "Python" else cpp_eval_backend
@pytest.fixture
def all_funcs_command_array():
# Stack program exercising every operator; each row is
# (opcode, operand_1, operand_2), operands index earlier rows.
return np.array([[INTEGER, 5, 5],
[VARIABLE, 0, 0],
[CONSTANT, 0, 0],
[ADDITION, 1, 0],
[SUBTRACTION, 2, 3],
[MULTIPLICATION, 4, 1],
[DIVISION, 5, 1],
[SIN, 6, 0],
[COS, 7, 0],
[EXPONENTIAL, 8, 0],
[LOGARITHM, 9, 0],
[SAFE_POWER, 10, 0],
[ABS, 11, 0],
[SQRT, 12, 0]])
@pytest.fixture
def higher_dim_command_array():
# Encodes f(x, c) = c0*x0 + c1*x1 over two variables and two constants.
return np.array([[VARIABLE, 0, 0],
[VARIABLE, 1, 1],
[CONSTANT, 0, 0],
[CONSTANT, 1, 1],
[MULTIPLICATION, 0, 2],
[MULTIPLICATION, 1, 3],
[ADDITION, 4, 5]])
@pytest.fixture
def sample_x():
# 11 rows, two features: x[:, 0] in [-1, 0] and x[:, 1] in [0, 1].
return np.vstack((np.linspace(-1.0, 0.0, 11),
np.linspace(0.0, 1.0, 11))).transpose()
@pytest.fixture
def sample_constants():
# Two constants matching the CONSTANT commands used in the fixtures above.
return np.array([10, 3.14])
def test_all_funcs_eval(eval_backend, all_funcs_command_array):
    """Evaluating the all-operators program reproduces known values."""
    x = np.arange(1, 6).reshape((-1, 1))
    constants = (10, )
    expected = np.array([[0.45070097],
                         [0.9753327],
                         [0.29576841],
                         [0.36247937],
                         [1.0]])
    actual = eval_backend.evaluate(all_funcs_command_array, x, constants)
    np.testing.assert_array_almost_equal(actual, expected)
def test_higher_dim_func_eval(eval_backend, higher_dim_command_array):
    """c0*x0 + c1*x1 evaluates to the analytic sum over both columns."""
    x = np.arange(8).reshape((-1, 2))
    constants = (10, 100)
    expected = np.sum(x * constants, axis=1).reshape((-1, 1))
    actual = eval_backend.evaluate(higher_dim_command_array, x, constants)
    np.testing.assert_array_almost_equal(actual, expected)
def test_all_funcs_deriv_x(eval_backend, all_funcs_command_array):
    """f(x) and df/dx of the all-operators program match known values."""
    x = np.arange(1, 6).reshape((-1, 1))
    constants = (10, )
    expected_f = np.array([[0.45070097],
                           [0.9753327],
                           [0.29576841],
                           [0.36247937],
                           [1.0]])
    expected_df_dx = np.array([[0.69553357],
                               [-0.34293336],
                               [-0.39525239],
                               [0.54785643],
                               [0.0]])
    f_of_x, df_dx = eval_backend.evaluate_with_derivative(
        all_funcs_command_array, x, constants, True)
    np.testing.assert_array_almost_equal(f_of_x, expected_f)
    np.testing.assert_array_almost_equal(df_dx, expected_df_dx)
def test_all_funcs_deriv_c(eval_backend, all_funcs_command_array):
    """f(x) and df/dc of the all-operators program match known values."""
    x = np.arange(1, 6).reshape((-1, 1))
    constants = (10, )
    expected_f = np.array([[0.45070097],
                           [0.9753327],
                           [0.29576841],
                           [0.36247937],
                           [1.0]])
    expected_df_dc = np.array([[-0.69553357],
                               [0.34293336],
                               [0.39525239],
                               [-0.54785643],
                               [0.]])
    f_of_x, df_dc = eval_backend.evaluate_with_derivative(
        all_funcs_command_array, x, constants, False)
    np.testing.assert_array_almost_equal(f_of_x, expected_f)
    np.testing.assert_array_almost_equal(df_dc, expected_df_dc)
def test_higher_dim_func_deriv_x(eval_backend, higher_dim_command_array):
    """df/dx of c0*x0 + c1*x1 is the constant vector for every sample."""
    x = np.arange(8).reshape((4, 2))
    constants = (10, 100)
    expected_f = np.sum(x * constants, axis=1).reshape((-1, 1))
    expected_df_dx = np.array([constants] * 4)
    f_of_x, df_dx = eval_backend.evaluate_with_derivative(
        higher_dim_command_array, x, constants, True)
    np.testing.assert_array_almost_equal(f_of_x, expected_f)
    np.testing.assert_array_almost_equal(df_dx, expected_df_dx)
def test_higher_dim_func_deriv_c(eval_backend, higher_dim_command_array):
    """df/dc of c0*x0 + c1*x1 is simply x itself."""
    x = np.arange(8).reshape((4, 2))
    constants = (10, 100)
    expected_f = np.sum(x * constants, axis=1).reshape((-1, 1))
    expected_df_dc = x
    f_of_x, df_dc = eval_backend.evaluate_with_derivative(
        higher_dim_command_array, x, constants, False)
    np.testing.assert_array_almost_equal(f_of_x, expected_f)
    np.testing.assert_array_almost_equal(df_dc, expected_df_dc)
|
<filename>djforms/communications/printrequest/models.py
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from djtools.fields.helpers import upload_to_path
# Choice tuples for PrintRequest fields; each entry is (stored value, label).
# NOTE(review): 'Flyer / poster' stores lowercase "poster" but displays
# "Poster" -- confirm the stored value casing is intentional (changing it
# would require a data migration).
FORMATS = (
('Booklet / Event program', 'Booklet / Event program'),
('Brochure', 'Brochure'),
('Flyer / poster', 'Flyer / Poster'),
('Invitations', 'Invitations'),
('Multi-page book', 'Multi-page book'),
('Advertisement', 'Advertisement'),
('Postcard', 'Postcard'),
('Envelopes', 'Envelopes'),
('Other', 'Other'),
)
# Consent to mailing-list coordination deadlines.
CONSENT = (
('Yes', 'I agree'),
('NA', 'Mailing is not required'),
)
# Who performs the physical mailing.
WHO_MAILING = (
('Mail house', 'Mail house'),
('Requesting department', 'Requesting department'),
('Printer', 'Printer'),
('None', 'None'),
)
# How the piece is mailed (envelope needs).
HOW_MAILING = (
('Self mailer', 'Self mailer'),
('I need envelopes', 'I need envelopes'),
('I have envelopes', 'I have envelopes'),
('None', 'None'),
)
# Postage class for the mailing.
SPEED_MAILING = (
('1st Class', '1st Class'),
('Non-profit / Bulk', 'Non-profit / Bulk'),
('None', 'None'),
)
class PrintRequest(models.Model):
"""A print/design job request submitted to the Office of Communications."""
# Audit trail: creator/updater and timestamps (all non-editable/automatic).
user = models.ForeignKey(
User,
verbose_name="Created by",
related_name='communications_print_request_user',
on_delete=models.CASCADE,
editable=False,
)
updated_by = models.ForeignKey(
User, verbose_name="Updated by",
related_name='communications_print_request_updated_by',
on_delete=models.CASCADE,
editable=False,
)
date_created = models.DateTimeField("Date Created", auto_now_add=True)
date_updated = models.DateTimeField("Date Updated", auto_now=True)
# Requesting department contact details.
department = models.CharField("Department", max_length=128)
phone = models.CharField("Department phone number", max_length=21)
account = models.CharField(
"Account number",
max_length=18,
null=True,
blank=True,
)
sponsoring_department = models.CharField(
"Sponsoring Department/Office",
max_length=128,
null=True,
blank=True,
)
contact_phone = models.CharField(
"Contact phone number",
max_length=21,
null=True,
blank=True,
)
estimate = models.BooleanField(
"Do you require an estimate for this project before we begin?",
help_text="Please allow an additional 48-72 hours to deliver quotes.",
)
# Project description.
project_name = models.CharField(
"What is the name of your project?",
max_length=128,
)
project_purpose = models.TextField(
"Briefly describe the purpose of your request",
)
target_audience = models.TextField(
"Who is/are your target audience/audiences?",
help_text="For example: Alumni, prospective students, community.",
)
secondary_audience = models.TextField(
"Are there secondary target audiences?",
blank=True,
)
# NOTE(review): the FORMATS tuple defined above is not attached here
# (no choices=FORMATS); the help text says "Check all that apply", so
# presumably the form layer stores a joined multi-select -- confirm.
print_format = models.CharField(
"What is the format of your finished piece?",
max_length=128,
help_text="Check all that apply",
)
print_format_other = models.CharField(
'If "Other" please describe',
max_length=255,
null=True,
blank=True,
)
approval = models.BooleanField(mark_safe(
'''
I am aware that all flyers and posters appearing on campus must be
approved by the Division of Student Affairs before hanging.
I agree to review the
<a href="/policies/" target="_blank">Event Promotion Policy</a>
and adhere to those guidelines for this project.
'''
))
format_quantity = models.CharField(
"What is the quantity for each format?",
max_length=128,
)
special_instructions = models.TextField(
"""
Please provide a short description -
including special instructions -
needed for each item you selected above.
""",
null=True,
blank=True,
)
delivery_date = models.DateField(
"Final requested delivery date of project",
auto_now=False,
)
# NOTE(review): the CONSENT tuple defined above is not attached here
# (no choices=CONSENT) -- confirm whether the form enforces it.
consent = models.CharField(
"""
If the Office of Communications coordinates
your mailing with a mail house, we need your
mailing list at least one week before the mail
date. It is your responsibility to coordinate
the request from Institutional Advancement within
their established guidelines and procedures.
Advancement requires two weeks lead time to produce
a mail file. Requests can be submitted via the
<a href="https://docs.google.com/forms/d/e/1FAIpQLSexcu_M5TMphO4KpoKXNdchSzaeYWrjSHBoAKrL15M6YdtUGA/viewform">
Advancement Office List Request Form
</a>.
""",
max_length=128,
)
# max_length=4 suggests these store short 'Yes'/'No'-style answers --
# presumably set by the form layer; confirm.
website_update = models.CharField(
"Is there a website that needs to be updated as part of this project?",
max_length=4,
)
website_url = models.CharField(
"If so, what is the URL of the page that needs updating?",
max_length=255,
null=True,
blank=True,
)
is_mailing = models.CharField(
"Is this project being mailed?",
max_length=4,
)
who_mailing = models.CharField(
"Who is mailing?",
choices=WHO_MAILING,
blank=True,
max_length=128,
)
how_mailing = models.CharField(
"How is it being mailed?",
choices=HOW_MAILING,
blank=True,
max_length=128,
)
speed_mailing = models.CharField(
"Please indicate how your piece is to be mailed",
choices=SPEED_MAILING,
blank=True,
max_length=128,
)
attachments = models.CharField(
"Are you including attachments?",
max_length=4,
)
# Up to four optional file attachments; paths are built by upload_to_path
# (which uses get_slug below).
file_1 = models.FileField(
"",
upload_to=upload_to_path,
blank=True,
)
file_2 = models.FileField(
"",
upload_to=upload_to_path,
blank=True,
)
file_3 = models.FileField(
"",
upload_to=upload_to_path,
blank=True,
)
file_4 = models.FileField(
"",
upload_to=upload_to_path,
blank=True,
)
# Requester acknowledgements.
fact_checking = models.BooleanField(
"""
I am responsible for fact-checking the spelling of names and titles,
dates, times, locations, URLs, etc.
"""
)
lead_time = models.BooleanField(
"""
I will provide adequate lead-time for each project and submit final and
approved concepts, copy, and assets by the set deadlines.
"""
)
deadlines = models.BooleanField(
"""
I understand that missing deadlines or making changes that result in
more than two proofs will result in a delay of project completion date.
"""
)
class Meta:
db_table = 'communications_printrequest'
def get_slug(self):
# Storage prefix used by upload_to_path for the FileFields above.
return 'files/communications/printrequest/'
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(1, os.path.abspath('..'))
#---------------------------------------------------------------------------------------------------
# SYNTHESIS OF POLYNOMIALS IN NIA
#---------------------------------------------------------------------------------------------------
# This is an example of the synthesis of polynomials in the NIA logic.
#---------------------------------------------------------------------------------------------------
from pysv import templates
from pysv import smt_synthesis
from pysv import contract
from pysv import utils
# Keijzer-12 benchmark test cases over the grid x, y in [-3, 3].
# The table satisfies res = x^4 - x^3 + (y^2 div 2) - y with integer
# division (verified against several rows, e.g. x=-3, y=-3 -> 115).
csv_keijzer12 = """x:Int; y:Int; res:Int
-3; -3; 115
-3; -2; 112
-3; -1; 109
-3; 0; 108
-3; 1; 107
-3; 2; 108
-3; 3; 109
-2; -3; 31
-2; -2; 28
-2; -1; 25
-2; 0; 24
-2; 1; 23
-2; 2; 24
-2; 3; 25
-1; -3; 9
-1; -2; 6
-1; -1; 3
-1; 0; 2
-1; 1; 1
-1; 2; 2
-1; 3; 3
0; -3; 7
0; -2; 4
0; -1; 1
0; 0; 0
0; 1; -1
0; 2; 0
0; 3; 1
1; -3; 7
1; -2; 4
1; -1; 1
1; 0; 0
1; 1; -1
1; 2; 0
1; 3; 1
2; -3; 15
2; -2; 12
2; -1; 9
2; 0; 8
2; 1; 7
2; 2; 8
2; 3; 9
3; -3; 61
3; -2; 58
3; -1; 55
3; 0; 54
3; 1; 53
3; 2; 54
3; 3; 55"""
def synthesize_keijzer12():
    """Synthesize hole H1 in `(= res H1)` against the Keijzer-12 cases (NIA)."""
    grammar_spec = """
(
( Start Int
( x y (Constant Int) (+ Start Start) (- Start Start) (* Start Start) (div Start Start) (ite SBool Start Start) )
)
( SBool Bool
( (> Start Start) (>= Start Start) (< Start Start) (<= Start Start) (= Start Start) (= SBool SBool) )
)
)
"""
    program_vars = contract.ProgramVars({'x': 'Int', 'y': 'Int'}, {'res': 'Int'})
    code = """(= res H1)"""
    precondition = 'true'
    postcondition = 'true'
    grammar = templates.load_gramar_from_SYGUS_spec(grammar_spec)
    # Hole of depth 6 over the integer inputs x and y.
    hole = smt_synthesis.HoleDecl('H1', grammar, {'x': 'Int', 'y': 'Int'}, True, 6)
    test_cases = contract.TestCases.from_csv(csv_keijzer12)
    options = utils.Options(['--solver', 'z3', '--logic', 'NIA', "--lang", "smt2"])
    return smt_synthesis.synthesize_tc(test_cases, code, precondition,
                                       postcondition, program_vars, options, [hole])
# Tiny test set satisfying res = x^2 + 1 (verified for all three rows).
csv_square = """x:Int; res:Int
-2; 5
0; 1
2; 5"""
def synthesize_square():
    """Synthesize hole H1 in `(= res (+ H1 1))` against the square cases (NIA)."""
    grammar_spec = """
(
( Start Int
( x (Constant Int) (+ Start Start) (- Start Start) (* Start Start) )
)
)
"""
    program_vars = contract.ProgramVars({'x': 'Int'}, {'res': 'Int'})
    code = """(= res (+ H1 1))"""
    precondition = 'true'
    postcondition = 'true'
    grammar = templates.load_gramar_from_SYGUS_spec(grammar_spec)
    # Hole of depth 2 over the single integer input x.
    hole = smt_synthesis.HoleDecl('H1', grammar, {'x': 'Int'}, True, 2)
    test_cases = contract.TestCases.from_csv(csv_square)
    options = utils.Options(['--solver', 'z3', '--logic', 'NIA', "--lang", "smt2", "--name_all_assertions", "0", "--synth_mode", "max"])
    return smt_synthesis.synthesize_tc(test_cases, code, precondition,
                                       postcondition, program_vars, options, [hole])
# ------------------------------------------------------------------------
# MAIN
# ------------------------------------------------------------------------
if __name__ == "__main__":
# Keijzer-12 left disabled; it is the slower benchmark.
# res = synthesize_keijzer12()
res = synthesize_square()
print('******** Z3 RESULT ********')
print(res.text)
print('--------------------------\n')
print('SYNTHESIZED PYTHON CODE:')
print(res.final_code) |
<reponame>iwaseyusuke/python-ovs-vsctl<gh_stars>1-10
# Copyright (C) 2016 <NAME> <iw<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parsers for 'ovs-vsctl' command outputs.
"""
import json
from ovs_vsctl.utils import is_valid_uuid
def line_parser(buf):
    """
    Parse `buf` as a str representation of a list of values
    (e.g. the output of 'ovs-vsctl list-br').

    Non-empty lines are stripped of surrounding whitespace; empty lines
    are dropped.

    :param buf: str type value containing values list.
    :return: list of parsed values.
    """
    return [line.strip() for line in buf.split('\n') if line]
def show_cmd_parser(buf):
    """
    Parser for 'ovs-vsctl show' command output.

    Currently extracts ONLY the 'ovs_version' column, e.g.::

        ovs_version: "2.5.0"

    :param buf: str type output of 'ovs-vsctl show' command.
    :return: dict type value of 'ovs-vsctl show' command.
    """
    outputs = {}
    version_rows = (row for row in line_parser(buf)
                    if row.startswith('ovs_version'))
    for row in version_rows:
        # The version is the first double-quoted token on the line.
        outputs['ovs_version'] = row.split('"')[1]
    return outputs
def _record_row_parser(buf):
    """
    Split a single row formatted as '<column> : <value>' into a
    (column, value) tuple, stripping surrounding whitespace from both.

    Example: 'name : "br1"' -> ('name', '"br1"')

    :param buf: single row in str type.
    :return: tuple of `column` and `value`.
    """
    column, value = (part.strip() for part in buf.split(':', 1))
    return column, value
def _record_value_parser(buf):
    """
    Parse one OVSDB value string into the matching Python object.

    Composite JSON-tagged types from 'ovs-vsctl list'/'find':

    - '["uuid","79c26f92-..."]'         -> str (the UUID itself)
    - '["set",[100,200]]'               -> list
    - '["map",[["stp-enable","true"]]]' -> dict
    - anything else ('"br1"', 100, true/false, null) -> plain json.loads

    :param buf: value of 'ovs-vsctl list' or `ovs-vsctl find` command.
    :return: python object corresponding to the value type of row.
    """
    if buf.startswith(('["uuid",', '["set",', '["map",')):
        # Tagged pair: discard the tag, keep the payload.
        _, value = json.loads(buf)
        if buf.startswith('["map",'):
            value = dict(value)
        return value
    return json.loads(buf)
class Record():  # pylint: disable=too-few-public-methods
    """
    One record of an OVSDB table; each parsed column becomes an attribute.
    """

    def __init__(self, **kwargs):
        self.__dict__ = kwargs

    @classmethod
    def parse(cls, buf):
        """
        Build a `Record` from the str representation of its rows.

        :param buf: Record in str type.
        :return: `Record` instance.
        """
        columns = {}
        for row in buf.split('\n'):
            if not row:
                # Skip blank separator lines.
                continue
            column, raw_value = _record_row_parser(row)
            columns[column] = _record_value_parser(raw_value)
        return cls(**columns)

    def __repr__(self):
        pairs = sorted(self.__dict__.items(), key=lambda item: item[0])
        body = ', '.join('%s=%s' % (key, repr(value)) for key, value in pairs)
        return '%s(%s)' % (self.__class__.__name__, body)

    __str__ = __repr__
def list_cmd_parser(buf):
    """
    Parser for 'ovs-vsctl list' and 'ovs-vsctl find' command output.

    `buf` must be produced with '--format=list' and '--data=json'.

    :param buf: str type output of 'ovs-vsctl list' command.
    :return: list of `Record` instances.
    """
    # Assumption: records are separated by one empty line.
    return [Record.parse(chunk) for chunk in buf.split('\n\n')]


find_cmd_parser = list_cmd_parser  # pylint: disable=invalid-name
def get_cmd_parser(buf):
    """
    Parser for 'ovs-vsctl get' command output.

    The output is mostly JSON, except for bare UUIDs and the unquoted
    keys of map type values, which are massaged into JSON first.

    :param buf: value of 'ovs-vsctl get' command.
    :return: python object corresponding to the value type of row.
    """
    buf = buf.strip('\n')
    try:
        return json.loads(buf)
    except ValueError:
        pass  # Not plain JSON; handled below.

    if is_valid_uuid(buf):
        # Bare UUID: keep the raw string.
        return buf
    if buf.startswith('['):
        # Set type (possibly of UUIDs), e.g. [<UUID>, <UUID>]: quote members.
        quoted = buf.replace('[', '["').replace(', ', '", "').replace(']', '"]')
        return json.loads(quoted)
    if buf.startswith('{'):
        # Map type, e.g. {stp-enable="true"}: quote the keys.
        quoted = buf.replace('{', '{"').replace('=', '": ').replace(', ', ', "')
        return json.loads(quoted)
    # Fall back to the raw string.
    return buf
|
#
# Copyright (c) 2015, <NAME> <<EMAIL>>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys
from pypsi.core import Command, PypsiArgParser, CommandShortCircuit
from pypsi.format import Table, Column, title_str
from pypsi.ansi import AnsiCodes
from pypsi.completers import command_completer
class Topic(object):
    """A help topic: an id plus optional display name, body text and commands."""

    def __init__(self, id, name=None, content=None, commands=None):
        self.id = id
        self.name = name if name else ''
        self.content = content if content else ''
        self.commands = commands if commands else []
class HelpCommand(Command):
    '''
    Provides access to manpage-esque topics and command usage information.
    '''

    def __init__(self, name='help', topic='shell',
                 brief='print information on a topic or command', topics=None,
                 vars=None, **kwargs):
        self.parser = PypsiArgParser(
            prog=name,
            description=brief
        )

        # Add a callback to self.complete_topics so tab-complete can
        # return the possible topics you may get help on
        self.parser.add_argument(
            "topic", metavar="TOPIC", help="command or topic to print",
            nargs='?', completer=self.complete_topics
        )

        super(HelpCommand, self).__init__(
            name=name, brief=brief, usage=self.parser.format_help(),
            topic=topic, **kwargs
        )

        # Variables substituted into topic content via str.format() in
        # print_topic().
        self.vars = vars or {}
        self.topics = topics

    def setup(self, shell):
        '''Initialize topic bookkeeping on the shell context.'''
        shell.ctx.topics = list(self.topics or [])
        shell.ctx.uncat_topic = Topic('uncat',
                                      'Uncategorized Commands & Topics')
        shell.ctx.topic_lookup = {t.id: t for t in shell.ctx.topics}
        shell.ctx.topics_dirty = True

    def complete_topics(self, shell, args, prefix):  # pylint: disable=unused-argument
        '''Return every topic id and command name matching `prefix`.'''
        completions = [
            x.id for x in shell.ctx.topics
            if x.id.startswith(prefix) or not prefix
        ]
        completions.extend([
            x for x in shell.commands if x.startswith(prefix) or not prefix
        ])
        return sorted(completions)

    def complete(self, shell, args, prefix):
        '''Tab-completion entry point; rebuilds the topic index when stale.'''
        if shell.ctx.topics_dirty:
            self.reload(shell)

        # The command_completer function takes in the parser, automatically
        # completes optional arguments (ex, '-v'/'--verbose') or sub-commands,
        # and complete any arguments' values by calling a callback function
        # with the same arguments as complete if the callback was defined
        # when the parser was created.
        return command_completer(self.parser, shell, args, prefix)

    def reload(self, shell):
        '''Rebuild the topic -> commands mapping from the shell's commands.'''
        shell.ctx.uncat_topic.commands = []
        for topic_id in shell.ctx.topic_lookup:
            shell.ctx.topic_lookup[topic_id].commands = []

        for (_, cmd) in shell.commands.items():
            if cmd.topic == '__hidden__':
                # Hidden commands never appear in help listings.
                continue

            if cmd.topic:
                if cmd.topic in shell.ctx.topic_lookup:
                    shell.ctx.topic_lookup[cmd.topic].commands.append(cmd)
                else:
                    self.add_topic(shell, Topic(cmd.topic, commands=[cmd]))
            else:
                shell.ctx.uncat_topic.commands.append(cmd)
        shell.ctx.topics_dirty = False

        for topic in shell.ctx.topics:
            if topic.commands:
                topic.commands = sorted(topic.commands, key=lambda x: x.name)

    def add_topic(self, shell, topic):
        '''Register a new topic and mark the index dirty.'''
        shell.ctx.topics_dirty = True
        shell.ctx.topic_lookup[topic.id] = topic
        shell.ctx.topics.append(topic)

    def print_topic_commands(self, shell, topic, title=None,
                             name_col_width=20):
        '''Print a two-column table of a topic's commands and their briefs.'''
        print(
            AnsiCodes.yellow,
            title_str(title or topic.name or topic.id, shell.width),
            AnsiCodes.reset,
            sep=''
        )

        print(AnsiCodes.yellow, end='')
        Table(
            columns=(Column(''), Column('', Column.Grow)),
            spacing=4,
            header=False,
            width=shell.width
        ).extend(*[
            (' ' + c.name.ljust(name_col_width - 1), c.brief or '')
            for c in topic.commands
        ]).write(sys.stdout)
        print(AnsiCodes.reset, end='')

    def print_topics(self, shell):
        '''Print each topic's command table, then an "Additional Topics" list.'''
        max_name_width = 0
        for topic in shell.ctx.topics:
            for c in topic.commands:
                max_name_width = max(len(c.name), max_name_width)

        for c in shell.ctx.uncat_topic.commands:
            max_name_width = max(len(c.name), max_name_width)

        addl = []
        for topic in shell.ctx.topics:
            if topic.content or not topic.commands:
                # Content-only (or empty) topics are listed separately below.
                addl.append(topic)

            if topic.commands:
                self.print_topic_commands(shell, topic,
                                          name_col_width=max_name_width)
                print()

        if shell.ctx.uncat_topic.commands:
            self.print_topic_commands(shell, shell.ctx.uncat_topic,
                                      name_col_width=max_name_width)
            print()

        if addl:
            addl = sorted(addl, key=lambda x: x.id)

            print(
                AnsiCodes.yellow,
                title_str("Additional Topics", shell.width),
                sep=''
            )
            Table(
                columns=(Column(''), Column('', Column.Grow)),
                spacing=4,
                header=False,
                width=shell.width
            ).extend(*[
                (' ' + topic.id.ljust(max_name_width - 1), topic.name or '')
                for topic in addl
            ]).write(sys.stdout)
            print(AnsiCodes.reset)

    def print_topic(self, shell, id):
        '''Print one topic's content/commands, or a command's usage text.

        Returns 0 on success, -1 if `id` names neither a topic nor a command.
        '''
        if id not in shell.ctx.topic_lookup:
            if id in shell.commands:
                cmd = shell.commands[id]
                print(AnsiCodes.yellow, cmd.usage, AnsiCodes.reset, sep='')
                return 0
            self.error(shell, "unknown topic: ", id)
            return -1

        topic = shell.ctx.topic_lookup[id]
        if topic.content:
            print(title_str(topic.name or topic.id, shell.width))
            try:
                cnt = topic.content.format(**self.vars)
            except Exception:
                # BUGFIX: this was a bare `except:`, which also trapped
                # KeyboardInterrupt/SystemExit. Fall back to the raw content
                # when format() fails (bad placeholder, missing var, ...).
                cnt = topic.content
            print(cnt)
            print()

        if topic.commands:
            self.print_topic_commands(shell, topic, "Commands")
        return 0

    def run(self, shell, args):
        '''Execute the help command: list all topics or print the requested one.'''
        if shell.ctx.topics_dirty:
            self.reload(shell)

        try:
            ns = self.parser.parse_args(args)
        except CommandShortCircuit as e:
            return e.code

        rc = 0
        if not ns.topic:
            self.print_topics(shell)
        else:
            rc = self.print_topic(shell, ns.topic)
        return rc
|
<reponame>HenriqueBraz/app_flask
import logging
import json
from datetime import datetime
import pymysql
class GraficoModel(object):
    """Read-only chart queries over the MySQL schema (via PyMySQL).

    Connection settings come from ``config.json`` under the ``mysql`` key.
    Each ``get_*`` method returns a list of counts (or None after logging
    on error, matching the original contract).
    """

    def __init__(self):
        """Load config.json and open the database connection/cursor."""
        with open('config.json') as f:
            conf = json.load(f)
        self.host = conf['mysql']['host']
        self.port = conf['mysql']['port']
        self.user = conf['mysql']['user']
        self.schema = conf['mysql']['schema']
        self.bank_pass = conf['mysql']['bank_pass']
        self.con = pymysql.connect(host=self.host, port=self.port, user=self.user, passwd=self.bank_pass)
        self.cur = self.con.cursor()
        # Schema name comes from trusted local config, not user input.
        self.cur.execute("USE " + self.schema)
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s-%(levelname)s-%(message)s')
        logging.info('Construtor do GraficoModel chamado com sucesso\n')
        logging.disable(logging.DEBUG)

    def __del__(self):
        # Guarded: __init__ may have failed before cur/con existed.
        if hasattr(self, 'cur'):
            self.cur.close()
        if hasattr(self, 'con'):
            self.con.close()

    def _count_all(self, query, params_list):
        """Run `query` once per params tuple, collecting each single COUNT."""
        result = []
        for params in params_list:
            self.cur.execute(query, params)
            result += self.cur.fetchone()
        return result

    def get_tributacao(self, tributacao1, tributacao2, tributacao3, user_id):
        """Count active companies per taxation regime for one owner.

        :returns: [count(tributacao1), count(tributacao2), count(tributacao3)]
            or None on error.
        """
        # Parameterized query (was str.format interpolation: SQL-injection
        # prone and quoting-fragile).
        query = ("SELECT COUNT(e.id) FROM empresas e WHERE e.tributacao = %s "
                 "AND e.id_responsavel = %s AND e.status = 'Ativo';")
        try:
            return self._count_all(query, [(tributacao1, user_id),
                                           (tributacao2, user_id),
                                           (tributacao3, user_id)])
        except Exception as e:
            # BUGFIX: the log previously named the wrong method (get_pizza).
            logging.error('Erro em GraficoModel, método get_tributacao: ' + str(e) + '\n')

    def get_ocorrencias(self, user_id):
        """Count the user's occurrences per status (Aberto/Fechado/Andamento)."""
        query = ("SELECT COUNT(eo.id_empresa) FROM empresas_ocorrencias eo "
                 "WHERE eo.responsavel = %s AND eo.status = %s;")
        try:
            return self._count_all(query, [(user_id, 'Aberto'),
                                           (user_id, 'Fechado'),
                                           (user_id, 'Andamento')])
        except Exception as e:
            logging.error('Erro em GraficoModel, método get_ocorrencias: ' + str(e) + '\n')

    def get_cobrancas(self, tipo):
        """Monthly counts (Jan..Dec of the current year) of cobranças of `tipo`."""
        ano = datetime.now().strftime('%Y')
        # '%%m'/'%%Y': literal '%' must be doubled in parameterized pymysql
        # queries so it is not mistaken for a placeholder.
        query = ("SELECT COUNT(c.tipo_cobranca ) FROM cobrancas c "
                 "WHERE DATE_FORMAT(c.data, '%%m') = %s "
                 "AND DATE_FORMAT(c.created, '%%Y') = %s AND c.tipo_cobranca = %s ")
        result = []
        try:
            for mes in range(1, 13):
                self.cur.execute(query, (mes, ano, tipo))
                result += self.cur.fetchone()
            return result
        except Exception as e:
            logging.error('Erro em GraficoModel, método get_cobrancas: ' + str(e) + '\n')

    def get_numero_empresas(self, user_id):
        """Return [active companies owned by user_id, total active companies]."""
        result = []
        try:
            self.cur.execute(
                "SELECT COUNT(e.id) FROM empresas e WHERE e.id_responsavel = %s "
                "AND e.status = 'Ativo';", (user_id,))
            result += self.cur.fetchone()
            # BUGFIX: this query has no placeholder; the original passed a
            # spurious .format(user_id) that did nothing.
            self.cur.execute(
                "SELECT COUNT(e.id) FROM empresas e WHERE e.status = 'Ativo';")
            result += self.cur.fetchone()
            return result
        except Exception as e:
            logging.error('Erro em GraficoModel, método get_numero_empresas: ' + str(e) + '\n')

    def get_levyings_sum(self, mes):
        """Sum of active cobranças whose `updated` month equals `mes`.

        :returns: single-element tuple from fetchone(), or None on error.
        """
        try:
            self.cur.execute(
                "SELECT SUM(c.valor) FROM cobrancas c WHERE MONTH(c.updated) = %s "
                "AND c.status='Ativo';", (mes,))
            return self.cur.fetchone()
        except Exception as e:
            # BUGFIX: the log previously named FinanceiroModel/get_levyings_sum flag.
            logging.error('Erro em GraficoModel, método get_levyings_sum: ' + str(e) + '\n')
|
<reponame>sreesxlnc/kaggle-right-whale
"""
ipython -i --pdb scripts/train_model.py -- --model cropped_dec19 --data 128_20151029 --use_cropped --as_grey --overwrite --no_test
"""
import numpy as np
from lasagne.layers import dnn
import lasagne as nn
import theano.tensor as T
import theano
from nolearn.lasagne import objective
from nolearn.lasagne.handlers import SaveWeights
from nolearn_utils.iterators import (
ShuffleBatchIteratorMixin,
RandomFlipBatchIteratorMixin,
AffineTransformBatchIteratorMixin,
MeanSubtractBatchiteratorMixin,
AdjustGammaBatchIteratorMixin,
RebalanceBatchIteratorMixin,
make_iterator
)
from nolearn_utils.hooks import (
SaveTrainingHistory,
PlotTrainingHistory,
EarlyStopping,
StepDecay
)
from utils import TrainSplit, PushBestLoss
from utils.layers import batch_norm
from utils.nolearn_net import NeuralNet
from utils.iterators import PairBatchIteratorMixin
from utils.nonlinearities import low_temperature_softmax
from utils.layers import TiedDropoutLayer
def float32(k):
    """Cast a scalar (or array-like) to numpy float32.

    ``np.cast['float32']`` was deprecated and removed in NumPy 2.0;
    ``np.float32`` is the supported, equivalent spelling for the scalar
    learning-rate values this script passes in.
    """
    return np.float32(k)
def conv2dbn(l, name, **kwargs):
    """Stack a cuDNN 2D convolution followed by batch normalization.

    The BN layer is named '<name>bn' so it can be matched to its conv layer
    in saved weights/history.
    """
    conv = nn.layers.dnn.Conv2DDNNLayer(l, name=name, **kwargs)
    return batch_norm(conv, name='%sbn' % name)
# --- Output artifact paths for this model variant ---
model_fname = './models/cropped_dec19.pkl'
model_accuracy_fname = './models/cropped_dec19_accuracy.pkl'
model_history_fname = './models/cropped_dec19_history.pkl'
model_graph_fname = './models/cropped_dec19_history.png'
# --- Core hyperparameters ---
image_size = 256
batch_size = 32
n_classes = 447
# Training-time augmentation mixins (shuffle, flips, affine, gamma).
train_iterator_mixins = [
    ShuffleBatchIteratorMixin,
    RandomFlipBatchIteratorMixin,
    AffineTransformBatchIteratorMixin,
    AdjustGammaBatchIteratorMixin,
]
TrainIterator = make_iterator('TrainIterator', train_iterator_mixins)
# No augmentation at test time.
test_iterator_mixins = [
]
TestIterator = make_iterator('TestIterator', test_iterator_mixins)
train_iterator_kwargs = dict(
    batch_size=batch_size,
    flip_horizontal_p=0.5,
    flip_vertical_p=0.5,
    affine_p=1.,
    affine_scale_choices=np.linspace(0.5, 1.5, 11),
    # affine_shear_choices=np.linspace(-0.5, 0.5, 11),
    affine_translation_choices=np.arange(-64, 64, 1),
    # affine_rotation_choices=np.arange(0, 360, 1),
    adjust_gamma_p=0.5,
    # NOTE(review): "chocies" looks like a typo for "choices" -- confirm
    # against AdjustGammaBatchIteratorMixin's expected kwarg name before
    # renaming, since an unexpected kwarg may be silently ignored.
    adjust_gamma_chocies=np.linspace(0.5, 1.5, 11)
)
train_iterator = TrainIterator(**train_iterator_kwargs)
test_iterator_kwargs = dict(
    batch_size=batch_size,
)
test_iterator = TestIterator(**test_iterator_kwargs)
# Epoch hooks: persist best weights, record/plot history, early stop.
save_weights = SaveWeights(model_fname, only_best=True, pickle=False)
save_training_history = SaveTrainingHistory(model_history_fname)
plot_training_history = PlotTrainingHistory(model_graph_fname)
early_stopping = EarlyStopping(patience=100)
# Shared kwargs for every conv2dbn call below.
conv_kwargs = dict(
    pad='same',
    nonlinearity=nn.nonlinearities.very_leaky_rectify
)
pool_kwargs = dict(
    pool_size=2,
)
# --- Network definition (the numeric comments track spatial size) ---
l = nn.layers.InputLayer(name='in', shape=(None, 3, image_size, image_size))
# 256
l = conv2dbn(l, name='l1c1', num_filters=32, filter_size=(7, 7), stride=2, **conv_kwargs)
# l = nn.layers.dnn.MaxPool2DDNNLayer(l, name='l1p', **pool_kwargs)
# 256
l = conv2dbn(l, name='l2c1', num_filters=48, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l2c2', num_filters=48, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l2c3', num_filters=48, filter_size=(3, 3), **conv_kwargs)
# 128
l = conv2dbn(l, name='l3c1', num_filters=64, filter_size=(3, 3), stride=2, **conv_kwargs)
l = conv2dbn(l, name='l3c2', num_filters=64, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l3c3', num_filters=64, filter_size=(3, 3), **conv_kwargs)
# 64
l = conv2dbn(l, name='l4c1', num_filters=80, filter_size=(3, 3), stride=2, **conv_kwargs)
l = conv2dbn(l, name='l4c2', num_filters=80, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l4c3', num_filters=80, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l4c4', num_filters=80, filter_size=(3, 3), **conv_kwargs)
# 32
l = conv2dbn(l, name='l5c1', num_filters=96, filter_size=(3, 3), stride=2, **conv_kwargs)
l = conv2dbn(l, name='l5c2', num_filters=96, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l5c3', num_filters=96, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l5c4', num_filters=96, filter_size=(3, 3), **conv_kwargs)
# 16
l = conv2dbn(l, name='l6c1', num_filters=128, filter_size=(3, 3), stride=2, **conv_kwargs)
l = conv2dbn(l, name='l6c2', num_filters=128, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l6c3', num_filters=128, filter_size=(3, 3), **conv_kwargs)
l = conv2dbn(l, name='l6c4', num_filters=128, filter_size=(3, 3), **conv_kwargs)
# 8
# Global average pooling -> dropout -> softmax classifier head.
l = nn.layers.dnn.Pool2DDNNLayer(l, name='gp', pool_size=8, mode='average_inc_pad')
l = nn.layers.DropoutLayer(l, name='gpdrop', p=0.8)
l = nn.layers.DenseLayer(l, name='out', num_units=n_classes, nonlinearity=nn.nonlinearities.softmax)
# Adam with a shared learning rate so StepDecay can anneal it 1e-3 -> 1e-5.
net = NeuralNet(
    layers=l,
    regression=False,
    use_label_encoder=False,
    objective_l2=1e-5,
    update=nn.updates.adam,
    update_learning_rate=theano.shared(float32(1e-3)),
    train_split=TrainSplit(0.15, random_state=42, stratify=False),
    batch_iterator_train=train_iterator,
    batch_iterator_test=test_iterator,
    on_epoch_finished=[
        save_weights,
        save_training_history,
        plot_training_history,
        early_stopping,
        StepDecay('update_learning_rate', start=1e-3, stop=1e-5)
    ],
    verbose=10,
    max_epochs=1500,
)
|
<filename>RS/libera5.py
import os
import netCDF4
import cdsapi
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def download_ERA5_bbox(i_year, out_dir, prefix, north, west, south, east):
    """Download one full year of hourly ERA5 single-level data for a
    bounding box and save it as ``<out_dir>/ERA5_<prefix>_<year>.nc``.

    Parameters: ``i_year`` year to fetch; ``out_dir``/``prefix`` control the
    output file name; ``north/west/south/east`` bound the area in degrees.
    Requires a configured CDS API key; this call blocks until the CDS
    request is processed and the file downloaded.
    """
    c = cdsapi.Client()
    c.retrieve(
        'reanalysis-era5-single-levels',
        {
            'product_type': 'reanalysis',
            # Only the variables needed downstream are active; the rest of
            # the catalogue is kept commented out for easy re-enabling.
            'variable': [
                'surface_solar_radiation_downwards','10m_u_component_of_wind',
                '10m_v_component_of_wind','2m_temperature','surface_pressure',
                'total_precipitation', '2m_dewpoint_temperature'
                # 'evaporation', 'potential_evaporation',
                # 'snow_density', 'snow_depth'
                # '10m_u_component_of_wind', '10m_v_component_of_wind', '2m_dewpoint_temperature',
                # '2m_temperature', 'angle_of_sub_gridscale_orography',
                # 'anisotropy_of_sub_gridscale_orography',
                # 'evaporation', 'land_sea_mask',
                # 'maximum_total_precipitation_rate_since_previous_post_processing',
                # 'mean_evaporation_rate', 'mean_potential_evaporation_rate',
                # 'minimum_total_precipitation_rate_since_previous_post_processing',
                # 'orography', 'potential_evaporation', 'precipitation_type',
                # 'snow_density', 'snow_depth', 'soil_temperature_level_1',
                # 'soil_temperature_level_2', 'soil_temperature_level_3', 'soil_temperature_level_4',
                # 'soil_type', 'surface_pressure', 'surface_solar_radiation_downwards',
                # 'total_column_snow_water', 'total_precipitation', 'volumetric_soil_water_layer_1',
                # 'volumetric_soil_water_layer_2', 'volumetric_soil_water_layer_3',
                # 'volumetric_soil_water_layer_4',
            ],
            'area': [
                north, west, south, east,
            ],  # North, West, South, East. Default: global
            'year': str(i_year),
            # All months, days and hours: the CDS API silently skips
            # non-existent dates (e.g. Feb 31).
            'month': [
                '01', '02', '03',
                '04', '05', '06',
                '07', '08', '09',
                '10', '11', '12',
            ],
            'day': [
                '01', '02', '03',
                '04', '05', '06',
                '07', '08', '09',
                '10', '11', '12',
                '13', '14', '15',
                '16', '17', '18',
                '19', '20', '21',
                '22', '23', '24',
                '25', '26', '27',
                '28', '29', '30',
                '31',
            ],
            'time': [
                '00:00', '01:00', '02:00',
                '03:00', '04:00', '05:00',
                '06:00', '07:00', '08:00',
                '09:00', '10:00', '11:00',
                '12:00', '13:00', '14:00',
                '15:00', '16:00', '17:00',
                '18:00', '19:00', '20:00',
                '21:00', '22:00', '23:00',
            ],
            'format': 'netcdf',
        },
        out_dir + '/ERA5_'+ prefix +'_' + str(i_year) +'.nc')
def getTimeNetcdf(netcdfFile):
    """
    Function to extract the time component of a netCDF file

    Reads the 'time' variable and converts it to Python datetime objects
    using the variable's own units and calendar attributes.
    """
    # Get the time array from NETCDF file and convert to Python datetime
    ncfile = netCDF4.Dataset(netcdfFile, 'r')
    try:
        tim = ncfile.variables['time']  # do not cast to numpy array yet
        time_convert = netCDF4.num2date(tim[:], tim.units, tim.calendar,
                                        only_use_cftime_datetimes=False)
    finally:
        # The original leaked the open dataset handle; close it once the
        # values have been materialized.
        ncfile.close()
    return(time_convert)
def getPixelVals(netcdfFile, layer, longitude, latitude):
    """
    Function to query pixels values for a given layer at a given Lat/long

    Shells out to gdalinfo/gdallocationinfo, so GDAL must be on PATH.
    Returns a numpy array of the layer's time series at the pixel, with
    nodata replaced by NaN and the netCDF offset/scale applied.
    """
    #Construct the Layer name for GDAL
    lyr = "NETCDF:"+netcdfFile+":"+layer
    #print(lyr)
    #Parse GDALINFO for the needed
    #Define UR + Resolution
    URx=os.popen('gdalinfo %s | grep Origin | sed "s/\(.*\)(\(.*\),\(.*\))/\\2/"' % (lyr)).read()
    URy=os.popen('gdalinfo %s | grep Origin | sed "s/\(.*\)(\(.*\),\(.*\))/\\3/"' % (lyr)).read()
    #print(URx,URy)
    Rsx=os.popen('gdalinfo %s | grep Pixel\ Size | sed "s/\(.*\)(\(.*\),\(.*\))/\\2/"' % (lyr)).read()
    Rsy=os.popen('gdalinfo %s | grep Pixel\ Size | sed "s/\(.*\)(\(.*\),-\(.*\))/\\3/"' % (lyr)).read()
    #print(Rsx,Rsy)
    #Extract the offset and scale from netcdf file
    offset=os.popen('gdalinfo %s | grep Offset | tail -n1 | sed "s/\(.*\):\ \(.*\),\(.*\):\(.*\)/\\2/"' % (lyr)).read()
    scale=os.popen('gdalinfo %s | grep Offset | tail -n1 | sed "s/\(.*\):\ \(.*\),\(.*\):\(.*\)/\\4/"' % (lyr)).read()
    #print(offset,scale)
    #Extract NoData Value from layer
    nodata=os.popen('gdalinfo %s | grep NoData\ Value | tail -n1 | sed "s/\ \ NoData\ Value=//"' % (lyr)).read()
    #Get row and column numbers (Not needed at this point)
    #nX=os.popen('gdalinfo %s | grep Size\ is\ | sed "s/Size\ is\ \(.*\),\(.*\)/\\1/"' % (lyr)).read()
    #nY=os.popen('gdalinfo %s | grep Size\ is\ | sed "s/Size\ is\ \(.*\),\(.*\)/\\2/"' % (lyr)).read()
    #print(nX,nY)
    #Clean vars
    URx=float(URx.strip())
    URy=float(URy.strip())
    Rsx=float(Rsx.strip())
    Rsy=float(Rsy.strip())
    offset=float(offset.strip())
    scale=float(scale.strip())
    #print(offset,scale)
    nodata=int(nodata.strip())
    #Convert from Lat/Long to X/Y
    lon=float(longitude)
    lat=float(latitude)
    #lon=URx+X*Rsx
    X=int((lon-URx)/Rsx)
    #lat=URy-Y*Rsy
    Y=int((URy-lat)/Rsy)
    #print(X,Y)
    #Create GDALLOCATION X Y Var
    loc = str(X)+" "+str(Y)
    #Query the temporal values at pixel location in image column and row
    result = os.popen('gdallocationinfo -valonly %s %s' % (lyr, loc)).read()
    #NETCDF driver does not read in projected lon lat (Not needed)
    #result = os.popen('gdallocationinfo -valonly -wgs84 %s %s' % (lyr, loc)).read()
    #cleanup the \n everywhere and remove empty elements
    result1=list(result.split("\n"))
    while '' in result1:
        result1.remove('')
    #Create and fill a Numpy array
    array=np.zeros(len(result1))
    for i in range (len(result1)):
        try:
            array[i]=float(result1[i])
        except:
            print("###%s###" % (result1[i]))
    #Replace nodata with NAN
    # Fixed: the original list comprehension was malformed
    # ("x==nodata in x" chained comparison) and its result was discarded,
    # so nodata values were never replaced.
    array[array==nodata]=np.nan
    #Rescale the data
    array=offset+array*scale
    #Return the array
    return(array)
def plotLayer(layer, time_convert, array):
    """
    Function to plot the array data with time
    """
    # Map known ERA5 short variable names to human-readable axis labels;
    # anything unknown falls back to 'undefined product'.
    labels = {
        "ssrd": 'surface downwelling shortwave flux in air (J m-2)',
        "u10": 'windspeed u component (m s-1)',
        "v10": 'windspeed v component (m s-1)',
        "t2m": 'T2m (K)',
        "d2m": 'Dewpoint T2m (K)',
        "sp": 'Surface air pressure (Pa)',
        "tp": 'Total precipitation (m)',
    }
    ylab = labels.get(layer.strip(), 'undefined product')
    fig, ax = plt.subplots()
    ax.plot_date(time_convert, array, 'g-')
    ax.set(xlabel='time', ylabel=ylab, title='ERA5 '+ylab)
    ax.grid()
    plt.show()
def d2m2eact(d2m):
    """
    Converts dew point temperature to actual vapour pressure (hPa)
    """
    # Magnus-style formula; accepts scalars or numpy arrays.
    exponent = (17.27 * d2m) / (d2m + 237.3)
    return 0.6108 * np.exp(exponent)
|
<reponame>hotsyk/vulyk-declaration<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import json
import sys
import re
# NOTE(review): wrapping sys.stdout with a codecs writer is a Python-2
# idiom; on Python 3 stdout is already text -- confirm target interpreter.
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
# Module-level accumulators (appear unused in this script's __main__ path).
lists = {}
fields = []
current_answer = {}
# Maps raw region codes found in the answers to Ukrainian region names.
# "<NAME>" placeholders come from the repository's anonymization pass.
REGIONS_MAP = {
    "ukraine": "Загальнодержавний регіон",
    "kiev": "<NAME>",
    "1": "Івано-Франківська область",
    "2": "Вінницька область",
    "3": "Волинська область",
    "4": "Дніпропетровська область",
    "5": "Донецька область",
    "6": "Житомирська область",
    "7": "Закарпатська область",
    "8": "Запорізька область",
    "9": "Кіровоградська область",
    "10": "Київська область",
    "11": "Кримська Автономна Республіка",
    "Sevostopol": "<NAME>",
    "Sevastopol": "<NAME>",
    "12": "Луганська область",
    "13": "Львівська область",
    "14": "Миколаївська область",
    "15": "Одеська область",
    "16": "Полтавська область",
    "17": "Рівненська область",
    "18": "Сумська область",
    "19": "Тернопільська область",
    "20": "Харківська область",
    "21": "Херсонська область",
    "22": "Хмельницька область",
    "23": "Черкаська область",
    "24": "Чернівецька область",
    "25": "Чернігівська область"
}
def cleanup(s):
    """Normalize dashes, punctuation and whitespace in *s*.

    Non-string values are returned untouched.  The substitutions are
    order-sensitive: em-dash expansion first, then hyphen/punctuation
    spacing, then whitespace collapsing and trimming.
    """
    if not isinstance(s, str):
        return s
    s = s.replace("—", " - ")
    # Re-attach hyphens that were split from their word on either side.
    s = re.sub(r"([^\s])\-\s+", r"\1-", s)
    s = re.sub(r"\s+\-([^\s])", r"-\1", s)
    # Make sure a full stop is followed by a space.
    s = re.sub(r"\.([^\s])", r". \1", s)
    # Normalize spacing around parentheses.
    s = re.sub(r"\s*\(\s*", " (", s)
    s = re.sub(r"\s*\)\s*", ") ", s)
    s = s.replace(" ,", ", ")
    s = s.replace(" .", ". ")
    # Collapse whitespace runs, trim, and drop a trailing period.
    s = re.sub(r"\s+", " ", s)
    return s.strip().rstrip(".")
if __name__ == '__main__':
    # Usage: script.py <answers.jsonl>; each line is a JSON array of answers.
    if len(sys.argv) < 2:
        exit("Not enough arguments")
    in_file = sys.argv[1]
    all_tasks = []
    with open(in_file, "r") as fp:
        for r in fp:
            answers = json.loads(r)
            current_task = []  # NOTE(review): built but never used -- confirm
            for answer in answers:
                # Translate the raw region code into a display name
                # (None when the code is unknown).
                region = REGIONS_MAP.get(
                    answer["answer"]["general"]["post"].get("region"))
                # Normalize each field with cleanup() before printing.
                data = map(cleanup, [
                    answer["answer"]["general"]["last_name"].capitalize(),
                    answer["answer"]["general"]["name"].capitalize(),
                    answer["answer"]["general"]["patronymic"].capitalize(),
                    region,
                    answer["answer"]["general"]["post"]["office"],
                    answer["answer"]["general"]["post"]["post"],
                    answer["answer"]["intro"]["declaration_year"]
                ])
                # CSV-ish output: "Last First Patronymic,region,office,post,year".
                print("%s %s %s,%s,%s,%s,%s" % tuple(data))
|
"""View of Graphs as SubGraph, Reverse, Directed, Undirected.
In some algorithms it is convenient to temporarily morph
a graph to exclude some nodes or edges. It should be better
to do that via a view than to remove and then re-add.
In other algorithms it is convenient to temporarily morph
a graph to reverse directed edges, or treat a directed graph
as undirected, etc. This module provides those graph views.
The resulting views are essentially read-only graphs that
report data from the original graph object. We provide an
attribute G._graph which points to the underlying graph object.
Note: Since graphviews look like graphs, one can end up with
view-of-view-of-view chains. Be careful with chains because
they become very slow with about 15 nested views.
For the common simple case of node induced subgraphs created
from the graph class, we short-cut the chain by returning a
subgraph of the original graph directly rather than a subgraph
of a subgraph. We are careful not to disrupt any edge filter in
the middle subgraph. In general, determining how to short-cut
the chain is tricky and much harder with restricted_views than
with induced subgraphs.
Often it is easiest to use .copy() to avoid chains.
"""
from snapx.classes.coreviews import (
# UnionAdjacency,
# UnionMultiAdjacency,
FilterAtlas,
FilterAdjacency,
FilterMultiAdjacency,
)
from snapx.classes.filters import no_filter
# from snapx.exception import SnapXError
# from snapx.utils import not_implemented_for
import snapx as sx
__all__ = ["subgraph_view"]
def subgraph_view(G, filter_node=no_filter, filter_edge=no_filter):
    """ View of `G` applying a filter on nodes and edges.
    `subgraph_view` provides a read-only view of the input graph that excludes
    nodes and edges based on the outcome of two filter functions `filter_node`
    and `filter_edge`.
    The `filter_node` function takes one argument --- the node --- and returns
    `True` if the node should be included in the subgraph, and `False` if it
    should not be included.
    The `filter_edge` function takes two (or three arguments if `G` is a
    multi-graph) --- the nodes describing an edge, plus the edge-key if
    parallel edges are possible --- and returns `True` if the edge should be
    included in the subgraph, and `False` if it should not be included.
    Both node and edge filter functions are called on graph elements as they
    are queried, meaning there is no up-front cost to creating the view.
    Parameters
    ----------
    G : networkx.Graph
        A directed/undirected graph/multigraph
    filter_node : callable, optional
        A function taking a node as input, which returns `True` if the node
        should appear in the view.
    filter_edge : callable, optional
        A function taking as input the two nodes describing an edge (plus the
        edge-key if `G` is a multi-graph), which returns `True` if the edge
        should appear in the view.
    Returns
    -------
    graph : networkx.Graph
        A read-only graph view of the input graph.
    Examples
    --------
    >>> import networkx as nx
    >>> G = nx.path_graph(6)
    Filter functions operate on the node, and return `True` if the node should
    appear in the view:
    >>> def filter_node(n1):
    ...     return n1 != 5
    ...
    >>> view = nx.subgraph_view(
    ...     G,
    ...     filter_node=filter_node
    ... )
    >>> view.nodes()
    NodeView((0, 1, 2, 3, 4))
    We can use a closure pattern to filter graph elements based on additional
    data --- for example, filtering on edge data attached to the graph:
    >>> G[3][4]['cross_me'] = False
    >>> def filter_edge(n1, n2):
    ...     return G[n1][n2].get('cross_me', True)
    ...
    >>> view = nx.subgraph_view(
    ...     G,
    ...     filter_edge=filter_edge
    ... )
    >>> view.edges()
    EdgeView([(0, 1), (1, 2), (2, 3), (4, 5)])
    >>> view = nx.subgraph_view(
    ...     G,
    ...     filter_node=filter_node,
    ...     filter_edge=filter_edge,
    ... )
    >>> view.nodes()
    NodeView((0, 1, 2, 3, 4))
    >>> view.edges()
    EdgeView([(0, 1), (1, 2), (2, 3)])
    """
    # Build an empty frozen graph of the same class, then wire it up to read
    # through to G via filtered adapters.
    newG = sx.freeze(G.__class__())
    newG._NODE_OK = filter_node
    newG._EDGE_OK = filter_edge
    # create view by assigning attributes from G
    newG._graph = G
    newG.graph = G.graph
    # Node attribute storage is filtered lazily by FilterAtlas.
    newG._node_extra_attr = FilterAtlas(G._node_extra_attr, filter_node)
    if G.is_multigraph():
        Adj = FilterMultiAdjacency
        # Predecessor adjacency stores edges as (v, u[, k]); swap the
        # endpoints before consulting the caller's filter.
        def reverse_edge(u, v, k):
            return filter_edge(v, u, k)
    else:
        Adj = FilterAdjacency
        def reverse_edge(u, v):
            return filter_edge(v, u)
    if G.is_directed():
        newG._succ = Adj(G._succ, filter_node, filter_edge)
        newG._pred = Adj(G._pred, filter_node, reverse_edge)
        newG._adj = newG._succ
    else:
        # NOTE(review): the networkx reference implementation assigns the
        # filtered adjacency to ``newG._adj`` here; overwriting ``.nodes``
        # with an adjacency adapter looks suspect -- confirm against the
        # snapx graph internals before changing.
        newG.nodes = Adj(G.nodes, filter_node, filter_edge)
    return newG
|
# -*- coding: utf-8 -*-
# vim:set ts=4 sw=4 et:
#
# Copyright 2014 <NAME>.
#
# This file is part of the mavros package and subject to the license terms
# in the top-level LICENSE file of the mavros repository.
# https://github.com/mavlink/mavros/tree/master/LICENSE.md
import csv
import time
import rospy
import mavros
from mavros_msgs.msg import ParamValue
from mavros_msgs.srv import ParamPull, ParamPush, ParamGet, ParamSet
class Parameter(object):
    """Class representing one parameter"""

    def __init__(self, param_id, param_value=0):
        # Identifier and current value of the FCU parameter.
        self.param_id = param_id
        self.param_value = param_value

    def __repr__(self):
        template = "<Parameter '{}': {}>"
        return template.format(self.param_id, self.param_value)
class ParamFile(object):
    """Base class for param file parsers.

    Subclasses implement ``read``/``write`` for a concrete file format
    (see MissionPlannerParam and QGroundControlParam).
    """
    def __init__(self, args):
        pass

    def read(self, file_):
        """Returns a iterable of Parameters"""
        raise NotImplementedError

    def write(self, file_, parameters):
        """Writes Parameters to file"""
        # Fixed the "parametes" typo so the signature matches the
        # subclasses' overrides.
        raise NotImplementedError
class MissionPlannerParam(ParamFile):
    """Parse MissionPlanner param files"""

    class CSVDialect(csv.Dialect):
        # MissionPlanner writes comma-separated files with CRLF endings.
        delimiter = ','
        doublequote = False
        skipinitialspace = True
        lineterminator = '\r\n'
        quoting = csv.QUOTE_NONE

    def read(self, file_):
        def to_numeric(text):
            # Values containing a decimal point become float, others int.
            return float(text) if '.' in text else int(text)

        for row in csv.reader(file_, self.CSVDialect):
            if row[0].startswith('#'):
                continue  # skip comments
            if len(row) != 2:
                raise ValueError("wrong field count")
            yield Parameter(row[0].strip(), to_numeric(row[1]))

    def write(self, file_, parameters):
        writer = csv.writer(file_, self.CSVDialect)
        writer.writerow(("#NOTE: " + time.strftime("%d.%m.%Y %T"),))
        for param in parameters:
            writer.writerow((param.param_id, param.param_value))
class QGroundControlParam(ParamFile):
    """Parse QGC param files"""

    class CSVDialect(csv.Dialect):
        # QGC uses tab-separated lines terminated by a bare LF.
        delimiter = '\t'
        doublequote = False
        skipinitialspace = True
        lineterminator = '\n'
        quoting = csv.QUOTE_NONE

    def read(self, file_):
        def to_numeric(text):
            # Values containing a decimal point become float, others int.
            return float(text) if '.' in text else int(text)

        # QGC rows are: MAV id, component id, name, value, type.
        for row in csv.reader(file_, self.CSVDialect):
            if row[0].startswith('#'):
                continue  # skip comments
            if len(row) != 5:
                raise ValueError("wrong field count")
            yield Parameter(row[2].strip(), to_numeric(row[3]))

    def write(self, file_, parameters):
        def to_type(value):
            # MAVLink type codes: 9 == REAL32, 6 == INT32.
            if isinstance(value, float):
                return 9  # REAL32
            elif isinstance(value, int):
                return 6  # INT32
            else:
                raise ValueError("unknown type: " + repr(type(value)))

        sysid = rospy.get_param(mavros.get_topic('target_system_id'), 1)
        compid = rospy.get_param(mavros.get_topic('target_component_id'), 1)
        writer = csv.writer(file_, self.CSVDialect)
        writer.writerow(("# NOTE: " + time.strftime("%d.%m.%Y %T"), ))
        writer.writerow(("# Onboard parameters saved by mavparam for ({}, {})".format(sysid, compid), ))
        writer.writerow(("# MAV ID", "COMPONENT ID", "PARAM NAME", "VALUE", "(TYPE)"))
        for param in parameters:
            writer.writerow((sysid, compid, param.param_id, param.param_value, to_type(param.param_value), ))  # XXX
def param_ret_value(ret):
    """Extract the numeric payload from a ParamGet/ParamSet response.

    The service response carries both an integer and a real slot; whichever
    is non-zero wins (integer first), falling back to 0.
    """
    value = ret.value
    if value.integer != 0:
        return value.integer
    if value.real != 0.0:
        return value.real
    return 0
def param_get(param_id):
    """Fetch one FCU parameter value via the mavros 'param get' service.

    Raises IOError if the service call fails or reports no success.
    """
    try:
        # Renamed from ``get`` to avoid shadowing the builtin.
        service = rospy.ServiceProxy(mavros.get_topic('param', 'get'), ParamGet)
        ret = service(param_id=param_id)
    except rospy.ServiceException as ex:
        raise IOError(str(ex))
    if not ret.success:
        raise IOError("Request failed.")
    return param_ret_value(ret)
def param_set(param_id, value):
    """Set one FCU parameter via the mavros 'param set' service.

    Floats travel in the ``real`` field of ParamValue, everything else in
    the ``integer`` field.  Raises IOError on service failure.
    """
    if isinstance(value, float):
        val = ParamValue(integer=0, real=value)
    else:
        val = ParamValue(integer=value, real=0.0)
    try:
        # Renamed from ``set`` to avoid shadowing the builtin.
        service = rospy.ServiceProxy(mavros.get_topic('param', 'set'), ParamSet)
        ret = service(param_id=param_id, value=val)
    except rospy.ServiceException as ex:
        raise IOError(str(ex))
    if not ret.success:
        raise IOError("Request failed.")
    return param_ret_value(ret)
def param_get_all(force_pull=False):
    """Pull all FCU parameters through mavros and return them.

    Returns a tuple ``(param_received, [Parameter, ...])`` with the list
    sorted by param_id.  Raises IOError on service failure.
    NOTE(review): ``iteritems`` and ``sorted(..., cmp=...)`` are Python-2
    only -- this function will not run on Python 3 as written.
    """
    try:
        pull = rospy.ServiceProxy(mavros.get_topic('param', 'pull'), ParamPull)
        ret = pull(force_pull=force_pull)
    except rospy.ServiceException as ex:
        raise IOError(str(ex))
    if not ret.success:
        raise IOError("Request failed.")
    # mavros mirrors the pulled parameters onto the ROS parameter server.
    params = rospy.get_param(mavros.get_topic('param'))
    return (ret.param_received,
            sorted((Parameter(k, v) for k, v in params.iteritems()),
                   cmp=lambda x, y: cmp(x.param_id, y.param_id))
            )
def param_set_list(param_list):
    """Upload a list of Parameters to the FCU.

    Every parameter is first mirrored onto the ROS parameter server, then
    mavros is asked to push them all to the vehicle.  Raises IOError on
    service failure; returns the number of transferred parameters.
    """
    # 1. load parameters to parameter server
    for param in param_list:
        rospy.set_param(mavros.get_topic('param', param.param_id), param.param_value)
    # 2. request push all
    try:
        service = rospy.ServiceProxy(mavros.get_topic('param', 'push'), ParamPush)
        ret = service()
    except rospy.ServiceException as ex:
        raise IOError(str(ex))
    if not ret.success:
        raise IOError("Request failed.")
    return ret.param_transfered
|
<reponame>rdo-infra/releng
#! /usr/bin/python3
#
# Mostly copied code from find_unblocked_orphans.py in fedora
#
# Credits to original authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (c) 2009-2013 Red Hat
# SPDX-License-Identifier: GPL-2.0
#
# From:
# https://pagure.io/releng/blob/main/f/scripts/find_unblocked_orphans.py
from collections import OrderedDict
import argparse
import os
import sys
import dnf
def get_repos(release):
    """Return the repo-name -> baseurl map used for ``release``.

    Covers the per-component RDO Trunk repos plus deps/build-deps and the
    CentOS 8 Stream base repos.  Raises KeyError for unknown releases.
    """
    trunk = "http://trunk.rdoproject.org/centos8-%s" % release
    # Components whose repo key is simply "rdo-<component>".
    components = [
        "baremetal", "cinder", "clients", "cloudops", "common", "compute",
        "glance", "manila", "network", "octavia", "security", "swift",
        "tempest", "tripleo", "ui",
    ]
    RDO_TRUNK_C8 = {
        "rdo-%s" % c: "%s/component/%s/current" % (trunk, c)
        for c in components
    }
    # Historical irregularity: the validation component is keyed
    # "rdo-component".
    RDO_TRUNK_C8["rdo-component"] = "%s/component/validation/current" % trunk
    RDO_TRUNK_C8.update({
        "deps": "%s/deps/latest" % trunk,
        "build-deps": "%s/build-deps/latest" % trunk,
        "deps-srpm": "%s/deps/latest/SRPMS" % trunk,
        "build-srpm": "%s/build-deps/latest/SRPMS" % trunk,
        "baseos": "http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/",
        "appstream": "http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/",
        "baseos-srpm": "https://vault.centos.org/centos/8-stream/BaseOS/Source/",
        "appstream-srpm": "https://vault.centos.org/centos/8-stream/AppStream/Source/",
    })
    releases = {
        "master": RDO_TRUNK_C8,
        "wallaby": RDO_TRUNK_C8,
        "victoria": RDO_TRUNK_C8,
        "ussuri": RDO_TRUNK_C8,
        "train": RDO_TRUNK_C8,
    }
    return releases[release]
def eprint(*args, **kwargs):
    """print() wrapper defaulting to stderr with immediate flushing.

    Explicit ``file=``/``flush=`` keyword arguments still win.
    """
    if 'file' not in kwargs:
        kwargs['file'] = sys.stderr
    if 'flush' not in kwargs:
        kwargs['flush'] = True
    print(*args, **kwargs)
def setup_dnf(release="wallaby"):
    """Configure a dnf Base with every repo from get_repos(release),
    load them all, and return a query over the combined sack.

    Network access is required; load() raises if a repo is unavailable
    (skip_if_unavailable=False).
    """
    repos = get_repos(release)
    base = dnf.Base()
    # use digest to make repo id unique for each URL
    # NOTE(review): the comment above is stale -- the id is simply
    # "repo-<name>" below, no digest is involved.
    conf = base.conf
    for name in repos.keys():
        r = base.repos.add_new_repo(
            ("repo-%s" % name),
            conf,
            baseurl=[repos[name]],
            skip_if_unavailable=False,
            gpgcheck=0,
        )
        r.enable()
        r.load()
    base.fill_sack(load_system_repo=False, load_available_repos=True)
    return base.sack.query()
class DepChecker:
    """Find packages that depend on the binaries built from a given SRPM.

    Wraps a dnf query over the repos of one RDO release and lazily builds
    bidirectional source<->binary package mappings.
    """
    def __init__(self, release, repo=None, source_repo=None, namespace='rpms'):
        # ``repo``, ``source_repo`` and ``namespace`` are currently unused;
        # kept for interface compatibility with the upstream script.
        self._src_by_bin = None
        self._bin_by_src = None
        self.release = release
        dnfquery = setup_dnf(release=release)
        self.dnfquery = dnfquery
        self.pagure_dict = {}
        # SRPM names requested but absent from the configured repos.
        self.not_in_repo = []
    def create_mapping(self):
        """Populate the lazy source<->binary caches from the dnf query."""
        src_by_bin = {}  # Dict of source pkg objects by binary package objects
        bin_by_src = {}  # Dict of binary pkgobjects by srpm name
        # Populate the dicts
        for rpm_package in self.dnfquery:
            if rpm_package.arch == 'src':
                continue
            srpm = self.SRPM(rpm_package)
            src_by_bin[rpm_package] = srpm
            if srpm:
                if srpm.name in bin_by_src:
                    bin_by_src[srpm.name].append(rpm_package)
                else:
                    bin_by_src[srpm.name] = [rpm_package]
        self._src_by_bin = src_by_bin
        self._bin_by_src = bin_by_src
    @property
    def by_src(self):
        # Lazily build the srpm-name -> [binary packages] mapping.
        if not self._bin_by_src:
            self.create_mapping()
        return self._bin_by_src
    @property
    def by_bin(self):
        # Lazily build the binary package -> source package mapping.
        if not self._src_by_bin:
            self.create_mapping()
        return self._src_by_bin
    def find_dependent_packages(self, srpmname, ignore):
        """ Return packages depending on packages built from SRPM ``srpmname``
        that are built from different SRPMS not specified in ``ignore``.
        :param ignore: list of binary package names that will not be
        returned as dependent packages or considered as alternate
        providers
        :type ignore: list() of str()
        :returns: OrderedDict dependent_package: list of requires only
        provided by package ``srpmname`` {dep_pkg: [prov, ...]}
        """
        # Some of this code was stolen from repoquery
        dependent_packages = {}
        # Handle packags not found in the repo
        try:
            rpms = self.by_src[srpmname]
        except KeyError:
            # If we don't have a package in the repo, there is nothing to do
            eprint(f"Package {srpmname} not found in repo")
            self.not_in_repo.append(srpmname)
            rpms = []
        # provides of all packages built from ``srpmname``
        provides = []
        for pkg in rpms:
            # add all the provides from the package as strings
            string_provides = [str(prov) for prov in pkg.provides]
            provides.extend(string_provides)
            # add all files as provides
            # pkg.files is a list of paths
            # sometimes paths start with "//" instead of "/"
            # normalise "//" to "/":
            # os.path.normpath("//") == "//", but
            # os.path.normpath("///") == "/"
            file_provides = [os.path.normpath(f'//{fn}') for fn in pkg.files]
            provides.extend(file_provides)
        # Zip through the provides and find what's needed
        for prov in provides:
            # check only base provide, ignore specific versions
            # "foo = 1.fc20" -> "foo"
            base_provide, *_ = prov.split()
            # FIXME: Workaround for:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1191178
            if base_provide[0] == "/":
                base_provide = base_provide.replace("[", "?")
                base_provide = base_provide.replace("]", "?")
            # Elide provide if also provided by another package
            for pkg in self.dnfquery.filter(provides=base_provide, latest=1):
                # FIXME: might miss broken dependencies in case the other
                # provider depends on a to-be-removed package as well
                if pkg.name in ignore:
                    # eprint(f"Ignoring provider package {pkg.name}")
                    pass
                elif pkg not in rpms:
                    break
            else:
                # for/else: reached only when no alternate provider broke out
                # of the loop above, i.e. only our SRPM provides this.
                for dependent_pkg in self.dnfquery.filter(
                        latest=1,
                        requires=base_provide):
                    # skip if the dependent rpm package belongs to the
                    # to-be-removed Fedora package
                    if dependent_pkg in self.by_src[srpmname]:
                        continue
                    # skip if the dependent rpm package is also a
                    # package that should be removed
                    if dependent_pkg.name in ignore:
                        continue
                    # use setdefault to either create an entry for the
                    # dependent package or add the required prov
                    dependent_packages.setdefault(dependent_pkg, set()).add(
                        prov)
        return OrderedDict(sorted(dependent_packages.items()))
    # This function was stolen from pungi
    def SRPM(self, package):
        """Given a package object, get a package object for the
        corresponding source rpm. Requires dnf still configured
        and a valid package object."""
        srpm, *_ = package.sourcerpm.split('.src.rpm')
        sname, sver, srel = srpm.rsplit('-', 2)
        return srpm_nvr_object(self.dnfquery, sname, sver, srel)
def srpm_nvr_object(query, name, version, release):
    """Return the source package object matching name/version/release.

    Returns None when the query yields no match (mirrors the original's
    swallowed IndexError).
    """
    matches = query.filter(name=name,
                           version=version,
                           release=release,
                           latest=1,
                           arch='src').run()
    if matches:
        return matches[0]
    return None
def main():
    """CLI entry point: print every package depending on --pkg-name."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--release",
                        choices=["master", "wallaby", "victoria", "ussuri",
                                 "train"],
                        default="master")
    parser.add_argument("--pkg-name")
    opts = parser.parse_args()
    eprint('Getting dependants for %s' % opts.pkg_name)
    checker = DepChecker(opts.release)
    # One line per dependant: NEVRA plus the repo it came from.
    for dep in checker.find_dependent_packages(opts.pkg_name, []):
        print(dep.name + "-" + dep.evr + "." + dep.arch +
              " from " + str(dep.reponame))
|
from pprint import pprint
import pickle
from tqdm import tqdm
import json
from collections import defaultdict
import native_api_utils
import classes
import config
import utils
# Module-wide settings loaded from config.py.
incident_types=config.incident_types
# NOTE(review): several functions below reference a global ``languages``
# that is never bound in this module; only ``languages_list`` is assigned
# here -- confirm whether this line should also bind ``languages``.
languages_list=config.languages_list
def get_additional_reference_texts(ref_texts, found_names, found_languages):
    """
    Get more reference texts using the Wiki langlinks of the other API.

    For every language still missing, follow the langlinks of the texts we
    already have and fetch those pages.  New ReferenceText objects are
    appended to ``ref_texts`` (mutated in place) and the list is returned.
    ``found_names`` is accepted for signature compatibility but unused.
    """
    # NOTE(review): ``languages`` is read from module scope but only
    # ``languages_list`` is bound in this file -- confirm.
    search_for_languages = set(languages) - set(found_languages)
    if not search_for_languages:
        return ref_texts
    # Collect, per missing language, the wiki pages linked from the texts
    # we already have.
    to_query = defaultdict(set)
    for ref_text in ref_texts:
        wiki_langlinks = ref_text.wiki_langlinks
        for lang, the_link in wiki_langlinks:
            if lang in search_for_languages:
                to_query[lang].add(the_link)
    props = ['extracts', 'langlinks', 'extlinks']
    for language, pages in to_query.items():
        for page in pages:
            page_info = native_api_utils.obtain_wiki_page_info(page, language, props)
            if 'extract' not in page_info.keys():
                # Nothing to build a text from.  (The original appended an
                # undefined/stale ``ref_text`` in this case -- a latent
                # NameError / duplicate-append bug.)
                continue
            new_ref_text = classes.ReferenceText(
                content=page_info['extract'],
                wiki_text_and_links=page_info['wikitext'],
                wiki_langlinks=page_info['langlinks'],
                name=page,
                language=language,
                found_by=['langlinks']
            )
            if 'extlinks' in page_info.keys():
                new_ref_text.secondary_ref_texts = page_info['extlinks']
            if 'langlinks' in page_info.keys():
                # Fixed: the original wrote to ``ref.text.wiki_langlinks``,
                # which would raise NameError (``ref`` does not exist).
                new_ref_text.wiki_langlinks = page_info['langlinks']
            ref_texts.append(new_ref_text)
    return ref_texts
def add_wikipedia_pages_from_api(incidents, wdt_ids, raw_results):
    """Attach Wikipedia page titles found via the Wikidata API to incidents.

    Queries titles in batches of 50 Wikidata ids; existing reference texts
    that match get 'API' added to ``found_by``, otherwise a new
    ReferenceText is created.  ``raw_results`` is unused but kept for
    signature compatibility.  ``incidents`` is mutated and returned.
    """
    assert len(wdt_ids) > 0
    id_batches = utils.split_in_batches(wdt_ids, 50)
    for index, batch in enumerate(id_batches):
        print('Querying batch number %d' % index)
        wiki_pages = native_api_utils.obtain_wiki_page_titles(batch, languages)
        for incident in incidents:
            if incident.wdt_id not in wiki_pages.keys():
                continue
            incident_wikipedia = wiki_pages[incident.wdt_id]
            for language, name in incident_wikipedia.items():
                matches = [rt for rt in incident.reference_texts
                           if rt.name == name and rt.language == language]
                for rt in matches:
                    rt.found_by.append('API')
                if not matches:
                    ref_text = classes.ReferenceText(
                        name=name,
                        language=language,
                        found_by=['API']
                    )
                    incident.reference_texts.append(ref_text)
    return incidents
def retrieve_incidents_per_type(type_label, limit=10):
    """
    Given an event type identifier, retrieve incidents that belong to this type.

    :param type_label: event type label, e.g. 'election' or 'murder'.
    :param limit: maximum number of incidents to ask the SPARQL endpoint for.
    :return: list of classes.Incident objects enriched via the Wikipedia API.
    """
    # Map the external event-type label onto its JSON mapping file on disk.
    eventtype2json = {'election': 'change_of_leadership', 'murder': 'killing'}
    mapping_path = 'wdt_fn_mappings/%s.json' % eventtype2json[type_label]
    with open(mapping_path, 'rb') as mapping_file:
        wdt_fn_mappings_COL = json.load(mapping_file)

    incidents = []
    print("### 1. ### Retrieving and storing wikidata information from SPARQL...")
    # NOTE: `languages` is a module-level global bound by the __main__ loop.
    results_by_id = utils.construct_and_run_query(type_label, languages, wdt_fn_mappings_COL, limit)

    wdt_ids = []
    for full_wdt_id, inc_data in results_by_id.items():
        # Wikidata IDs come back as full URIs; keep only the trailing Q-id.
        short_id = full_wdt_id.split('/')[-1]
        wdt_ids.append(short_id)

        reference_texts = []
        for ref_language, ref_name in inc_data['references'].items():
            print(ref_language, ref_name, short_id)
            reference_texts.append(classes.ReferenceText(
                name=ref_name,
                language=ref_language,
                found_by=['SPARQL']
            ))

        incidents.append(classes.Incident(
            incident_type=type_label,
            wdt_id=short_id,
            extra_info=inc_data['extra_info'],
            reference_texts=reference_texts
        ))

    print("Wikidata querying and storing finished. Number of incidents:", len(incidents))
    print('### 2. ### Enriching the reference texts through the Wikipedia-Wikidata API...')
    incidents = add_wikipedia_pages_from_api(incidents, wdt_ids, results_by_id)
    print('API querying done. Number of incidents:', len(incidents))
    return incidents
def obtain_reference_texts(incidents):
    """
    Enrich each incident's reference texts with data from the Wikipedia API,
    then try to obtain extra documents through the stored Wiki langlinks.

    :param incidents: list of classes.Incident objects; their reference_texts
        are enriched and deduplicated in place.
    :return: the list of incidents with enriched reference texts.
    """
    print('### 3. ### Retrieve reference text information from the wikipedia API + obtain extra documents through Wiki langlinks')
    # BUG FIX: `tqdm` was used below without ever being imported in this
    # module, so the loop crashed with a NameError. Import it lazily and fall
    # back to a plain pass-through when the library is not installed.
    try:
        from tqdm import tqdm  # optional progress bar
    except ImportError:
        def tqdm(iterable):
            return iterable
    new_incidents = []
    for incident in tqdm(incidents):
        new_reference_texts = []
        for ref_text in incident.reference_texts:
            props = ['extracts', 'langlinks', 'extlinks']
            # Request langlinks to every configured language except the page's
            # own. NOTE: `languages` is a module-level global set by __main__.
            other_languages = set(languages) - set([ref_text.language])
            page_info = native_api_utils.obtain_wiki_page_info(ref_text.name, ref_text.language, props, other_languages=other_languages)
            if 'extract' in page_info.keys():
                ref_text.content = page_info['extract']
            if 'extlinks' in page_info.keys():
                ref_text.secondary_ref_texts = page_info['extlinks']
            if 'langlinks' in page_info.keys():
                ref_text.wiki_langlinks = page_info['langlinks']
            if 'wikitext' in page_info.keys():
                ref_text.wiki_text_and_links = page_info['wikitext']
            new_reference_texts.append(ref_text)
        new_reference_texts = utils.deduplicate_ref_texts(new_reference_texts)
        found_languages, found_names = utils.get_languages_and_names(new_reference_texts)
        if len(new_reference_texts):
            # If there are reference texts with text, try to get more data by
            # using the Wiki langlinks info we have stored.
            new_reference_texts = get_additional_reference_texts(new_reference_texts, found_names, found_languages)
        incident.reference_texts = new_reference_texts
        new_incidents.append(incident)
    print('Retrieval of reference texts done. Number of incidents:', len(new_incidents))
    return new_incidents
if __name__ == '__main__':
    # BUG FIX: `pickle` is used below but was never imported anywhere in this
    # module, so the final dump crashed with a NameError.
    import pickle

    for incident_type in incident_types:
        # `languages` deliberately becomes a module-level global here: the
        # retrieval helpers above read it directly.
        for languages in languages_list:
            # Query SPARQL and the API to get incidents, their properties, and labels.
            incidents = retrieve_incidents_per_type(incident_type, 999999)
            new_incidents = obtain_reference_texts(incidents)
            collection = classes.IncidentCollection(incidents=new_incidents,
                                                    incident_type=incident_type,
                                                    languages=languages)
            # Serialize the collection for downstream processing.
            output_file = utils.make_output_filename(incident_type, languages)
            with open(output_file, 'wb') as of:
                pickle.dump(collection, of)
|
<filename>scripts/usufy.py
################################################################################
#
# Copyright 2015-2021 <NAME> and <NAME>
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import argparse
import colorama
colorama.init(autoreset=True)
import datetime as dt
import json
# global issues for multiprocessing
from multiprocessing import Process, Queue, Pool
import os
# Preparing to capture interruptions smoothly
import signal
import sys
import time
import traceback
import textwrap
# configuration and utils
import osrframework
import osrframework.utils.platform_selection as platform_selection
import osrframework.utils.configuration as configuration
import osrframework.utils.banner as banner
import osrframework.utils.benchmark as benchmark
import osrframework.utils.browser as browser
import osrframework.utils.general as general
from osrframework.utils.exceptions import *
def fuzzUsufy(fDomains = None, fFuzzStruct = None):
    """
    Method to guess the usufy path against a list of domains or subdomains.

    Args:
        fDomains: A list to strings containing the domains and (optionally) a
            nick.
        fFuzzStruct: A list to strings containing the transforms to be
            performed.

    Returns:
        dict: A dictionary of the form of `{"domain": "url"}`.
    """
    if fFuzzStruct is None:
        # Loading these structures by default
        fuzzingStructures = [
            "http://<DOMAIN>/<USERNAME>",
            "http://<DOMAIN>/~<USERNAME>",
            "http://<DOMAIN>/?action=profile;user=<USERNAME>",
            "http://<DOMAIN>/causes/author/<USERNAME>",
            "http://<DOMAIN>/channel/<USERNAME>",
            "http://<DOMAIN>/community/profile/<USERNAME>",
            "http://<DOMAIN>/component/comprofiler/userprofiler/<USERNAME>",
            "http://<DOMAIN>/details/@<USERNAME>",
            "http://<DOMAIN>/foros/member.php?username=<USERNAME>",
            "http://<DOMAIN>/forum/member/<USERNAME>",
            "http://<DOMAIN>/forum/member.php?username=<USERNAME>",
            "http://<DOMAIN>/forum/profile.php?mode=viewprofile&u=<USERNAME>",
            "http://<DOMAIN>/home/<USERNAME>",
            "http://<DOMAIN>/index.php?action=profile;user=<USERNAME>",
            "http://<DOMAIN>/member_profile.php?u=<USERNAME>",
            "http://<DOMAIN>/member.php?username=<USERNAME>",
            "http://<DOMAIN>/members/?username=<USERNAME>",
            "http://<DOMAIN>/members/<USERNAME>",
            "http://<DOMAIN>/members/view/<USERNAME>",
            "http://<DOMAIN>/mi-espacio/<USERNAME>",
            "http://<DOMAIN>/u<USERNAME>",
            "http://<DOMAIN>/u/<USERNAME>",
            "http://<DOMAIN>/user-<USERNAME>",
            "http://<DOMAIN>/user/<USERNAME>",
            "http://<DOMAIN>/user/<USERNAME>.html",
            "http://<DOMAIN>/users/<USERNAME>",
            "http://<DOMAIN>/usr/<USERNAME>",
            "http://<DOMAIN>/usuario/<USERNAME>",
            "http://<DOMAIN>/usuarios/<USERNAME>",
            "http://<DOMAIN>/en/users/<USERNAME>",
            "http://<DOMAIN>/people/<USERNAME>",
            "http://<DOMAIN>/profil/<USERNAME>",
            "http://<DOMAIN>/profile/<USERNAME>",
            "http://<DOMAIN>/profile/page/<USERNAME>",
            "http://<DOMAIN>/rapidforum/index.php?action=profile;user=<USERNAME>",
            "http://<DOMAIN>/social/usuarios/<USERNAME>",
            "http://<USERNAME>.<DOMAIN>",
            "http://<USERNAME>.<DOMAIN>/user/"
        ]
    else:
        try:
            fuzzingStructures = fFuzzStruct.read().splitlines()
        except Exception:
            # BUG FIX: the original swallowed the error and continued with
            # `fuzzingStructures` unbound, raising a NameError further down;
            # it also concatenated the file object itself into the message.
            print("Usufy could NOT open the following file: " + str(fFuzzStruct))
            return {}

    res = {}

    lines = fDomains.read().splitlines()
    # Going through all the lines
    for l in lines:
        fields = l.split()
        if len(fields) < 2:
            # A testable line needs both a domain and a sample nick; the
            # original indexed blindly and raised IndexError on short lines.
            continue
        domain = fields[0]
        # selecting the nick to be tested in this domain
        nick = fields[1]
        print("Performing tests for " + domain + "...")

        # possible URLs found for this domain
        possibleURL = []
        for struct in fuzzingStructures:
            # Instantiating the wildcards of the fuzzing structure
            urlToTry = struct.replace("<DOMAIN>", domain)
            test = urlToTry.replace("<USERNAME>", nick.lower())
            print("Processing " + test + "...")
            i3Browser = browser.Browser()
            try:
                html = i3Browser.recoverURL(test)
                if nick in html:
                    possibleURL.append(test)
                    print(general.success("\tPossible usufy found!!!\n"))
            except Exception:
                print("The resource could not be downloaded.")
        res[domain] = possibleURL

    print(json.dumps(res, indent=2))
    return res
def pool_function(p, nick, rutaDescarga, avoidProcessing=True, avoidDownload=True, verbosity=1):
    """
    Wrapper for being able to launch all the threads of getPageWrapper.

    We receive the parameters for getPageWrapper as a tuple.

    Args:
        p: Platform where the information is stored. (The original docstring
            called this `pName`; the actual parameter is the platform object.)
        nick: Nick to be searched.
        rutaDescarga: Local file where saving the obtained information.
        avoidProcessing: Boolean var that defines whether the profiles will NOT
            be processed (stored in this version).
        avoidDownload: Boolean var that defines whether the profiles will NOT be
            downloaded (stored in this version).
        verbosity: The verbosity level: 1, shows errors; 2, shows warnings.

    Returns:
        A dictionary with the following structure:
            {
                "platform": "Platform",
                "status": "DONE",
                "data": "<data>"
            }
        Data is None or a serialized representation of the dictionary.
    """
    try:
        res = p.get_info(
            query=nick,
            mode="usufy"
        )
        return {"platform": str(p), "status": "Ok", "data": res}
    except Exception as e:
        # BUG FIX: the original joined these two clauses with `and`, which
        # required the exception to satisfy BOTH verbosity thresholds at once,
        # so errors at verbosity 1 were never printed. Errors print at
        # verbosity >= 1, other framework exceptions at verbosity >= 2.
        if (isinstance(e, OSRFrameworkError) and verbosity >= 1) or (isinstance(e, OSRFrameworkException) and verbosity >= 2):
            print(str(e))
        return {"platform": str(p), "status": e, "data": e.generic}
def process_nick_list(nicks, platforms=None, rutaDescarga="./", avoidProcessing=True, avoidDownload=True, nThreads=12, verbosity=1, logFolder="./logs"):
    """
    Process a list of nicks to check whether they exist.

    This method receives as a parameter a series of nicks and verifies whether
    those nicks have a profile associated in different social networks.

    Args:
        nicks: List of nicks to process.
        platforms: List of <Platform> objects to be processed.
        rutaDescarga: Local file where saving the obtained information.
        avoidProcessing: A boolean var that defines whether the profiles will
            NOT be processed.
        avoidDownload: A boolean var that defines whether the profiles will NOT
            be downloaded.
        verbosity: The level of verbosity to be used.
        logFolder: The path to the log folder.

    Returns:
        A dictionary where the key is the nick and the value another dictionary
        where the keys are the social networks and the value is the
        corresponding URL.
    """
    if platforms is None:
        platforms = platform_selection.get_all_platform_names("usufy")
    # Defining the output results variable
    res = []
    # Processing the whole list of terms...
    for nick in nicks:
        # If the process is executed by the current app, we use the Processes. It is faster than pools.
        if nThreads <= 0 or nThreads > len(platforms):
            nThreads = len(platforms)
        # Using threads in a pool if we are not running the program in main
        # Example catched from: https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python
        try:
            # Ignore SIGINT while the pool forks so that only the parent
            # process reacts to Ctrl+C; the handler is restored right after.
            original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
            pool = Pool(nThreads)
            signal.signal(signal.SIGINT, original_sigint_handler)
        except ValueError:
            # To avoid: ValueError: signal only works in main thread
            pool = Pool(nThreads)
        pool_results = []
        try:
            def log_result(result):
                # This is called whenever foo_pool(i) returns a result.
                # result_list is modified only by the main process, not the pool workers.
                pool_results.append(result)
            for plat in platforms:
                # We need to create all the arguments that will be needed
                parameters = (plat, nick, rutaDescarga, avoidProcessing, avoidDownload, verbosity)
                pool.apply_async(pool_function, args=parameters, callback=log_result,)
            # Waiting for results to be finished
            # NOTE(review): busy-waits in 1 s steps until every platform has
            # reported back through the callback.
            while len(pool_results) < len(platforms):
                time.sleep(1)
            # Closing normal termination
            pool.close()
        except KeyboardInterrupt:
            print(general.warning("\n[!] Process manually stopped by the user. Terminating workers.\n"))
            pool.terminate()
            # Report which platforms had not produced a result yet so the user
            # can relaunch (or exclude) exactly those.
            print(general.warning("[!] The following platforms were not processed:"))
            pending = ""
            for p in platforms:
                processed = False
                for processedPlatform in pool_results:
                    if str(p) == processedPlatform["platform"]:
                        processed = True
                        break
                if not processed:
                    print("\t- " + str(p))
                    pending += " " + str(p).lower()
            print("\n")
            print(general.warning("If you want to relaunch the app with these platforms you can always run the command with: "))
            print("\t usufy ... -p " + general.emphasis(pending))
            print("\n")
            print(general.warning("If you prefer to avoid these platforms you can manually evade them for whatever reason with: "))
            print("\t usufy ... -x " + general.emphasis(pending))
            print("\n")
        pool.join()
        # Collecting the results
        profiles = []
        errors = {}
        warnings = {}
        for info in pool_results:
            if info["status"] == "Ok":
                # Successful workers serialize their findings as a JSON list.
                array = json.loads(info["data"])
                for r in array:
                    if r != "{}":
                        profiles.append(r)
            else:
                # On failure, "status" carries the exception raised by the
                # worker; bucket it as error or warning by its class.
                e = info["status"]
                if isinstance(e, OSRFrameworkError):
                    aux = errors.get(e.__class__.__name__, {})
                    aux["info"] = info["data"]
                    aux["counter"] = aux.get("counter", 0) + 1
                    errors[e.__class__.__name__] = aux
                else:
                    aux = warnings.get(e.__class__.__name__, {})
                    aux["info"] = info["data"]
                    aux["counter"] = aux.get("counter", 0) + 1
                    warnings[e.__class__.__name__] = aux
        res += profiles
        # Per-nick summary of errors (always) and warnings (verbosity >= 2).
        if errors:
            now = dt.datetime.now()
            print(f"\n{now}\tSome errors where found in the process:")
            for key, value in errors.items():
                print(textwrap.fill("- {} (found: {}). Details:".format(general.error(key), general.error(value["counter"])), 90, initial_indent="\t"))
                print(textwrap.fill("\t{}".format(value["info"]), 80, initial_indent="\t"))
        if warnings and verbosity >= 2:
            now = dt.datetime.now()
            print("\n{}\tSome warnings where found in the process:".format(now))
            for key, value in warnings.items():
                print(textwrap.fill("- {} (found: {}). Details:".format(general.warning(key), general.warning(value["counter"])), 90, initial_indent="\t"))
                print(textwrap.fill("\t{}".format(value["info"]), 80, initial_indent="\t"))
    return res
def get_parser():
    """Defines the argument parser

    Returns:
        argparse.ArgumentParser.
    """
    DEFAULT_VALUES = configuration.get_configuration_values_for("usufy")
    # Capturing errors just in case the option is not found in the configuration
    try:
        excludeList = [DEFAULT_VALUES["exclude_platforms"]]
    except:
        excludeList = []
    # Recovering all the possible options
    platOptions = platform_selection.get_all_platform_names("usufy")
    parser = argparse.ArgumentParser(description= 'usufy - Piece of software that checks the existence of a profile for a given user in dozens of different platforms.', prog='usufy', epilog='Check the README.md file for further details on the usage of this program or follow us on Twitter in <http://twitter.com/i3visio>.', add_help=False, conflict_handler='resolve')
    parser._optionals.title = "Input options (one required)"
    # Adding the main options
    group_mainOptions = parser.add_mutually_exclusive_group(required=True)
    group_mainOptions.add_argument('--info', metavar='<action>', choices=['list_platforms', 'list_tags'], action='store', help='select the action to be performed amongst the following: list_platforms (list the details of the selected platforms), list_tags (list the tags of the selected platforms). Afterwards, it exists.')
    group_mainOptions.add_argument('-b', '--benchmark', action='store_true', default=False, help='perform the benchmarking tasks.')
    group_mainOptions.add_argument('-f', '--fuzz', metavar='<path_to_fuzzing_list>', action='store', type=argparse.FileType('r'), help='this option will try to find usufy-like URLs. The list of fuzzing platforms in the file should be (one per line): <BASE_DOMAIN>\t<VALID_NICK>')
    group_mainOptions.add_argument('-l', '--list', metavar='<path_to_nick_list>', action='store', type=argparse.FileType('r'), help='path to the file where the list of nicks to verify is stored (one per line).')
    group_mainOptions.add_argument('-n', '--nicks', metavar='<nick>', nargs='+', action='store', help = 'the list of nicks to process (at least one is required).')
    group_mainOptions.add_argument('--show_tags', action='store_true', default=False, help='it will show the platforms grouped by tags.')
    # Selecting the platforms where performing the search
    groupPlatforms = parser.add_argument_group('Platform selection arguments', 'Criteria for selecting the platforms where performing the search.')
    groupPlatforms.add_argument('-p', '--platforms', metavar='<platform>', choices=platOptions, nargs='+', required=False, default=DEFAULT_VALUES.get("platforms", []), action='store', help='select the platforms where you want to perform the search amongst the following: ' + str(platOptions) + '. More than one option can be selected.')
    groupPlatforms.add_argument('-t', '--tags', metavar='<tag>', default = [], nargs='+', required=False, action='store', help='select the list of tags that fit the platforms in which you want to perform the search. More than one option can be selected.')
    groupPlatforms.add_argument('-x', '--exclude', metavar='<platform>', choices=platOptions, nargs='+', required=False, default=excludeList, action='store', help='select the platforms that you want to exclude from the processing.')
    # Configuring the processing options
    group_processing = parser.add_argument_group('Processing arguments', 'Configuring the way in which usufy will process the identified profiles.')
    group_processing.add_argument('--avoid_download', required=False, action='store_true', default=False, help='argument to force usufy NOT to store the downloadable version of the profiles.')
    group_processing.add_argument('--avoid_processing', required=False, action='store_true', default=False, help='argument to force usufy NOT to perform any processing task with the valid profiles.')
    group_processing.add_argument('--fuzz_config', metavar='<path_to_fuzz_list>', action='store', type=argparse.FileType('r'), help='path to the fuzzing config details. Wildcards such as the domains or the nicknames should come as: <DOMAIN>, <USERNAME>.')
    group_processing.add_argument('--nonvalid', metavar='<not_valid_characters>', required=False, default = '\\|<>=', action='store', help="string containing the characters considered as not valid for nicknames." )
    group_processing.add_argument('-e', '--extension', metavar='<sum_ext>', nargs='+', choices=['csv', 'gml', 'json', 'ods', 'png', 'txt', 'xls', 'xlsx' ], required=False, default=DEFAULT_VALUES.get("extension", ["csv"]), action='store', help='output extension for the summary files. Default: csv.')
    # NOTE(review): metavar below is missing the closing '>' — cosmetic only.
    group_processing.add_argument('-L', '--logfolder', metavar='<path_to_log_folder', required=False, default = './logs', action='store', help='path to the log folder. If none was provided, ./logs is assumed.')
    group_processing.add_argument('-o', '--output_folder', metavar='<path_to_output_folder>', required=False, default=DEFAULT_VALUES.get("output_folder", "."), action='store', help='output folder for the generated documents. While if the paths does not exist, usufy will try to create; if this argument is not provided, usufy will NOT write any down any data. Check permissions if something goes wrong.')
    group_processing.add_argument('-w', '--web_browser', required=False, action='store_true', help='opening the uris returned in the default web browser.')
    group_processing.add_argument('-F', '--file_header', metavar='<alternative_header_file>', required=False, default=DEFAULT_VALUES.get("file_header", "profiles"), action='store', help='Header for the output filenames to be generated. If None was provided the following will be used: profiles.<extension>.' )
    group_processing.add_argument('-T', '--threads', metavar='<num_threads>', required=False, action='store', default=int(DEFAULT_VALUES.get("threads", 0)), type=int, help='write down the number of threads to be used (default 32). If 0, the maximum number possible will be used, which may make the system feel unstable.')
    # About options
    group_about = parser.add_argument_group('About arguments', 'Showing additional information about this program.')
    group_about.add_argument('-h', '--help', action='help', help='shows this help and exists.')
    group_about.add_argument('-v', '--verbose', metavar='<verbosity>', choices=[0, 1, 2], required=False, action='store', default=1, help='select the verbosity level: 0 - minimal; 1 - normal (default); 2 - debug.', type=int)
    group_about.add_argument('--version', action='version', version='[%(prog)s] OSRFramework ' + osrframework.__version__, help='shows the version of the program and exits.')
    return parser
def main(params=None):
    """Main function to launch usufy

    The function is created in this way so as to let other applications make
    use of the full configuration capabilities of the application. The
    parameters received are used as parsed by this modules `get_parser()`.

    Args:
        params: A list with the parameters as grabbed by the terminal. It is
            None when this is called by an entry_point. If it is called by osrf
            the data is already parsed.

    Returns:
        dict: A Json representing the matching results.
    """
    # When no pre-parsed arguments are provided, build the parser;
    # parse_args(None) falls back to sys.argv.
    if params is None:
        parser = get_parser()
        args = parser.parse_args(params)
    else:
        args = params
    print(general.title(banner.text))
    saying_hello = f"""
Usufy | Copyright (C) <NAME> & <NAME> (i3visio) 2014-2021

This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
are welcome to redistribute it under certain conditions. For additional info,
visit <{general.LICENSE_URL}>.
"""
    print(general.info(saying_hello))
    if args.fuzz:
        # Fuzzing mode: guess usufy-like URL structures for the given domains.
        res = fuzzUsufy(args.fuzz, args.fuzz_config)
    else:
        # Recovering the list of platforms to be launched
        list_platforms = platform_selection.get_platforms_by_name(platform_names=args.platforms, tags=args.tags, mode="usufy", exclude_platform_names=args.exclude)
        if args.info:
            # Information actions...
            if args.info == 'list_platforms':
                info_platforms ="Listing the platforms:\n"
                for p in list_platforms:
                    info_platforms += "\t\t" + (str(p) + ": ").ljust(16, ' ') + str(p.tags)+"\n"
                return info_platforms
            elif args.info == 'list_tags':
                # Count how many selected platforms carry each tag.
                tags = {}
                # Going through all the selected platforms to get their tags
                for p in list_platforms:
                    for t in p.tags:
                        if t not in tags.keys():
                            tags[t] = 1
                        else:
                            tags[t] += 1
                info_tags = "List of tags:\n"
                # Displaying the results in a sorted list
                for t in tags.keys():
                    info_tags += "\t\t" + (t + ": ").ljust(16, ' ') + str(tags[t]) + " time(s)\n"
                return info_tags
            else:
                pass
        # performing the test
        elif args.benchmark:
            platforms = platform_selection.get_all_platform_names("usufy")
            res = benchmark.do_benchmark(platforms)
            str_times = ""
            for e in sorted(res.keys()):
                str_times += str(e) + "\t" + str(res[e]) + "\n"
            return str_times
        # showing the tags of the usufy platforms
        elif args.show_tags:
            tags = platform_selection.get_all_platform_names_by_tag("usufy")
            print(general.info("This is the list of platforms grouped by tag.\n"))
            print(json.dumps(tags, indent=2, sort_keys=True))
            print(general.info("[Tip] Remember that you can always launch the platform using the -t option followed by any of the aforementioned.\n"))
        # Executing the corresponding process...
        else:
            # Showing the execution time...
            start_time = dt.datetime.now()
            print(f"{start_time}\tStarting search in {general.emphasis(str(len(list_platforms)))} platform(s)... Relax!\n")
            print(general.emphasis("\tPress <Ctrl + C> to stop...\n"))
            # Defining the list of users to monitor
            nicks = []
            if args.nicks:
                for n in args.nicks:
                    nicks.append(n)
            else:
                # Reading the nick files
                try:
                    nicks = args.list.read().splitlines()
                except:
                    print(general.error("ERROR: there has been an error when opening the file that stores the nicks.\tPlease, check the existence of this file."))
            # Definning the results
            res = []
            if args.output_folder != None:
                # if Verifying an output folder was selected
                if not os.path.exists(args.output_folder):
                    os.makedirs(args.output_folder)
                # Launching the process...
                res = process_nick_list(nicks, list_platforms, args.output_folder, avoidProcessing = args.avoid_processing, avoidDownload = args.avoid_download, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)
            else:
                try:
                    res = process_nick_list(nicks, list_platforms, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)
                except Exception as e:
                    print(general.error("Exception grabbed when processing the nicks: " + str(e)))
                    print(general.error(traceback.print_stack()))
            # We are going to iterate over the results...
            str_results = "\t"
            # Structure returned
            """
            [
                {
                    "attributes": [
                        {
                            "attributes": [],
                            "type": "com.i3visio.URI",
                            "value": "http://twitter.com/i3visio"
                        },
                        {
                            "attributes": [],
                            "type": "com.i3visio.Alias",
                            "value": "i3visio"
                        },
                        {
                            "attributes": [],
                            "type": "com.i3visio.Platform",
                            "value": "Twitter"
                        }
                    ],
                    "type": "com.i3visio.Profile",
                    "value": "Twitter - i3visio"
                }
                ,
                ...
            ]
            """
            for r in res:
                # The format of the results (attributes) for a given nick is a list as follows:
                for att in r["attributes"]:
                    # iterating through the attributes
                    platform = ""
                    uri = ""
                    for details in att["attributes"]:
                        if details["type"] == "com.i3visio.Platform":
                            platform = details["value"]
                        if details["type"] == "com.i3visio.URI":
                            uri = details["value"]
                    try:
                        str_results += (str(platform) + ":").ljust(16, ' ')+ " "+ str(uri)+"\n\t\t"
                    except:
                        pass
            # Generating summary files for each ...
            if args.extension:
                # Verifying if the outputPath exists
                if not os.path.exists (args.output_folder):
                    os.makedirs(args.output_folder)
                # Grabbing the results
                file_header = os.path.join(args.output_folder, args.file_header)
                # Iterating through the given extensions to print its values
                for ext in args.extension:
                    # Generating output files
                    general.export_usufy(res, ext, file_header)
            now = dt.datetime.now()
            print(f"\n{now}\tResults obtained ({general.emphasis(len(res))}):\n")
            print(general.success(general.osrf_to_text_export(res)))
            if args.web_browser:
                general.open_results_in_browser(res)
            now = dt.datetime.now()
            print("\n" + str(now) + "\tYou can find all the information here:")
            # NOTE(review): `file_header` is only bound inside the
            # `if args.extension:` block above — presumably args.extension is
            # always non-empty (it defaults to ["csv"]); confirm.
            for ext in args.extension:
                # Showing the output files
                print("\t" + general.emphasis(file_header + "." + ext))
            # Showing the execution time...
            end_time = dt.datetime.now()
            print(f"\n{end_time}\tFinishing execution...\n")
            print("Total time consumed:\t" + general.emphasis(str(end_time-start_time)))
            print("Average seconds/query:\t" + general.emphasis(str((end_time-start_time).total_seconds()/len(list_platforms))) +" seconds\n")
    # Urging users to place an issue on Github...
    print(banner.footer)
    if params:
        return res
if __name__ == "__main__":
    # Entry point when executed directly: forward the CLI arguments
    # (excluding the program name) to main().
    main(sys.argv[1:])
|
<filename>paas-ce/paas/paas/common/tests.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.conf import settings
from django.test import TestCase
import mock
import requests
from common.constants import LogoImgRelatedDirEnum
from common.http import _gen_header, http_delete, http_get, http_post
from common.utils import file_size_bytes_to_m, get_app_logo, should_update_logo
class CommonUtilsTestCase(TestCase):
    """Unit tests for the helper functions in common.utils."""

    def test_file_size_bytes_to_m(self):
        """None and 0 pass through unchanged; one MiB converts to 1.0 MB."""
        self.assertEqual(None, file_size_bytes_to_m(None))
        self.assertEqual(0, file_size_bytes_to_m(0))
        one_mib = 1024 * 1024
        self.assertEqual(1.0, file_size_bytes_to_m(one_mib))

    def test_get_app_logo(self):
        """Known apps resolve to a media URL; unknown apps give an empty string."""
        known_code = 'bk_framework'
        expected_path = '{}/{}.png'.format(LogoImgRelatedDirEnum.APP.value, known_code)
        expected_url = '{}{}'.format(settings.MEDIA_URL, expected_path)
        self.assertEqual(expected_url, get_app_logo(known_code))
        self.assertEqual("", get_app_logo("not_exists"))

    def test_should_update_logo(self):
        """The logo only needs updating when the stored name does not match."""
        code = "test"
        stored_logo_name = "{}/{}.png".format(LogoImgRelatedDirEnum.APP.value, code)
        needs_update, _ = should_update_logo(code, stored_logo_name)
        self.assertFalse(needs_update)
        needs_update, _ = should_update_logo('test1', stored_logo_name)
        self.assertTrue(needs_update)
class CommonHttpTestCase(TestCase):
    """Tests for the thin HTTP wrappers in common.http, with requests mocked."""

    def _mock_response(self, status=200, content="CONTENT", json_data=None, raise_for_status=None):
        """
        Build a mock `requests` response.

        https://gist.github.com/evansde77/45467f5a7af84d2a2d34f3fcb357449c

        since we typically test a bunch of different
        requests calls for a service, we are going to do
        a lot of mock responses, so its usually a good idea
        to have a helper function that builds these things
        """
        mock_resp = mock.Mock()
        # mock raise_for_status call w/optional error
        mock_resp.raise_for_status = mock.Mock()
        if raise_for_status:
            mock_resp.raise_for_status.side_effect = raise_for_status
        # set status code and content
        mock_resp.status_code = status
        mock_resp.content = content
        # add json data if provided
        if json_data:
            mock_resp.json = mock.Mock(
                return_value=json_data
            )
        return mock_resp

    @mock.patch('requests.get')
    def test_http_get(self, mock_get):
        """http_get returns (ok, data); non-200 and timeouts yield ok=False."""
        # 200
        mock_resp = self._mock_response(status=200)
        mock_get.return_value = mock_resp
        ok, data = http_get("http://not_exists.com/", data={})
        self.assertTrue(ok)
        # 200, with json
        json_data = {"a": 1, "b": 2}
        mock_resp = self._mock_response(status=200, json_data=json_data)
        mock_get.return_value = mock_resp
        ok, data = http_get("http://not_exists.com/", data={})
        self.assertTrue(ok)
        self.assertEqual(json_data, data)
        # not 200
        mock_resp = self._mock_response(status=400)
        mock_get.return_value = mock_resp
        ok, data = http_get("http://not_exists.com/", data={})
        self.assertFalse(ok)
        # timeout
        # https://stackoverflow.com/questions/48723711/python-mock-requests-post-to-throw-exception
        mock_get.side_effect = requests.exceptions.Timeout()
        ok, data = http_get("http://not_exists.com/", data={})
        self.assertFalse(ok)

    @mock.patch('requests.post')
    def test_http_post(self, mock_post):
        """http_post mirrors http_get for plain and JSON 200 responses."""
        # 200
        mock_resp = self._mock_response(status=200)
        mock_post.return_value = mock_resp
        ok, data = http_post("http://not_exists.com/", data={})
        self.assertTrue(ok)
        # 200, with json
        json_data = {"a": 1, "b": 2}
        mock_resp = self._mock_response(status=200, json_data=json_data)
        mock_post.return_value = mock_resp
        ok, data = http_post("http://not_exists.com/", data={})
        self.assertTrue(ok)
        self.assertEqual(json_data, data)

    @mock.patch('requests.delete')
    def test_http_delete(self, mock_delete):
        """http_delete mirrors http_get for plain and JSON 200 responses."""
        # 200
        mock_resp = self._mock_response(status=200)
        mock_delete.return_value = mock_resp
        ok, data = http_delete("http://not_exists.com/", data={})
        self.assertTrue(ok)
        # 200, with json
        json_data = {"a": 1, "b": 2}
        mock_resp = self._mock_response(status=200, json_data=json_data)
        mock_delete.return_value = mock_resp
        ok, data = http_delete("http://not_exists.com/", data={})
        self.assertTrue(ok)
        self.assertEqual(json_data, data)

    def test_default_header(self):
        """_gen_header with no arguments produces the JSON content-type header."""
        headers = {
            "Content-Type": "application/json",
        }
        self.assertEqual(headers, _gen_header())
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : <NAME>
# Date : April 2018
import logging
import pickle
import json
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
class DxAlgorithm(object):
    def __init__(self, engine):
        """
        Constructor
        :param engine: DxMaskingEngine object
        """
        #Algorithm.__init__(self)
        self.__engine = engine
        self.__logger = logging.getLogger()
        self.__domain_name = None
        self.__sync = None
        self.__logger.debug("creating DxAlgorithm object")
        # Pick the API bindings matching the engine version: 6.0+ uses the
        # masking_api_60 package, older engines use masking_api_53.
        if (self.__engine.version_ge('6.0.0')):
            from masking_api_60.models.algorithm import Algorithm
            from masking_api_60.api.sync_api import SyncApi
            from masking_api_60.rest import ApiException
        else:
            from masking_api_53.models.algorithm import Algorithm
            from masking_api_53.api.sync_api import SyncApi
            from masking_api_53.rest import ApiException
        # NOTE(review): these store the *classes*, not instances — presumably
        # instantiated later by the (commented-out) sync helpers; confirm
        # before refactoring.
        self.__api = SyncApi
        self.__model = Algorithm
        self.__apiexc = ApiException
        self.__obj = None
@property
def obj(self):
if self.__obj is not None:
return self.__obj
else:
return None
def from_alg(self, alg):
"""
Set obj properties with a Algorithm object
:param column: Algorithm object
"""
self.__obj = alg
@property
def domain_name(self):
return self.__domain_name
@domain_name.setter
def domain_name(self, domain):
self.__domain_name = domain
@property
def sync(self):
return self.__sync
@sync.setter
def sync(self, sync):
self.__sync = sync
@property
def algorithm_name(self):
if self.obj is not None:
return self.obj.algorithm_name
else:
return None
@property
def algorithm_type(self):
if self.obj is not None:
return self.obj.algorithm_type
else:
return None
# def export(self, path=None):
# """
# Export algorithm into file
# :param path: path to save algorithm
# """
# api_sync = SyncApi(self.__engine.api_client)
# self.__logger.debug("Export input %s" % self.sync)
# export_list = []
# export_list.append(self.sync)
# api_response = api_sync.export(export_list)
# self.__logger.debug("Export response %s" % str(api_response))
#
# # binary_file = open('{0}.alg'.format(self.algorithm_name), mode='wb')
# # json.dump(api_response.blob, binary_file)
# # binary_file.close()
#
# binary_file = open('{0}.alg_bin '.format(self.algorithm_name), mode='wb')
# pickle.dump(api_response, binary_file)
# binary_file.close()
#
#
# def importalg(self, path=None):
# """
# Import algorithm from file
# :param path: path to save algorithm
# """
#
# binary_file = open('{0}.alg_bin'.format("EU_LAST_NAME"), mode='rb')
# algobj = pickle.load(binary_file)
# binary_file.close()
#
#
# api_sync = SyncApi(self.__engine.api_client)
# self.__logger.debug("Import input %s" % self.sync)
# api_response = api_sync.import_object(algobj, force_overwrite=True)
# self.__logger.debug("Import response %s" % str(api_response))
#
# # binary_file = open('{0}.alg'.format(self.algorithm_name), mode='wb')
# # json.dump(api_response.blob, binary_file)
# # binary_file.close()
|
# test_QA/browser/test_API_Browser_in_AWS.py
from osbot_aws.apis.Lambda import Lambda
from osbot_aws.apis.shell.Lambda_Shell import Lambda_Shell
from osbot_aws.helpers.Lambda_Package import Lambda_Package
from osbot_aws.helpers.Test_Helper import Test_Helper
from osbot_utils.decorators.lists.group_by import group_by
from osbot_utils.utils import Misc
from osbot_browser.Deploy import Deploy
class test_API_Browser_in_AWS(Test_Helper):
    """Integration tests that exercise API_Browser inside a deployed AWS Lambda."""
    def setUp(self):
        super().setUp()
        self.lambda_name = 'osbot_browser.lambdas.dev.browser_test'
        self._lambda = Lambda(self.lambda_name)
        # Python preamble prepended to every snippet executed inside Lambda.
        self.api_browser_code = """
from osbot_aws.Dependencies import load_dependencies
load_dependencies('syncer')
from osbot_browser.browser.API_Browser import API_Browser
api_browser = API_Browser()
"""
    def auth_key(self):
        # Auth key required by the remote lambda-shell endpoint.
        return Lambda_Shell().get_lambda_shell_auth()
    def _invoke_shell_command(self, command, kwargs=None):
        # Run a named lambda-shell method (e.g. 'list_processes') remotely.
        params = {'lambda_shell': {'method_name': command , 'method_kwargs': kwargs , 'auth_key': self.auth_key()}}
        return self._lambda.invoke(params)
    def _invoke_python_code(self, code):
        # Execute arbitrary python (with the api_browser preamble) in Lambda.
        code = self.api_browser_code + code
        return self._lambda.shell_python_exec(code, self.auth_key())
    def _invoke_python_line(self, line_of_code):
        # Evaluate a single expression remotely; its value is returned via 'result'.
        code = self.api_browser_code + "result = " + line_of_code
        return self._lambda.shell_python_exec(code, self.auth_key())
    def _reset_lambda(self):
        # Redeploy/reset the Lambda package to force a cold start.
        Lambda_Package(self.lambda_name).reset()
    @group_by
    def _lambda_process_list(self):
        # Parse remote `ps aux`-style output into a list of row dicts.
        def parse_ps_aux(raw_data):
            import re
            regex = re.compile('[\s]+')
            lines = raw_data.split('\n')
            headers = regex.split(lines.pop(0))
            data = []
            for line in lines:
                item = {}
                for index, header in enumerate(headers):
                    values = regex.split(line)
                    item[header] = Misc.array_get(values, index)
                data.append(item)
            return data
        ps_aux = self._invoke_shell_command('list_processes')
        return parse_ps_aux(ps_aux)
    def _lambda_chrome_processes(self):
        # Remote headless_shell processes (running chrome instances).
        return self._lambda_process_list(group_by='COMMAND').get('/tmp/lambdas-dependencies/pyppeteer/headless_shell')
    def _lambda_headless_shell_processes(self):
        # Remote zombie/bracketed headless_shell entries.
        return self._lambda_process_list(group_by='COMMAND').get('[headless_shell]')
    # test methods
    def test_update_lambda(self):
        # Deploys the latest dev lambda package.
        self.result = Deploy().deploy_lambda__browser_dev(self.lambda_name)
    def test_ctor(self):
        # Default API_Browser configuration inside the Lambda.
        assert self._invoke_python_line('api_browser.file_tmp_last_chrome_session')== '/tmp/browser-last_chrome_session.json'
        assert self._invoke_python_line('api_browser.headless'                 ) == True
        assert self._invoke_python_line('api_browser.new_browser'              ) == False
        assert self._invoke_python_line('api_browser.log_js_errors_to_console' ) == True
    def test_load_latest_version_of_chrome(self):
        self._reset_lambda()                    # force lambda cold start
        headless_shell   = '/tmp/lambdas-dependencies/pyppeteer/headless_shell'
        code_file_exists = f"""
from osbot_utils.utils.Files import file_exists
result = file_exists('{headless_shell}')
"""
        assert self._invoke_python_code(code_file_exists)  is False    # check that file doesn't exist after cold start
        assert self._invoke_python_line('api_browser.load_latest_version_of_chrome()') is None # trigger download of dependency
        assert self._invoke_python_code(code_file_exists)  is True     # check that now it exists
    def test_sync__setup_browser(self):
        self._reset_lambda()
        code = """
from osbot_utils.utils.Files import file_contents
load_dependencies('requests,pyppeteer,websocket-client')
api_browser.sync__setup_browser()
result = api_browser.get_last_chrome_session()
"""
        assert self._invoke_python_code(code).startswith('ws://127.0.0.1:')
        assert len(self._lambda_chrome_processes()) == 1        # there should only be one process
    def test_sync__setup_browser__new_browser__True(self):
        self._reset_lambda()
        code = """
from osbot_utils.utils.Files import file_contents
load_dependencies('requests,pyppeteer,websocket-client')
api_browser.set_new_browser(True)
api_browser.sync__setup_browser()
result = api_browser.new_browser
"""
        self._invoke_python_code(code)
        assert len(self._lambda_chrome_processes()) == 1        # after call to sync__setup_browser, there should be 1 process
        self._invoke_python_code(code)
        assert len(self._lambda_chrome_processes()) == 2        # now there should be 2
        self._invoke_python_code(code)
        assert len(self._lambda_chrome_processes()) == 3        # now there should be 3
        self._reset_lambda()
        #print(self._invoke_shell_command('disk_space'    ))
        #print(self._invoke_shell_command('list_processes'))
        #print(self._invoke_shell_command('memory_usage'  ))
        #print(self._invoke_shell_command('file_contents' , {'path': '/var/runtime/lambda_runtime_client.py'}))
    def test_sync__screenshot(self):
        #self._reset_lambda()
        code = """
from osbot_utils.utils.Files import file_contents
load_dependencies('requests,pyppeteer,websocket-client')
api_browser.sync__setup_browser()
#api_browser.sync__open('https://www.google.com/')
api_browser.sync__open('https://www.whatismybrowser.com/')
result = api_browser.sync__screenshot_base64()
"""
        self.png_data = self._invoke_python_code(code)
        #self.result = self._invoke_shell_command('list_processes')
|
# fleet_adapter_mir/fleet_adapter_mir.py (repo: Briancbn/fleet_adapter_mir)
from rmf_fleet_msgs.msg import Location, RobotMode, RobotState
import rmf_adapter as adpt
from mir100_client.rest import ApiException
from mir100_client.models import PostMissionQueues, PostMissions, \
PostMissionActions, PutStatus
from collections import namedtuple
import threading
import urllib3
import copy
import enum
import math
# Public API of this module.
__all__ = [
    "MiRLocation",
    "MiRState",
    "MiRPositionTypes",
    "MiRCommandHandle",
    "MiRRetryContext"
]
###############################################################################
# TYPES
###############################################################################
# A pose in the MiR map frame; yaw is in degrees (MiR's REST API uses degrees).
MiRLocation = namedtuple("MiRLocation", ['x', 'y', 'yaw'])
class MiRState(enum.IntEnum):
    """MiR REST ``state_id`` values used by this adapter."""
    READY = 3
    PAUSE = 4
    EXECUTING = 5
    MANUAL_CONTROL = 11
    ERROR = 12
class MiRPositionTypes(enum.IntEnum):
    """MiR position ``type_id`` values this adapter cares about."""
    ROBOT = 0
    CHARGING_STATION = 7
    CHARGING_STATION_ENTRY = 8
###############################################################################
# CLASSES
###############################################################################
class MiRCommandHandle(adpt.RobotCommandHandle):
    """RMF RobotCommandHandle implementation backed by the MiR REST API."""
    def __init__(self,
                 name,
                 node,
                 rmf_graph,
                 robot_state_update_frequency=1,
                 dry_run=False):
        """Create a command handle bridging one MiR robot to RMF.

        :param name: Name of robot object in config yaml
        :param node: rclpy node used for logging, clock access and timers
        :param rmf_graph: RMF navigation graph for this fleet
        :param robot_state_update_frequency: value passed to create_timer
            (NOTE(review): create_timer takes a *period* in seconds despite
            the "frequency" name — confirm intended semantics)
        :param dry_run: For testing only. Disables REST calls.
        """
        adpt.RobotCommandHandle.__init__(self)
        self.name = name  # Name of robot object in config yaml
        self.node = node
        self.dry_run = dry_run  # For testing only. Disables REST calls.
        self.paused = False
        self.paused_path = []  # Path stashed by pause(), restored by resume()
        # Robot State =========================================================
        self.robot_state = RobotState()
        self.last_robot_state_update = -1
        self.robot_state_update_frequency = robot_state_update_frequency
        # NOTE(CH3): This is a naively monotonically increasing task counter.
        #
        # There is no interface to get the task request message ID!
        # Instead I am just following the behaviour from rmf_core's
        # full_control adapter.
        self.current_task_id = 0
        self.transforms = {'rmf_to_mir': None,
                           'mir_to_rmf': None}
        # RMF Variables =======================================================
        self.rmf_updater = None
        self.rmf_graph = rmf_graph
        self.rmf_lane_dict = {}  # Maps (entry, exit) waypoint indices to graph lane
        self.rmf_map_name = ""
        # This is made out of RMF Plan Waypoints
        self.rmf_remaining_path_waypoints = []
        # NOTE(CH3): This is required for fleet state publishing
        # The path is in reverse order! (i.e. [last, ... first])
        # This is made out of RMF Location messages
        self.rmf_robot_state_path_locations = []
        # Populate lane dict
        for i in range(self.rmf_graph.num_lanes):
            graph_lane = self.rmf_graph.get_lane(i)
            id_tuple = (graph_lane.entry.waypoint_index,
                        graph_lane.exit.waypoint_index)
            self.rmf_lane_dict[id_tuple] = graph_lane
        # RMF Location Trackers ===============================================
        self.rmf_current_lane_index = None  # None when moving
        self.rmf_current_waypoint_index = None
        self.rmf_target_waypoint_index = None  # None when not moving
        # RMF Execution Flags =================================================
        self.rmf_docking_executed = False
        self.rmf_docking_requested = False
        self.rmf_path_requested = False
        # MiR Variables =======================================================
        self.mir_name = ""  # Name of robot on MiR REST server
        self.mir_missions = {}  # MiR Mission Name-GUID Dict
        self.mir_positions = {}  # MiR Place Name-GUID Dict
        self.mir_api = None  # MiR REST API
        self.mir_state = MiRState.PAUSE
        # Thread Management ===================================================
        # Path queue execution thread
        self._path_following_thread = None
        self._path_quit_event = threading.Event()
        self._path_quit_cv = threading.Condition()
        # Dock queue execution thread
        self._docking_thread = None
        self._docking_quit_event = threading.Event()
        self._docking_quit_cv = threading.Condition()
        # Start State Update Timer ============================================
        self.state_update_timer = self.node.create_timer(
            self.robot_state_update_frequency,
            self.update_internal_robot_state
        )
##########################################################################
# ROBOTCOMMANDHANDLE OVERLOADS (DO NOT CHANGE METHOD SIGNATURES)
##########################################################################
# Pause and resume are not technically overrides...
# But it's neater to put them here
def pause(self):
"""Set pause flag and hold on to any requested paths."""
self.paused = True
if self.rmf_remaining_path_waypoints:
self.node.get_logger().info(
'[PAUSE] {self.name}: Current path saved!'
)
self.paused_path = self.rmf_remaining_path_waypoints
self.rmf_remaining_path_waypoints = []
def resume(self):
"""Unset pause flag and substitute paused paths if no paths exist."""
if self.paused:
self.paused = False
if self.rmf_remaining_path_waypoints:
return
elif self.paused_path:
self.rmf_remaining_path_waypoints = self.paused_path
self.paushed_path = []
self.node.get_logger().info(
'[RESUME] {self.name}: Saved path restored!'
)
else:
return
def clear(self):
"""Clear all pending action information"""
self.rmf_remaining_path_waypoints.clear()
self.rmf_path_requested = False
self.rmf_target_waypoint_index = None
self.rmf_docking_requested = False
self.rmf_docking_executed = False
def stop(self):
"""Stop all path following and docking commands."""
self.clear()
if self._path_following_thread is not None:
self._path_quit_event.set()
self._path_quit_cv.acquire()
self._path_quit_cv.notify_all()
self._path_quit_cv.release()
self._path_following_thread.join()
self._path_following_thread = None
if self._docking_thread is not None:
self._docking_quit_event.set()
self._docking_quit_cv.acquire()
self._docking_quit_cv.notify_all()
self._docking_quit_cv.release()
self._docking_thread.join()
self._docking_thread = None
if not self.dry_run:
self.mir_api.mission_queue_delete()
old_state = self.mir_state
self.mir_state = MiRState.PAUSE
# Prevent repeat and needless logs
if (old_state != MiRState.PAUSE
and self.robot_state.mode.mode != RobotMode.MODE_IDLE):
self.node.get_logger().info(
'[ABORT] {self.name}: Robot stop called!'
)
def follow_new_path(self,
waypoints,
next_arrival_estimator, # function!
path_finished_callback):
self.stop()
self.current_task_id += 1
self.rmf_path_requested = True
# Obtain plan waypoints ===============================================
self.rmf_remaining_path_waypoints = copy.copy(waypoints)
# We reverse this list so that we can pop it instead of traversing
# it using an index (which is more Pythonic)
self.rmf_remaining_path_waypoints.reverse()
# Construct robot state path list =====================================
self.rmf_robot_state_path_locations = []
for waypoint in self.rmf_remaining_path_waypoints:
# Split timestamp into decimal and whole second portions
_sub_seconds, _seconds = math.modf(waypoint.time.timestamp())
_msg = Location()
_msg.x, _msg.y, _msg.yaw = waypoint.position
_msg.t.sec, _msg.t.nanosec = int(_seconds), int(_sub_seconds * 1e9)
self.rmf_robot_state_path_locations.append(_msg)
if not self.dry_run:
status = PutStatus(state_id=MiRState.READY)
self.mir_api.status_put(status)
def path_following_closure():
_current_waypoint = None
_next_waypoint = None
# LOOP ============================================================
# Kept alive if paused
while ((self.rmf_remaining_path_waypoints or self.paused)
or _current_waypoint):
if not self.paused: # Skipped if paused
if _current_waypoint is None:
_current_waypoint = (
self.rmf_remaining_path_waypoints.pop()
)
self.rmf_path_requested = True
waypoint_leave_msg = _current_waypoint.t
ros_waypoint_leave_time = (
waypoint_leave_msg.sec
+ waypoint_leave_msg.nanosec / 1e9
)
ros_now = self.node.get_clock().now().nanoseconds / 1e9
next_mission_wait = (ros_waypoint_leave_time - ros_now)
else:
# Prevent spinning out of control when paused
self._path_quit_cv.acquire()
self._path_quit_cv.wait(1)
self._path_quit_cv.release()
# CHECK FOR PRE-EMPT ==========================================
if self._path_quit_event.is_set():
self.clear()
self.node.get_logger().info(
'[ABORT] {self.name}: Pre-empted path following!'
)
return
# EXECUTE NEXT COMMAND ========================================
# Wait for time to leave and robot to finish moving
if (next_mission_wait <= 0
and self.mir_state == MiRState.READY
and not self.paused): # Skipped if paused
# END =====================================================
if not self.rmf_remaining_path_waypoints: # We're done!
self.rmf_path_requested = False
path_finished_callback()
self.execute_updates()
return
# ASSIGN NEXT TARGET ======================================
else:
_next_waypoint = self.rmf_remaining_path_waypoints[-1]
# Grab graph indices
if _next_waypoint.graph_index.has_value:
_next_index = _next_waypoint.graph_index.value
else:
_next_index = None
if _current_waypoint.graph_index.has_value:
_current_index = (
_current_waypoint.graph_index.value
)
else:
_current_index = None
_current_waypoint = None
# Update Internal Location Trackers ===================
# [IdleAtWaypoint -> RMF_Move]
# [IdleAtLane -> RMF_Move]
# [IdleAtUnknown -> RMF_Move]
# Set target index
self.rmf_target_waypoint_index = _next_index
# Infer and set lane index
if not self.rmf_current_lane_index:
if _current_index is not None:
self.rmf_current_lane_index = (
self.lane_dict.get((_current_index,
_next_index))
)
# Unset current index
self.rmf_current_waypoint_index = None
# SEND NEXT TARGET ========================================
_mir_pos = self.transforms['rmf_to_mir'].transform(
[_next_waypoint.position[0],
_next_waypoint.position[1]]
)
_mir_ori_rad = (
math.radians(_next_waypoint.position[2] % 360)
+ self.transforms['rmf_to_mir'].get_rotation()
)
# NOTE(CH3): MiR Location is sent in Degrees
_mir_ori = math.degrees(_mir_ori_rad % (2 * math.pi))
if _mir_ori > 180.0:
_mir_ori = _mir_ori - 360.0
elif _mir_ori <= -180.0:
_mir_ori = _mir_ori + 360.0
mir_location = MiRLocation(x=_mir_pos[0],
y=_mir_pos[1],
yaw=_mir_ori)
print(f"RMF location x:{_next_waypoint.position[0]}"
f"y:{_next_waypoint.position[1]}")
print(f'MiR location: {mir_location}')
self.queue_move_coordinate_mission(mir_location)
self.execute_updates()
continue
if not self.paused: # Skipped if paused
# Prevent spinning out of control
if next_mission_wait <= 0:
next_mission_wait = 0.1
self._path_quit_cv.acquire()
self._path_quit_cv.wait(next_mission_wait)
self._path_quit_cv.release()
self._path_quit_event.clear()
if self._path_following_thread is not None:
self._path_following_thread.join()
self._path_following_thread = threading.Thread(
target=path_following_closure
)
self._path_following_thread.start()
def dock(self, dock_name, docking_finished_callback):
"""Start thread to invoke MiR docking mission, then notify rmf_core."""
self.stop()
self.current_task_id += 1
self.rmf_docking_requested = True
self.rmf_docking_executed = False
if not self.dry_run:
status = PutStatus(state_id=MiRState.READY)
self.mir_api.status_put(status)
def dock_closure():
if not self.dry_run:
self.queue_dock_mission(dock_name)
# Check for docking complete!
while self.rmf_docking_requested:
if not self.dry_run:
api_response = self.mir_api.status_get()
self.rmf_docking_executed = (
'docking' in api_response.mission_text.lower())
else:
api_response = None
self.rmf_docking_executed = False
self.execute_updates(api_response)
# Docking completed
if not self.dry_run:
if (self.rmf_docking_executed
and api_response.state_id == MiRState.READY):
self.rmf_docking_requested = False
docking_finished_callback()
self.node.get_logger().info(
'[COMPLETE] Completed dock at: "{dock_name}"!'
)
return
else:
self.rmf_docking_requested = False
self.rmf_docking_executed = True
self.node.get_logger().info(
'[COMPLETE-DRYRUN] Completed dock at: "{dock_name}"!'
)
docking_finished_callback()
return
# Docking pre-empted
if self._docking_quit_event.is_set():
self.rmf_docking_requested = False
self.rmf_docking_executed = False
self.clear()
self.node.get_logger().info(
'[ABORT] Pre-empted dock at: "{dock_name}"!'
)
return
self._docking_quit_cv.acquire()
self._docking_quit_cv.wait(1)
self._docking_quit_cv.release()
self._docking_quit_event.clear()
if self._docking_thread is not None:
self._docking_thread.join()
self._docking_thread = threading.Thread(target=dock_closure)
self._docking_thread.start()
##########################################################################
# INIT METHODS
##########################################################################
def load_mir_missions(self):
if self.dry_run:
self.node.get_logger().info('{self.name}: DRY_RUN LOAD MISSIONS')
return
self.node.get_logger().info('{self.name}: Retrieving MiR Missions...')
robot_missions_ls = self.mir_api.missions_get()
for i in robot_missions_ls:
if i.name not in self.mir_missions:
self.mir_missions[i.name] = i
else:
if "move_coordinate" in i.name:
print("removing {}".format(i.name))
self.mir_api.missions_guid_delete(i.guid)
self.node.get_logger().info(
f'retrieved {len(self.mir_missions)} missions'
)
def load_mir_positions(self):
if self.dry_run:
self.node.get_logger().info('{self.name}: DRY_RUN LOAD POSITIONS')
return
self.node.get_logger().info('{self.name}: Retrieving MiR Positions...')
count = 0
for pos in self.mir_api.positions_get():
if (
pos.name not in self.mir_positions
or pos.guid != self.mir_positions[pos.name].guid
):
if (
pos.type_id == MiRPositionTypes.ROBOT
or pos.type_id == MiRPositionTypes.CHARGING_STATION_ENTRY
):
self.mir_positions[pos.name] = (
self.mir_api.positions_guid_get(pos.guid)
)
count += 1
self.node.get_logger().info(f'updated {count} positions')
##########################################################################
# MISSION METHODS
##########################################################################
def queue_move_coordinate_mission(self, mir_location):
"""Add a move mission to the mission queue, creating when needed."""
mission_name = ('move_coordinate_to'
f'_{mir_location.x:.3f}',
f'_{mir_location.y:.3f}',
f'_{mir_location.yaw:.3f}')
# Get mission GUID. If missing, create one and save it.
mission_id = self.missions.get(
mission_name, self.create_move_coordinate_mission(mir_location)
)
# Queue mission
try:
mission = PostMissionQueues(mission_id=mission_id)
self.mir_api.mission_queue_post(mission)
except KeyError:
self.node.get_logger().error(
'{self.name}: No mission to move coordinates to '
'[{mir_location.x:3f}_{mir_location.y:.3f}]!'
)
def create_move_coordinate_mission(self, mir_location, retries=10):
mission_name = ('move_coordinate_to'
f'_{mir_location.x:.3f}',
f'_{mir_location.y:.3f}',
f'_{mir_location.yaw:.3f}')
mission = PostMissions(
group_id='mirconst-guid-0000-0001-missiongroup',
name=mission_name,
description='automatically created by mir fleet adapter',
)
response = self.mir_api.missions_post(mission)
action = PostMissionActions(
action_type='move_to_position',
mission_id=response.guid,
parameters=[
{'id': 'x', 'value': mir_location.x},
{'id': 'y', 'value': mir_location.y},
{'id': 'orientation', 'value': mir_location.yaw},
{'id': 'retries', 'value': retries},
{'id': 'distance_threshold', 'value': 0.1},
],
priority=1
)
self.mir_api.missions_mission_id_actions_post(
mission_id=response.guid,
body=action
)
self.node.get_logger().info(
f'{self.name}: '
f'Created mission to move coordinate to "{mir_location}"'
)
# NOTE(CH3): Unsure if I should be doing this
self.missions[mission_name] = response.guid
return response.guid
def queue_dock_mission(self, dock_name):
"""Add a dock mission to the mission queue, creating when needed."""
mission_name = f'dock_to_{dock_name}'
# Get mission GUID. If missing, create one and save it.
mission_id = self.missions.get(mission_name,
self.create_dock_mission(dock_name))
# Queue mission
try:
mission = PostMissionQueues(mission_id=mission_id)
self.mir_api.mission_queue_post(mission)
except KeyError:
self.node.get_logger().error(
f'{self.name}: No mission to dock to {dock_name}!'
)
def create_dock_mission(self, dock_name):
"""Create, POST, and populate MiR docking mission, then save it."""
mission_name = f'dock_to_{dock_name}'
mission = PostMissions(
# mir const, retrieved with GET /mission_groups
group_id='mirconst-guid-0000-0001-missiongroup',
name=mission_name,
description='automatically created by mir fleet handler',
)
response = self.mir_api.missions_post(mission)
action = PostMissionActions(
action_type='docking',
mission_id=response.guid,
parameters=[
{'id': 'marker', 'value': dock_name},
],
priority=1
)
self.mir_api.missions_mission_id_actions_post(
mission_id=response.guid,
body=action
)
self.node.get_logger().info(
f'created mission to move and dock to: "{dock_name}"'
)
# NOTE(CH3): Unsure if I should be doing this
self.missions[mission_name] = response.guid
return response.guid
##########################################################################
# RMF CORE INTERACTION METHODS
##########################################################################
def get_position(self, rmf=True, api_response=None, as_dimensions=False):
"""Get MiR or RMF robot location from the MiR REST API."""
if api_response is None:
if not self.dry_run:
api_response = self.mir_api.status_get()
else:
if as_dimensions:
return [[0.0], [0.0], [0.0]]
else:
return [0.0, 0.0, 0.0]
mir_pos = [api_response.position.x, api_response.position.y]
mir_ori = api_response.position.yaw
# Output is [x, y, yaw]
if rmf:
rmf_pos = self.transforms['mir_to_rmf'].transform(mir_pos)
rmf_ori = (math.radians(mir_ori % 360)
+ self.transforms['mir_to_rmf'].get_rotation())
output = [*rmf_pos, rmf_ori]
else:
output = [*mir_pos, mir_ori]
if as_dimensions:
return [[x] for x in output]
else:
return output
# Priority...
# 1. update_position(waypoint, orientation) [At waypoint]
# 2. update_position(position, lanes) [In transit]
# 3. update_position(position, target_waypoint) [In transit, unknown lane]
# 4. update_position(map_name, position) [Lost]
def update_position(self, api_response=None):
"""Update position using the MiR status location."""
if api_response is None:
if not self.dry_run:
api_response = self.mir_api.status_get()
else:
self.rmf_updater.update_position(self.rmf_map_name,
[0.0, 0.0, 0.0])
self.node.get_logger().info("[DRYRUN] Updated Position: "
"pos: [0, 0] | ori: [0]")
return
mir_pos = [api_response.position.x, api_response.position.y]
mir_ori = api_response.position.yaw
rmf_pos = self.transforms['mir_to_rmf'].transform(mir_pos)
rmf_ori = (math.radians(mir_ori % 360)
+ self.transforms['mir_to_rmf'].get_rotation())
rmf_3d_pos = [*rmf_pos, rmf_ori]
# At waypoint
# States: (0, 1, 0)
if self.rmf_current_waypoint_index is not None:
self.rmf_updater.update_position(self.rmf_current_waypoint_index,
self.rmf_ori)
# In Transit or Idle in Lane
# States: (1, 0, 0), (1, 0, 1)
elif self.rmf_current_lane_index is not None:
self.rmf_updater.update_position(rmf_3d_pos,
self.rmf_current_lane_index)
# In Transit, Unknown Lane
# States: (0, 0, 1)
elif self.rmf_target_waypoint_index is not None: # In Unknown Lane
self.rmf_updater.update_position(rmf_3d_pos,
self.rmf_target_waypoint_index)
# Lost or MiR Commanded
# States: (0, 0, 0)
else:
self.rmf_updater.update_position(self.rmf_map_name,
rmf_3d_pos)
self.node.get_logger().info(f"Updated Position: pos: {rmf_pos} | "
f"ori: {rmf_ori}")
def update_internal_location_trackers(self):
"""Traverses the state machine to help manage robot location."""
state_tuple = (self.rmf_current_lane_index is not None,
self.rmf_current_waypoint_index is not None,
self.rmf_target_waypoint_index is not None)
# In the absence of a state, treat it as paused
if self.robot_state:
robot_mode = self.robot_state.mode.mode
else:
robot_mode = RobotMode.MODE_PAUSED
# SEMANTIC STATE INFERENCE AND ADJUSTMENT =============================
# See docs for more information on the state transitions
# MiR_Move: Non-RMF Commanded Move-To-Coordinate
# (0, 1, 0) and (1, 0 ,0) --> (0, 0, 0)
# When robot is done moving, robot will be IdleAtUnknown
if not self.rmf_path_requested and robot_mode == RobotMode.MODE_MOVING:
# Unset all
self.rmf_current_lane_index = None
self.rmf_current_waypoint_index = None
self.rmf_target_waypoint_index = None
# RMF_ReachedWaypoint -> IdleAtWaypoint
# Set current to target's value, unset target and lane
# (0, 0, 1) and (1, 0, 1) --> (0, 1, 0)
if (state_tuple == (0, 0, 1) or state_tuple == (1, 0, 1)
and robot_mode == RobotMode.MODE_IDLE
and not self.rmf_path_requested):
self.rmf_current_waypoint_index = self.rmf_target_waypoint_index
self.rmf_target_waypoint_index = None
self.rmf_current_lane_index = None
# IdleAtWaypoint/Lane/Unknown -> RMF_Move
#
# Defined in self.follow_new_path's path_following_closure
# and called during path following execution
def update_internal_robot_state(self, api_response=None):
"""Update internal robot state message. Does not publish!"""
# NOTE(CH3): You might need to use robot.mir_name depending
# on whether you want to use the config yaml name or MiR server
# name whereever the FleetState message is intended to be used
robot_state = RobotState() # Temporary msg to avoid race conditions
robot_state.name = self.name
# NOTE(CH3): Presuming model here means robot model, not sim model
robot_state.model = "MiR100"
if self.dry_run:
self.robot_state = robot_state
return
try:
if api_response is None:
api_response = self.mir_api.status_get()
now_sec, now_ns = math.modf(
self.node.get_clock().now().seconds_nanoseconds())
# Populate Location message
rmf_location = Location()
rmf_location.x, rmf_location.y, rmf_location.yaw = (
self.get_position(rmf=True, api_response=api_response)
)
rmf_location.level_name = self.rmf_map_name
# Populate RobotState message
robot_state.task_id = str(self.current_task_id)
robot_state.battery_percent = api_response.battery_percentage
robot_state.location = rmf_location
robot_state.path = self.rmf_robot_state_path_locations
robot_state.location.t.sec = now_sec
robot_state.location.t.nanosec = now_ns
if api_response.mission_text.startswith('Charging'): # Charging
robot_state.mode.mode = RobotMode.MODE_CHARGING
self.mir_state = MiRState.READY
elif api_response.state_id == MiRState.PAUSE: # Paused/Pre-empted
self.pause()
robot_state.mode.mode = RobotMode.MODE_PAUSED
self.mir_state = MiRState.PAUSE
elif (api_response.state_id == MiRState.EXECUTING # Moving
and not api_response.mission_text.startswith('Charging')):
self.resume()
robot_state.mode.mode = RobotMode.MODE_MOVING
self.mir_state = MiRState.EXECUTING
elif api_response.state_id == MiRState.READY: # Idle/Moved
self.resume()
robot_state.mode.mode = RobotMode.MODE_IDLE
self.mir_state = MiRState.READY
if self.rmf_docking_requested:
if self.rmf_docking_executed: # Docked
if api_response.state_id == MiRState.READY:
robot_state.mode.mode = RobotMode.MODE_IDLE
else: # Docking
robot_state.mode.mode = RobotMode.MODE_DOCKING
# Update internal RobotState
self.robot_state = robot_state
self.last_robot_state_update = (
self.node.get_clock().now().nanoseconds / 1e9
)
except ApiException as e:
self.node.get_logger().warn('Exception when calling '
'DefaultApi->status_get: %s\n'
% e)
##########################################################################
# INTERNAL UPDATE LOOP
##########################################################################
def execute_updates(self, api_response=None):
if api_response is None:
api_response = self.mir_api.status_get()
self.update_internal_robot_state(api_response=api_response)
self.update_internal_location_trackers()
self.update_position(api_response=api_response)
self.state_update_timer.reset()
###############################################################################
# HELPER FUNCTIONS AND CLASSES
###############################################################################
class MiRRetryContext():
    """Context manager that temporarily enables aggressive urllib3 retries.

    Used around robot startup, where the MiR REST endpoint may briefly
    answer 404; the original retry configuration is restored on exit.
    """
    def __init__(self, robot):
        self.robot = robot
        pool_manager = robot.mir_api.api_client.rest_client.pool_manager
        self.connection_pool_kw = pool_manager.connection_pool_kw
        # Remember whatever retry policy (if any) was in effect before.
        self.orig_retries = self.connection_pool_kw.get('retries')
    def __enter__(self):
        retry_policy = urllib3.Retry(10)
        retry_policy.backoff_factor = 1
        retry_policy.status_forcelist = (404,)
        self.connection_pool_kw['retries'] = retry_policy
        return self.robot
    def __exit__(self, exc_type, exc_value, exc_traceback):
        if self.orig_retries is None:
            # No retry policy existed before: remove ours entirely.
            del self.connection_pool_kw['retries']
        else:
            self.connection_pool_kw['retries'] = self.orig_retries
|
# gribmagic/smith/bbox.py
"""
grib_bbox.py
Copyright (C) 2020-2021 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import dataclasses
import json
import logging
import os
import tempfile
from pathlib import Path
from typing import List
import click
from click_option_group import RequiredMutuallyExclusiveOptionGroup, optgroup
from gribmagic.smith.util import FileProcessor, ProcessingResult, json_serializer
from gribmagic.util import setup_logging
logger = logging.getLogger(__name__)
"""
This programs supports the topic "Area of interest from GRIB files".
See https://github.com/earthobservations/gribmagic/blob/main/docs/area_of_interest.rst.
"""
@dataclasses.dataclass
class BBox:
    """
    This holds bounding box information.

    It has two factory methods to create a bounding box:
    - ``from_country`` uses an ISO 2-letter country code
    - ``from_coordinates`` uses a 4-tuple (lat_min, lat_max, lon_min, lon_max)
    """
    latitude_min: float
    latitude_max: float
    longitude_min: float
    longitude_max: float

    @staticmethod
    def from_country(country_iso: str):
        """
        Create bounding box using 2-letter country code.

        :param country_iso: 2-letter country code
        :return: BBox instance
        :raises ValueError: if the country code is unknown
        """
        from country_bounding_boxes import country_subunits_by_iso_code

        # Lookup using "country_bounding_boxes"
        # responds with (lon1, lat1, lon2, lat2) tuples.
        countries = list(country_subunits_by_iso_code(country_iso))
        if not countries:
            raise ValueError(f"Unknown country iso code: {country_iso}")
        # NOTE: a country may have several subunits; the first one is used.
        bbox = countries[0].bbox
        bbox = BBox(
            latitude_min=bbox[1],
            latitude_max=bbox[3],
            longitude_min=bbox[0],
            longitude_max=bbox[2],
        )
        return bbox

    @staticmethod
    def from_coordinates(bbox_tuple: tuple):
        """
        Create bounding box using 4-tuple.

        :param bbox_tuple: 4-tuple (lat_min, lat_max, lon_min, lon_max)
        :return: BBox instance
        """
        bbox = BBox(*bbox_tuple)
        return bbox

    def to_tuple(self, lonlat: bool = False) -> tuple:
        """
        Return bounding box as 4-tuple, optionally swapped to longitude/latitude.

        :param lonlat: Whether to swap to lon/lat.
        :return: 4-tuple
        """
        if lonlat:
            # Return tuple like (lon_min, lon_max, lat_min, lat_max).
            # This is needed for CDO.
            bbox_tuple = (
                self.longitude_min,
                self.longitude_max,
                self.latitude_min,
                self.latitude_max,
            )
        else:
            # Return tuple like (lat_min, lat_max, lon_min, lon_max).
            bbox_tuple = dataclasses.astuple(self)
        return bbox_tuple

    def to_string(self, separator: str, lonlat: bool = False) -> str:
        """
        Return bounding box as 4-tuple, serialized to a string using given separator.
        Optionally swaps to longitude/latitude.

        :param separator: Separator character to use when joining tuple elements.
        :param lonlat: Whether to swap to lon/lat.
        :return: Serialized bounding box string
        """
        bbox_tuple = self.to_tuple(lonlat=lonlat)
        return separator.join(map(str, bbox_tuple))
class GRIBSubset:
    """
    The main workhorse to read a number of GRIB files and
    extract a subset by applying a bounding box.

    It can use different methods:
    - cdo-shellout
    - cdo-python
    - xarray

    As of today, Xarray's cfgrib backend (version 0.9.8.5) can
    not properly write GRIB output, so there is an option to work
    around that by using netCDF.
    """

    def __init__(
        self,
        input: List[Path],
        output: Path,
        bbox: BBox,
        method: str,
        use_netcdf: bool,
        plot: bool,
        dry_run: bool = False,
    ):
        """
        Create a new GRIBSubset instance.

        :param input: List of input filenames.
        :param output: Output directory. If this doesn't exist, it will be created beforehand.
        :param bbox: The BBox instance describing the area of interest.
        :param method: One of the methods how bbox'ing will take place.
        :param use_netcdf: Whether to process into netCDF.
        :param plot: Whether to render a PNG plot for each output file.
        :param dry_run: When True, only compute output paths without processing.
        """
        self.input = input
        self.output = output
        self.bbox = bbox
        self.method = method
        self.use_netcdf = use_netcdf
        self.do_plot = plot
        self.dry_run = dry_run
        # Compute output folder, e.g. "<output>/bbox_<lat_min>_<lat_max>_<lon_min>_<lon_max>".
        subdirectory = f'bbox_{self.bbox.to_string("_")}'
        self.outfolder = Path(self.output).joinpath(subdirectory)

    def process(self) -> List[ProcessingResult]:
        """
        Process all input files.

        :return: List of ``ProcessingResult`` instances
        """
        processor = FileProcessor(input=self.input, method=self.step)
        return processor.resolve().run()

    def step(self, item: ProcessingResult) -> None:
        """
        Process a single input item: extract the area and optionally plot it.

        :param item: ProcessingResult carrying the input path; mutated in place.
        :return: None
        """
        # Render GRIB.
        gribfile_subgrid = self.extract_area(item.input)
        item.output = gribfile_subgrid

        # Render PNG.
        if self.do_plot:
            try:
                pngfile = self.plot(gribfile_subgrid)
                item.plot = pngfile
            except Exception as ex:
                logger.exception(f"Plotting failed: {ex}")
                # TODO: Raise exception conditionally.
                raise

    def extract_area(self, infile: Path) -> Path:
        """
        Main area subsetting method.

        :param infile: Path to input file
        :return: Path to output file
        :raises ValueError: if ``self.method`` is not a known bbox method.
        """
        # Prepare information about output file.
        if self.use_netcdf:
            folder = "netcdf"
            suffix = ".nc"
        else:
            folder = "grib"
            suffix = None

        # Compute output file location.
        outfolder = self.outfolder.joinpath(folder)
        outfolder.mkdir(parents=True, exist_ok=True)
        outfile = outfolder.joinpath(infile.name)
        if suffix:
            outfile = outfile.with_suffix(suffix)

        if self.dry_run:
            return outfile

        # Apply bounding box to GRIB file.
        if self.method == "cdo-shellout":
            payload = self.bbox_cdo_shellout(infile)
        elif self.method == "cdo-python":
            payload = self.bbox_cdo_python(infile)
        elif self.method == "xarray":
            payload = self.bbox_xarray(infile)
        else:
            # BUGFIX: an unknown method previously left `payload` unbound,
            # raising a confusing NameError on the write below.
            raise ValueError(f"Unknown bbox method: {self.method}")

        # Write output file, closing the handle deterministically.
        with open(outfile, "wb") as f:
            f.write(payload)
        return outfile

    def bbox_cdo_shellout(self, infile: Path) -> bytes:
        """
        Apply bounding box using "cdo".
        Here, we build the command ourselves.

        - https://code.mpimet.mpg.de/projects/cdo/wiki/Tutorial
        - https://github.com/mhaberler/docker-dwd-open-data-downloader/blob/003ab3f/extract/Makefile#L53-L62

        :param infile: Path to input file
        :return: Content of output file
        """
        # cdo -sellonlatbox,-180,180,0,90 <infile> <outfile>
        bbox_string = self.bbox.to_string(",", lonlat=True)
        tmpfile = tempfile.NamedTemporaryFile()

        # Compute output format.
        output_format = ""
        # FIXME: That would yield a netCDF file with parameter "2t" instead of "t2m".
        """
        if self.use_netcdf:
            output_format = "--format=nc4"
        """
        command = f"cdo --eccodes --cmor {output_format} sellonlatbox,{bbox_string} '{infile}' '{tmpfile.name}'"
        exitcode = os.system(command)
        assert exitcode == 0, f"Invoking `cdo` failed. command={command}"
        return self.to_grib_or_netcdf(tmpfile.name)

    def bbox_cdo_python(self, infile: Path) -> bytes:
        """
        Apply bounding box using "cdo".
        Here, we use the Python wrapper.

        - https://pypi.org/project/cdo/
        - https://code.mpimet.mpg.de/boards/1/topics/6392

        :param infile: Path to input file
        :return: Content of output file
        """
        import cdo

        bbox_string = self.bbox.to_string(",", lonlat=True)
        cdo = cdo.Cdo(logging=True, debug=False)
        tmpfile = tempfile.NamedTemporaryFile()
        cdo.sellonlatbox(bbox_string, input=str(infile), output=tmpfile.name)
        return self.to_grib_or_netcdf(tmpfile.name)

    def to_grib_or_netcdf(self, gribfile: str) -> bytes:
        """
        Depending on the configuration of GRIBSubset,
        either return content of GRIB file or convert
        to netCDF-4 format with compression.

        This is needed because the ``--format=nc4`` option of ``cdo``
        would produce a netCDF-4 file with parameter "2t" instead of "t2m".

        :param gribfile: Path to input GRIB file
        :return: Content of output file
        """
        if self.use_netcdf:
            tmpfile_netcdf = tempfile.NamedTemporaryFile()
            command = f"grib_to_netcdf -k 4 -d 6 -o '{tmpfile_netcdf.name}' '{gribfile}'"
            # BUGFIX: the exit code was previously ignored, which could
            # silently return an empty/partial netCDF file on failure.
            exitcode = os.system(command)
            assert exitcode == 0, f"Invoking `grib_to_netcdf` failed. command={command}"
            outfile = tmpfile_netcdf.name
        else:
            outfile = gribfile
        with open(outfile, "rb") as f:
            return f.read()

    def bbox_xarray(self, infile: Path) -> bytes:
        """
        Apply bounding box using Xarray.

        - https://xarray.pydata.org/en/stable/generated/xarray.Dataset.where.html
        - https://stackoverflow.com/a/62209490

        FIXME: Needs a patch.
        Currently, Xarray will croak on indexing the Pandas datetime field
        when operating on GRIB2 files.

        :param infile: Path to input file
        :return: Content of output file
        """
        import xarray as xr
        from cfgrib.xarray_to_grib import to_grib

        ds = xr.open_dataset(infile, engine="cfgrib")
        # Keep only grid points inside the bounding box.
        result: xr.Dataset = ds.where(
            (ds.latitude >= self.bbox.latitude_min)
            & (ds.latitude <= self.bbox.latitude_max)
            & (ds.longitude >= self.bbox.longitude_min)
            & (ds.longitude <= self.bbox.longitude_max),
            drop=True,
        )
        tmpfile = tempfile.NamedTemporaryFile()
        if self.use_netcdf:
            result.to_netcdf(tmpfile.name)
        else:
            to_grib(result, tmpfile.name)
        with open(tmpfile.name, "rb") as f:
            return f.read()

    def plot(self, infile: Path) -> Path:
        """
        Plot the outcome using ECMWF Magics.

        TODO: Use custom ``magics.mmap()`` instead of
              ``subpage_map_area_name="central_europe"``
              for better zooming into the area of interest.

        :param infile: Path to input file
        :return: Path to output file
        """
        # Suppress banner output on STDOUT.
        os.environ["MAGPLUS_QUIET"] = "true"
        from Magics import macro as magics

        # Compute outfile location.
        outfolder = self.outfolder.joinpath("png")
        outfolder.mkdir(parents=True, exist_ok=True)
        outfile = outfolder.joinpath(infile.name)
        outfile_real = outfile.with_suffix(".png")

        if self.dry_run:
            return outfile_real

        # Setting of the output file name.
        output = magics.output(
            output_name=str(outfile), output_formats=["png"], output_name_first_page_number="off"
        )

        # Import the data.
        if self.use_netcdf:
            # When plotting netCDF, the variable name has to be given.
            netcdf_variable = get_netcdf_main_variable(infile)
            data = magics.mnetcdf(
                netcdf_filename=str(infile), netcdf_value_variable=netcdf_variable
            )
        else:
            data = magics.mgrib(grib_input_file_name=str(infile))

        # Apply an automatic styling.
        contour = magics.mcont(contour_automatic_setting="ecmwf")
        coast = magics.mcoast()

        # Select area by coordinates.
        # https://github.com/ecmwf/notebook-examples/blob/master/visualisation/tutorials/Subpage-Projections.ipynb
        projection = magics.mmap(
            subpage_map_library_area="on",
            subpage_map_area_name="central_europe",
            page_id_line="off",
        )
        """
        projection = magics.mmap(
            subpage_map_projection="cylindrical",
            subpage_lower_left_latitude=bbox[1] + 15,
            subpage_lower_left_longitude=bbox[0] - 15,
            subpage_upper_right_latitude=bbox[3] + 15,
            subpage_upper_right_longitude=bbox[2] - 15,
        )
        """
        # magics.plot(output, data, contour, projection, coast)
        # magics.plot(output, projection, coast)
        magics.plot(output, projection, data, contour, coast)

        return outfile_real
def get_netcdf_main_variable(filename: str) -> str:
    """
    Return the name of the first variable stored in a netCDF file,
    which is usually the main data variable.

    Examples:

        >>> f.variables.keys()
        dict_keys(['t2m', 'time', 'step', 'heightAboveGround', 'latitude', 'longitude', 'valid_time'])

        >>> f.variables.keys()
        dict_keys(['u', 'time', 'step', 'isobaricInhPa', 'latitude', 'longitude', 'valid_time'])

    :param filename: Path to the netCDF file.
    :return: Name of the first variable.
    """
    import netCDF4

    dataset = netCDF4.Dataset(filename)
    # Iterating the variables mapping yields the variable names in order.
    first_name = list(dataset.variables)[0]
    dataset.close()
    return first_name
@click.command(
    help="""
    Extract area of interest from GRIB files using a bounding box.

    INPUT can be a single file or a list of files.

    For specifying the area of interest, either use "--country" or "--bbox".
"""
)
@click.argument("input", type=click.Path(file_okay=True, dir_okay=True), required=True, nargs=-1)
@click.option(
    "--output",
    envvar="GM_DATA_PATH",
    type=click.Path(exists=False, file_okay=False, dir_okay=True),
    help="The output directory",
    required=True,
)
@optgroup.group("area", cls=RequiredMutuallyExclusiveOptionGroup, help="The area of interest")
@optgroup.option("--country", type=str, help="The country ISO code to derive a bounding box")
@optgroup.option(
    "--bbox",
    type=click.Tuple([float, float, float, float]),
    nargs=4,
    help="The bounding box. Use a space-separated list of 'lat_min lat_max lon_min lon_max'",
    default=(None, None, None, None),
)
@click.option(
    "--method",
    type=click.Choice(["cdo-shellout", "cdo-python", "xarray"], case_sensitive=False),
    help="Which bbox method to use, defaults to cdo-shellout",
    required=False,
    default="cdo-shellout",
)
@click.option("--use-netcdf", is_flag=True, help="Whether to use netCDF", required=False)
@click.option("--plot", is_flag=True, help="Whether to produce png plots", required=False)
@click.option(
    "--dry-run", is_flag=True, help="Whether to simulate processing", required=False, default=False
)
def main(
    input: List[Path],
    output: Path,
    country: str,
    bbox: tuple,
    method: str,
    use_netcdf: bool,
    plot: bool,
    dry_run: bool,
):
    """CLI entry point: build the bounding box and run the GRIBSubset machinery."""
    # Setup logging.
    setup_logging(level=logging.INFO)

    # Create bounding box from selected area of interest.
    if country:
        bbox = BBox.from_country(country)
    elif bbox:
        # NOTE(review): the default bbox (None, None, None, None) is truthy;
        # the mutually-exclusive option group is what guarantees one of the
        # two options was actually supplied — confirm.
        bbox = BBox.from_coordinates(bbox)
    logger.info(f"Using bounding box {bbox}")

    # Invoke the machinery.
    subgrid = GRIBSubset(
        input=input,
        output=output,
        bbox=bbox,
        method=method,
        use_netcdf=use_netcdf,
        plot=plot,
        dry_run=dry_run,
    )
    results = subgrid.process()

    # Report about the outcome.
    print(json.dumps(results, default=json_serializer, indent=4))


if __name__ == "__main__":  # pragma: nocover
    main()
|
<reponame>PseudoSky/rpcs-sensors<filename>testbit_keys2.py
#teOfSleep': u'2016-04-'dateOfSleep': u'2016-04-V1 - Minute by minute data
#V2 - Summary data added
#!/usr/bin/env python
import fitbit
import pprint
import serial
import requests
from datetime import datetime, timedelta
import json
import cherrypy
import os
import sys
import threading
import traceback
import webbrowser
from base64 import b64encode
from fitbit.api import FitbitOauth2Client
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError, MissingTokenError
from requests_oauthlib import OAuth2Session
class OAuth2Server:
    """Small CherryPy web app that drives the Fitbit OAuth2 browser flow."""
    def __init__(self, client_id, client_secret,
                 redirect_uri='http://1172.16.17.32:8080/'):
        """ Initialize the FitbitOauth2Client """
        self.redirect_uri = redirect_uri
        self.success_html = """
            <h1>You are now authorized to access the Fitbit API!</h1>
            <br/><h3>You can close this window</h3>"""
        self.failure_html = """
            <h1>ERROR: %s</h1><br/><h3>You can close this window</h3>%s"""
        self.oauth = FitbitOauth2Client(client_id, client_secret)

    def browser_authorize(self):
        """
        Open a browser to the authorization url and spool up a CherryPy
        server to accept the response
        """
        url, _ = self.oauth.authorize_token_url(redirect_uri=self.redirect_uri)
        # Open the web browser in a new thread for command-line browser support
        threading.Timer(1, webbrowser.open, args=(url,)).start()
        cherrypy.quickstart(self)

    @cherrypy.expose
    def index(self, state, code=None, error=None):
        """
        Receive a Fitbit response containing a verification code. Use the code
        to fetch the access_token.
        """
        # NOTE(review): the incoming `error` query parameter is discarded here
        # and rebuilt locally — confirm this is intended.
        error = None
        if code:
            try:
                self.oauth.fetch_access_token(code, self.redirect_uri)
            except MissingTokenError:
                error = self._fmt_failure(
                    'Missing access token parameter.</br>Please check that '
                    'you are using the correct client_secret')
            except MismatchingStateError:
                error = self._fmt_failure('CSRF Warning! Mismatching state')
        else:
            error = self._fmt_failure('Unknown error while authenticating')
        # Use a thread to shutdown cherrypy so we can return HTML first
        self._shutdown_cherrypy()
        return error if error else self.success_html

    def _fmt_failure(self, message):
        # Render the failure page, including the current traceback if any.
        tb = traceback.format_tb(sys.exc_info()[2])
        tb_html = '<pre>%s</pre>' % ('\n'.join(tb)) if tb else ''
        return self.failure_html % (message, tb_html)

    def _shutdown_cherrypy(self):
        """ Shutdown cherrypy in one second, if it's running """
        if cherrypy.engine.state == cherrypy.engine.states.STARTED:
            threading.Timer(1, cherrypy.engine.exit).start()
#Constants
#Need to run 'gatherkeys_oauth' to get USER KEY and USER SECRET
# NOTE(review): hard-coded client credentials; consider loading these from
# the environment instead of committing them to source control.
CLIENT_KEY = '<KEY>'
CLIENT_SECRET = '2a0f3ad96095957db6e13687ebb2687d'
#The first date I used Fitbit
FirstFitbitDate = "2016-04-20"
#Determine how many days to process for.
def CountTheDays():
    """Return the number of days between today and FirstFitbitDate.

    :return: Absolute day count as an int.
    """
    #See how many days there's been between today and my first Fitbit date.
    now = datetime.now()  #Todays date
    # BUGFIX: was the Python-2-only statement `print now`; the function call
    # form works identically on both Python 2 and 3 for a single argument.
    print(now)
    #First Fitbit date as a Python date object
    FirstDate = datetime.strptime(FirstFitbitDate,"%Y-%m-%d")
    #Calculate difference between the two and return it
    return abs((now - FirstDate).days)
#Post to the BEC server
#Produce a date in yyyy-mm-dd format that is n days before today's date (where n is a passed parameter)
def ComputeADate(DaysDiff):
    """Return the date DaysDiff days before today, formatted as 'YYYY-MM-DD'."""
    target = datetime.now() - timedelta(days=DaysDiff)
    return target.strftime("%Y-%m-%d")
# Run the OAuth2 browser flow to obtain the access/refresh token pair.
arg = ["227PRQ", CLIENT_SECRET]
server = OAuth2Server(*arg)
server.browser_authorize()
print('FULL RESULTS = %s' % server.oauth.token)
print('ACCESS_TOKEN = %s' % server.oauth.token['access_token'])
print('REFRESH_TOKEN = %s' % server.oauth.token['refresh_token'])
# Target BEC server that sensor readings get posted to.
host = {
    "ip":"172.16.58.3",
    "port": 50000,
    "endpoint":"values"
}
def get_vals(host):
    # Fetch the current sensor values from the BEC server as JSON.
    # (Python 2 print statement — this file is Python 2.)
    r = requests.get("http://{}:{}/{}".format(host["ip"],host["port"],host["endpoint"]))
    print r.text
    return r.json()
def post(host, data):
    # POST a sensor reading to the BEC server and echo the HTTP outcome.
    r = requests.post("http://{}:{}/{}".format(host["ip"],host["port"],host["endpoint"]), data=data)
    print r.status_code,r.reason,r.text
    return r.text
#Get a client
authd_client = fitbit.Fitbit(CLIENT_KEY, CLIENT_SECRET, access_token = server.oauth.token['access_token'], refresh_token = server.oauth.token['refresh_token'])
#Find out how many days to compute for
DayCount = CountTheDays()
# Make directories for Fitbit
hr_dict = {};
steps_dict = {};
awakeCount = {};
awakeDuration ={};
#Process each one of these days stepping back in the for loop and thus stepping up in time
for i in range(DayCount,-1,-1):
    #Get the date to process
    DateForAPI = ComputeADate(i)
    #Tell the user what is happening
    print 'Processing this date: ' + DateForAPI
    #Get a Fitbit data call
    # Request a fitbit data call from the server about a specific sensor
    fitbit_heart = authd_client.intraday_time_series('activities/heart', base_date=DateForAPI, detail_level='1sec'); #works correctly, yay!!
    fitbit_steps = authd_client.intraday_time_series('activities/steps', base_date=DateForAPI, detail_level='1min');
    fitbit_sleep = authd_client._COLLECTION_RESOURCE('sleep', DateForAPI);
    #print fitbit_hear
    # Print to terminal
    #print fitbit_act
    pp = pprint.PrettyPrinter(indent=4)
    #pp.pprint(fitbit_sleep)
    #pp.pprint(fitbit_act)
    pp.pprint(fitbit_heart)
    #f=open('datadumpHeart.json', 'w')
    # NOTE(review): `dict` shadows the builtin; the round-trip through
    # json.dumps/json.loads only deep-copies the response — confirm needed.
    dict = json.loads(json.dumps(fitbit_heart))
    dict1 = dict.get('activities-heart')
    dict = json.loads(json.dumps(dict1[0]))
    dict1 = dict.get('value')
    rest_hr = dict1.get('restingHeartRate')
    hr_dict[DateForAPI] = rest_hr
    dict = json.loads(json.dumps(fitbit_steps))
    dict1 = dict.get('activities-steps')
    dict = json.loads(json.dumps(dict1[0]))
    dict1 = dict.get('value')
    dict1 = str(dict1)
    # Strip the Python 2 unicode repr markers from the value.
    dict1 = dict1.replace("u'","")
    steps = dict1.replace("'", "")
    #steps_dict[DateForAPI] = int(dict1)
    hr_rate = {"user_id": "571b97467391f8524f9d96fc", "sensor_id": "resting-heart-rate", "value": str(rest_hr) }
    steps_count = {"user_id": "571b97467391f8524f9d96fc", "sensor_id": "steps", "value": str(steps) }
    print hr_rate
    print steps_count
    # Post to the BEC server
    post(host,hr_rate)
    post(host,steps_count)
    print "DATA POSTED TO BEC SERVER!"
print "Heart rate"
print hr_dict
print "Steps"
print steps_dict
|
<gh_stars>100-1000
from .constants import GROUPBY_SEASONALITIES

# Global default options for climpred.
OPTIONS = {
    "seasonality": "month",
    "warn_for_failed_PredictionEnsemble_xr_call": True,
    "warn_for_rename_to_climpred_dims": True,
    "warn_for_init_coords_int_to_annual": True,
    "climpred_warnings": True,
}  # defaults

_SEASONALITY_OPTIONS = frozenset(GROUPBY_SEASONALITIES)

# Per-option validation callables. Keys must match OPTIONS exactly,
# otherwise the option is silently accepted without validation.
_VALIDATORS = {
    "seasonality": _SEASONALITY_OPTIONS.__contains__,
    # BUGFIX: this key previously read "warn_for_PredictionEnsemble_xr_call",
    # which does not match the OPTIONS key, so the validator never ran.
    "warn_for_failed_PredictionEnsemble_xr_call": lambda choice: choice
    in [True, False, "default"],
    "warn_for_rename_to_climpred_dims": lambda choice: choice
    in [True, False, "default"],
    "warn_for_init_coords_int_to_annual": lambda choice: choice
    in [True, False, "default"],
    "climpred_warnings": lambda choice: choice in [True, False, "default"],
}
class set_options:
    """Set options for climpred in a controlled context. Analogous to
    `xarray.set_options(**option) <http://xarray.pydata.org/en/stable/generated/xarray.set_options.html>`_.

    Currently supported options:

    - ``seasonality``
        - Attribute to group dimension ``groupby(f"{dim}.{seasonality}"")``.
          Used in ``reference=climatology`` and
          :py:meth:`~climpred.classes.HindcastEnsemble.remove_bias`.
        - Allowed: [``"dayofyear"``, ``"weekofyear"``, ``"month"``, ``"season"``]
        - Default: ``dayofyear``.
    - ``warn_for_failed_PredictionEnsemble_xr_call``
        - Raise UserWarning when PredictionEnsemble.xr_call,
          e.g. ``.sel(lead=[1])`` fails on one of the datasets.
        - Allowed: [True, False]
        - Default: True
    - ``warn_for_rename_to_climpred_dims``
        - Raise UserWarning when dimensions are renamed to ``CLIMPRED_DIMS`` when
          PredictionEnsemble is instantiated.
        - Allowed: [True, False]
        - Default: True
    - ``warn_for_init_coords_int_to_annual``
        - Raise UserWarning when ``init`` coordinate is of type integer and gets
          converted to annual cftime_range when PredictionEnsemble is instantiated.
        - Allowed: [True, False]
        - Default: True
    - ``climpred_warnings``
        - Overwrites all options containing ``"*warn*"``.
        - Allowed: [True, False]
        - Default: True

    Examples:
        You can use ``set_options`` either as a context manager:

        >>> kw = dict(metric='mse', comparison='e2o', dim='init',
        ...           alignment='same_verifs', reference='climatology')
        >>> with climpred.set_options(seasonality='month'):
        ...     HindcastEnsemble.verify(**kw).SST.sel(skill='climatology')
        <xarray.DataArray 'SST' (lead: 10)>
        array([0.03712573, 0.03712573, 0.03712573, 0.03712573, 0.03712573,
               0.03712573, 0.03712573, 0.03712573, 0.03712573, 0.03712573])
        Coordinates:
          * lead     (lead) int32 1 2 3 4 5 6 7 8 9 10
            skill    <U11 'climatology'

        Or to set global options:

        >>> climpred.set_options(seasonality='month')  # doctest: +ELLIPSIS
        <climpred.options.set_options object at 0x...>
    """

    def __init__(self, **kwargs):
        # Remember the previous values so they can be restored on __exit__.
        self.old = {}
        for key, value in kwargs.items():
            if key not in OPTIONS:
                raise ValueError(
                    "argument name %r is not in the set of valid options %r"
                    % (key, set(OPTIONS))
                )
            if key in _VALIDATORS and not _VALIDATORS[key](value):
                expected = (
                    f"Expected one of {_SEASONALITY_OPTIONS!r}"
                    if key == "seasonality"
                    else ""
                )
                raise ValueError(
                    f"option {key!r} given an invalid value: {value!r}. " + expected
                )
            self.old[key] = OPTIONS[key]
        self._apply_update(kwargs)

    def _apply_update(self, options_dict):
        # climpred_warnings == False switches off every "*warn*" option.
        if "climpred_warnings" in options_dict and not options_dict["climpred_warnings"]:
            for name in OPTIONS:
                if "warn" in name:
                    options_dict[name] = False
        OPTIONS.update(options_dict)

    def __enter__(self):
        return

    def __exit__(self, type, value, traceback):
        # Restore the option values captured at construction time.
        self._apply_update(self.old)
|
<reponame>drmidnightytb/InstaBot
## ANSI color table (Python) ##
# foreground colors #
Mblack = '\033[1;30m'  # Black
Ired = '\033[1;31m'  # Red
Dgreen = '\033[1;32m'  # Green
Nyellow = '\033[1;33m'  # Yellow
Iblue = '\033[1;34m'  # Blue
Gpurple = '\033[1;35m'  # Purple
Hcyan = '\033[1;36m'  # Cyan
Twhite = '\033[1;37m'  # White
VRCRM = '\033[0;0m'  # Reset
import os
import requests
# Pre-formatted status prefixes for console output.
error = f'{Twhite}[{Ired}ERROR{Twhite}]';
warning = f'{Twhite}[{Nyellow}!{Twhite}]';
info = f'{Twhite}[{Dgreen}i{Twhite}]'
result = os.popen('figlet INSTA-BOT').read()
os.system('clear')
print(f'Bot Stalker para Instagram by Dr Midnight')
def clear():
    """Clear the terminal on both Windows ('cls') and POSIX ('clear')."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
def restart():
    """Replace the current process with a fresh run of this script."""
    # NOTE(review): relies on `sys`, which is imported at module level
    # further down the file — valid at call time, confirm import order stays.
    python = sys.executable
    os.execl(python, python, *sys.argv)
import os, sys, time, json, subprocess, platform
try:
    import instaloader
    from instabot import Bot
except:
    # First run: offer to install the required third-party packages.
    # NOTE(review): the bare except hides the real import error, and
    # `.upper()[0]` raises IndexError on empty input — confirm intended.
    install = input(
        f'{Twhite}{Dgreen}[i]{Twhite} Ola! Vejo que esta é sua primeira vez aqui...'
        f'\nDeseja instalar o software necessário?\n1-Sim\n2-Não\n_').strip().upper()[0]
    if install == 'S' or install == '1':
        os.system("apt install figlet -y")
        os.system('python3 -m pip install --upgrade pip')
        os.system('pip install instaloader && pip install instabot')
        clear()
    else:
        print(f'Ok... Tente realizar a instalação manual ou Adeus');
        exit()
    # Re-exec the script so the freshly installed packages are importable.
    restart()
# Load the local helper modules; abort with a readable message when missing.
try:
    from database import banner, download_posts, download_profile, list_followers, publish_photo
except Exception as exc:
    # BUGFIX: previously concatenated a str with the exception object
    # ('... Erro: ' + error), which raises TypeError, and also shadowed
    # the module-level `error` prefix constant.
    print(f'{Twhite}{Ired}[*]{Twhite} Erro: {exc}')
    exit()
def dialog(text='', tiled='='):
    """Render an info banner: figlet header, framed message, 3 second pause."""
    clear();
    print(os.popen('figlet INSTA-BOT').read())
    text = text.split('\n')
    # Frame width follows the longest message line.
    maior = 0
    for txt in text:
        tamanho = len(txt)
        if tamanho > maior:
            maior = tamanho
    print(str(Twhite) + str(Dgreen) + tiled + tiled + tiled * maior + tiled + tiled + str(Twhite))
    for txt in text:
        print(str(warning) + ' ' + txt)
    print(str(Twhite) + str(Dgreen) + tiled + tiled + tiled * maior + tiled + tiled + str(Twhite))
    time.sleep(3)
def error_dialog(text='', tiled='='):
    """Render an error banner: figlet header, red-framed message, 3 second pause."""
    clear();
    print(os.popen('figlet INSTA-BOT').read())
    text = text.split('\n')
    # Frame width follows the longest message line.
    maior = 0
    for txt in text:
        tamanho = len(txt)
        if tamanho > maior:
            maior = tamanho
    print(str(Twhite) + str(Ired) + tiled * 8 + tiled * maior + tiled * 8 + str(Twhite))
    for txt in text:
        print(str(error) + ' ' + txt + ' ' + str(error))
    print(str(Twhite) + str(Ired) + tiled * 8 + tiled * maior + tiled * 8 + str(Twhite))
    time.sleep(3)
# NOTE(review): rebinds the `requests` module name to a Session object;
# any later use of module-level requests functions would break — confirm.
requests = requests.Session();result = os.popen('figlet INSTA-BOT').read()
# Self-update via git before showing the menu.
try:
    if __name__ == '__main__':
        dialog('Buscando atualizações ...')
        update = subprocess.check_output('git pull', shell=True)
        if 'Already up to date' not in update.decode():
            dialog('Atualização instalada.\nReiniciando o bot.')
            restart()
        else:
            print(f'{Twhite}[{Nyellow}i{Twhite}] Nenhuma atualização disponivel.')
            time.sleep(2)
except:
    if os.path.exists('.git'):
        pass
    else:
        error_dialog('Falta de repositório GIT local')
        # Fallback: install the required CLI tools via apt or pacman.
        try:
            subprocess.check_output('apt update -y', shell=True)
            os.system("apt install figlet curl -y")
        except:
            os.system("pacman -Sy figlet curl")
# Main menu loop; exits when the user chooses option 8.
Sair = False
while Sair == False:
    try:
        banner.menu()
        opc = int(input(f'{Dgreen}Digite o numero da opção que deseja: \n>>>'))
    except:
        error_dialog('Caracteres não reconhecidos');
        opc = None
    clear()
    if opc == 1: # PHOTO/INFO PROFILE
        download_profile.start()
    # elif opc == 2: # EXPLORING PROFILES [ OFF ]
    # dr.midnight()
    elif opc == 3: # LIST INSTAGRAM FOLLOWERS
        list_followers.start()
    elif opc == 4: # DOWNLOAD INSTAGRAM POSTS
        download_posts.start()
    elif opc == 5: # PUBLISH INSTAGRAM PHOTO
        publish_photo.start()
    # elif opc == 6: # INSTA STALKER [ OFF ]
    # vrc.goxtoso()
    elif opc == 7: # Update the panel
        os.popen('cd database && bash update.sh');
        dialog('Reiniciando o painel...');
        restart()
    elif opc == 8: # Quit
        Sair = True
    elif opc == 9: # Creator contact
        os.system('termux-open-url https://wa.me/5512988789266')
    elif opc == 10: # Community group
        os.system('termux-open-url https://discord.gg/kgXhZzGJDY')
    elif opc == None:
        pass
    else:
        error_dialog('Opção incorreta')
|
# coding:utf-8
#!/usr/bin/python
#
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
# -- This line is 75 characters -------------------------------------------
from __future__ import unicode_literals
# from builtins import str
# A patch to make this module work in Python3 (hopefully still works in Py27)
# Detect the interpreter by probing for the Py2-only `unicode` builtin and
# alias the string/bytes types accordingly.
try:
    unicode = unicode
except NameError:
    # 'unicode' is undefined, must be Python3
    str = str
    unicode = str
    bytes = bytes
    basestring = (str, bytes)
else:
    # 'unicode' exists, must be Python2
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring
# -------------------------------------------------------------------------
'''
Module Documentation:
DccScriptingInterface\\azpy\\shared\\common\\core_utils.py
A set of utility functions
<to do: further document this module>
To Do:
ATOM-5859
'''
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# built in's
import os
import sys
import site
import fnmatch
# 3rd Party
from pathlib import Path
# from progress.spinner import Spinner # deprecate use (or refactor)
# Lumberyard extensions
from azpy.constants import *
from azpy import initialize_logger
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# global space debug flag
from azpy.env_bool import env_bool
from azpy.constants import ENVAR_DCCSI_GDEBUG
from azpy.constants import ENVAR_DCCSI_DEV_MODE
# global space
# Module debug flags, resolved from the environment.
_G_DEBUG = env_bool(ENVAR_DCCSI_GDEBUG, False)
_DCCSI_DEV_MODE = env_bool(ENVAR_DCCSI_DEV_MODE, False)
_PACKAGENAME = __name__
# BUGFIX: was `is '__main__'` — identity comparison against a literal is
# implementation-dependent (and raises SyntaxWarning on modern CPython).
if _PACKAGENAME == '__main__':
    _PACKAGENAME = 'azpy.shared.common.core_utils'
import azpy
_LOGGER = azpy.initialize_logger(_PACKAGENAME)
# BUGFIX: was `.format({_PACKAGENAME})`, which logged a one-element set.
_LOGGER.debug('Invoking __init__.py for {0}.'.format(_PACKAGENAME))
# -------------------------------------------------------------------------
# --------------------------------------------------------------------------
def gather_paths_of_type_from_dir(in_path=str('c:\\'),
                                  extension=str('*.py'),
                                  return_path_list=None):
    '''Walks from in_path and returns the list of directories that contain
    at least one file matching the extension pattern.

    :param in_path: Directory to start the recursive walk from.
    :param extension: fnmatch-style pattern, e.g. '*.py'.
    :param return_path_list: Accumulator list; created when None.
        (BUGFIX: was a mutable default argument, which is shared across
        calls and silently accumulates results between invocations.)
    :return: List of unix-style directory paths without trailing slashes.
    '''
    if return_path_list is None:
        return_path_list = []
    found = None
    for item in os.listdir(in_path):
        child = os.path.join(in_path, item)
        if os.path.isdir(child):
            # Recurse into sub-directories first.
            return_path_list = gather_paths_of_type_from_dir(child,
                                                             extension,
                                                             return_path_list)
        # BUGFIX: was `elif os.path.isfile:` — the bare function object is
        # always truthy, so the file check never actually ran.
        elif os.path.isfile(child) and fnmatch.fnmatch(item, extension):
            found = True
    if found:
        return_path_list.append(dir_trim_following_slash(to_unix_path(os.path.abspath(in_path))))
    return return_path_list
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------
def dir_trim_following_slash(current_path_str):
    '''Return the path string with a single trailing slash or backslash removed.'''
    if current_path_str.endswith(('/', '\\')):
        return current_path_str[0:-1]
    return current_path_str
# --------------------------------------------------------------------------
# ------------------------------------------------------------------------
def to_unix_path(current_path_str):
    '''Convert a path string to use forward (unix) slashes.'''
    _LOGGER.debug('to_unix_path({0})'.format(current_path_str))
    return str(current_path_str).replace('\\', '/')
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def we_are_frozen():
    """Return True when running from a frozen bundle (e.g. built by py2exe)."""
    # Frozen interpreters expose a `frozen` attribute on sys.
    return hasattr(sys, "frozen")


def module_path():
    """Return the directory containing this module (or the executable, when frozen)."""
    encoding = sys.getfilesystemencoding()
    if not we_are_frozen():
        return os.path.dirname(__file__)
    return os.path.dirname(unicode(sys.executable, encoding))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def get_stub_check_path(in_path, check_stub='engineroot.txt'):
    '''
    Walk up from in_path looking for the marker file ``check_stub``
    (default 'engineroot.txt', the branch-root marker), so relative file
    paths can be built safely within that branch.

    :param in_path: Directory (or file) to start searching from.
    :param check_stub: Marker file name to look for in each ancestor.
    :return: Path of the marker file, or None when no ancestor contains it.
    '''
    current = Path(in_path).absolute()
    while True:
        candidate = Path(current, check_stub)
        if candidate.is_file():
            return Path(candidate)
        current, tail = current.parent, current.name
        # Reached the filesystem root without finding the stub.
        if len(tail) == 0:
            return None
# --------------------------------------------------------------------------
# -------------------------------------------------------------------------
def reorder_sys_paths(known_sys_paths):
    """Move sys.path entries that are not in known_sys_paths to the front.

    :param known_sys_paths: Collection of already-known, lower-cased path strings.
    :return: Refreshed path info from site._init_pathinfo().
    """
    sys_paths = list(sys.path)
    new_sys_paths = []
    # NOTE(review): this removes from `sys_paths` while iterating it, which
    # skips the element after each removal — confirm whether every new path
    # is really expected to be collected here.
    for item in sys_paths:
        item = Path(item)
        if str(item).lower() not in known_sys_paths:
            new_sys_paths.append(item)
            sys_paths.remove(str(item))
    # Prepend the newly discovered paths so they win on import resolution.
    sys.path[:0] = new_sys_paths
    # NOTE(review): site._init_pathinfo() is a private CPython API and may
    # change between interpreter versions.
    known_sys_paths = site._init_pathinfo()
    return known_sys_paths
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def check_path_exists(in_path):
    """Will bark if path does not exist"""
    does_exist = os.path.exists(in_path)
    # log either '~ Path EXISTS: ...' or '~ Path does not exist: ...'
    status = 'EXISTS' if does_exist else 'does not exist'
    _LOGGER.info('~ Path {0}: {1}\r'.format(status, in_path))
    return does_exist
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def path_split_all(in_path):
    '''Splits *in_path* into a list of all of its components.

    :param in_path: path string to decompose.
    :return: list of path components, root first (absolute paths keep the
        root separator as the first element).
    '''
    all_path_parts = []
    # BUGFIX: the original never initialized ``path`` from ``in_path`` and
    # raised NameError on every call.
    path = in_path
    while 1:
        head, tail = os.path.split(path)
        if head == path:  # sentinel for absolute paths (root reached)
            all_path_parts.insert(0, head)
            break
        elif tail == path:  # sentinel for relative paths (single name left)
            all_path_parts.insert(0, tail)
            break
        else:
            path = head
            all_path_parts.insert(0, tail)
    return all_path_parts
# -------------------------------------------------------------------------
# --------------------------------------------------------------------------
def synthetic_property(inst, name, value, read_only=False):
    '''
    This is a convenience method for OOP:
    synthesizes the creation of a property attr with convenience methods:
        x.attribute       # the @property (and setter)
        x._attribute      # attribute storage (private)
        x.getAttribute()  # retrieve attribute
        x.setAttribute()  # set attribute (only created if not 'read only')
        x.delAttribute()  # delete the attribute from object

    :param inst: instance whose *class* receives the synthesized property.
    :param name: public attribute name to synthesize.
    :param value: initial value stored on the instance under '_name'.
    :param read_only: when True no setter is created.
    :return: None (the property is installed as a side effect).
    '''
    cls = type(inst)
    storage_name = '_{0}'.format(name)
    # Accessor names follow the getX/setX/delX camelCase convention.
    getter_name = 'get{0}{1}'.format(name[0].capitalize(), name[1:])
    setter_name = 'set{0}{1}'.format(name[0].capitalize(), name[1:])
    deleter_name = 'del{0}{1}'.format(name[0].capitalize(), name[1:])
    # Seed the backing slot on the *instance*.
    setattr(inst, storage_name, value)
    # We always define the getter
    def custom_getter(self):
        return getattr(self, storage_name)
    # Add the Getter (don't clobber a pre-existing accessor)
    if not hasattr(inst, getter_name):
        setattr(cls, getter_name, custom_getter)
    # Handle Read Only
    if read_only:
        if not hasattr(inst, name):
            # NOTE(review): ``fdel`` is wired to the *getter* here (and again
            # below); ``deleter_name`` is computed but never used, so deleting
            # the property would invoke the getter instead of removing state.
            setattr(cls, name, property(fget=getattr(cls, getter_name, None)
                                        or custom_getter,
                                        fdel=getattr(cls, getter_name, None)))
    else:
        # We only define the setter if we aren't read only
        def custom_setter(self, state):
            setattr(self, storage_name, state)
        if not hasattr(inst, setter_name):
            setattr(cls, setter_name, custom_setter)
        member = None
        if hasattr(cls, name):
            # we need to try to update the property fget, fset,
            # fdel in case the class has defined its own custom functions
            member = getattr(cls, name)
            if not isinstance(member, property):
                raise ValueError('Member "{0}" for class "{1}" exists and is not a property.'
                                 ''.format(name, cls.__name__))
        # Regardless if the class has the property or not we still try to set
        # it, preferring any pre-existing fget/fset over the synthesized ones.
        setattr(cls, name, property(fget=getattr(member, 'fget', None)
                                    or getattr(cls, getter_name, None)
                                    or custom_getter,
                                    fset=getattr(member, 'fset', None)
                                    or getattr(cls, setter_name, None)
                                    or custom_setter,
                                    fdel=getattr(member, 'fdel', None)
                                    or getattr(cls, getter_name, None)))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def find_arg(arg_pos_index=None, arg_tag=None, remove_kwarg=None,
             in_args=None, in_kwargs=None, default_value=None):
    """Find and return a single argument from args/kwargs.

    If ``arg_pos_index`` is given, the positional ``in_args`` are checked
    first; when nothing is found there, ``in_kwargs[arg_tag]`` is used,
    finally falling back to ``default_value``.  NOTE: a positional match
    takes precedence over a kwarg (original behavior preserved, despite
    the old comment claiming otherwise).  When ``remove_kwarg`` is truthy,
    ``arg_tag`` is removed from the returned kwargs regardless of where
    the value came from.

    :param arg_pos_index: int index into ``in_args`` (None to skip).
    :param arg_tag: key looked up (and optionally removed) in ``in_kwargs``.
    :param remove_kwarg: when truthy, delete ``arg_tag`` from the kwargs.
    :param in_args: positional argument sequence (may be None).
    :param in_kwargs: keyword argument mapping (may be None).
    :param default_value: returned when the argument is found nowhere.
    :return: tuple ``(found_arg, in_kwargs)`` with the (pruned) kwargs.
    :raises TypeError: if ``arg_pos_index`` is not an int.

    proper usage:
        found_arg, kwargs = find_arg(0, 'name', True, args, kwargs)
    """
    if in_kwargs is None:
        # Tolerate a missing kwargs mapping instead of crashing on .get().
        in_kwargs = {}
    _missing = object()  # local sentinel -- None is a legitimate value
    found_arg = _missing
    if arg_pos_index is not None:
        if not isinstance(arg_pos_index, int):
            # BUGFIX: message previously reported ``remove_kwarg``.
            raise TypeError('arg_pos_index: accepts an index integer!\r'
                            'got: {0}'.format(arg_pos_index))
        # positional args ... check the position
        if in_args:
            try:
                found_arg = in_args[arg_pos_index]
            except IndexError:
                pass  # not supplied positionally; fall through to kwargs
    if found_arg is _missing:
        found_arg = in_kwargs.get(arg_tag, default_value)
    # prune the kwarg even when the value came from the positional args
    if remove_kwarg and arg_tag in in_kwargs:
        del in_kwargs[arg_tag]
    return found_arg, in_kwargs
# -------------------------------------------------------------------------
# --------------------------------------------------------------------------
def set_synth_arg_kwarg(inst, arg_pos_index, arg_tag, in_args, in_kwargs,
                        remove_kwarg=True, default_value=None, set_anyway=True):
    """
    Uses find_arg and sets a property on an object.

    :param inst: object to receive the attribute/property.
    :param arg_pos_index: positional index forwarded to find_arg.
    :param arg_tag: keyword name forwarded to find_arg; also the attribute
        name set on ``inst``.
    :param remove_kwarg: forwarded to find_arg (prunes the kwarg).
    :param default_value: forwarded to find_arg.
    :param set_anyway: if the object has the property already, set it anyway.
    :return: tuple ``(found_arg, in_kwargs)``.
    """
    # find the argument, or set to default value
    found_arg, in_kwargs = find_arg(arg_pos_index, arg_tag, remove_kwarg,
                                    in_args, in_kwargs,
                                    default_value)
    # make sure the object doesn't already have this property
    try:
        hasattr(inst, arg_tag)  # <-- check if property exists
        # NOTE(review): hasattr() returns a bool and never raises, so this
        # branch always runs; the outer ``except`` is only reached when
        # setattr() below fails (the inner ``raise e`` re-raises into it),
        # and then a synthesized property is attempted instead.
        if set_anyway:
            try:
                setattr(inst, arg_tag, found_arg)  # <-- try to set
            except Exception as e:
                raise e
    except:
        try:
            # NOTE(review): synthetic_property() returns None, so found_arg
            # is clobbered to None on this path -- TODO confirm intent.
            found_arg = synthetic_property(inst, arg_tag, found_arg)
        except Exception as e:
            raise e
    return found_arg, in_kwargs
# --------------------------------------------------------------------------
# -------------------------------------------------------------------------
def walk_up_dir(in_path, dir_tag='foo'):
    '''
    Mimic something like os.walk, but walks up the directory tree.
    Walks up from *in_path* looking for a dir with the name *dir_tag*.

    :param in_path: the path to start in.
    :param dir_tag: the name of the directory above us we are looking for
        (compared case-sensitively -- confirm if case-insensitive matching
        is needed on Windows).
    :return: the Path of the matching directory, or None if not found.
    '''
    # BUGFIX: the original ignored ``in_path`` and started from ``__file__``,
    # and called ``.norm_case().name()`` / ``.parent()`` as methods, which
    # does not match the property-style access used elsewhere in this file.
    path = Path(in_path).absolute()
    while 1:
        if path.name == dir_tag:
            return path
        # climb one level; an empty tail means we reached the root
        path, tail = path.parent, path.name
        if len(tail) == 0:
            return None
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
def return_stub(stub):
    '''Take a file name (stub) and return the directory containing it,
    searching upward from this module's own location (not the CWD).

    :param stub: file name to look for while walking up.
    :return: directory Path of the stub, or "" when it is not found.
    '''
    # to do: refactor to pathlib.Path
    # NOTE: uses the third-party ``unipath`` API (method-style .isfile()).
    from unipath import Path
    dir_last_file = None
    # NOTE(review): dir_last_file is always None here, so this guard is
    # always taken -- possibly a leftover from a caching scheme.
    if dir_last_file is None:
        path = Path(__file__).absolute()
        while 1:
            path, tail = (path.parent, path.name)
            newpath = Path(path, stub)
            if newpath.isfile():
                break
            if (len(tail) == 0):
                # reached the filesystem root without finding the stub
                path = ""
                _LOGGER.debug('~ Debug Message: I was not able to find the '
                              'path to that file (stub) in a walk-up from currnet path')
                break
        dir_last_file = path
    return dir_last_file
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# direct call for testing methods functions
if __name__ == "__main__":
'''To Do: Document'''
# constants for shared use.
_G_DEBUG = True
# happy _LOGGER.info
_LOGGER.info("# {0} #".format('-' * 72))
_LOGGER.info('~ constants.py ... Running script as __main__')
_LOGGER.info("# {0} #\r".format('-' * 72))
cwd = Path(os.getcwd())
# This grabs pythons known paths
_KNOWN_SITEDIR_PATHS = list(sys.path) # this appears to give me a somehow malformed syspath?
_KNOWN_SITEDIR_PATHS = site._init_pathinfo()
# this is just a debug developer convenience _LOGGER.info (for testing acess)
if _G_DEBUG:
import pkgutil
_LOGGER.info('Current working dir: {0}'.format(cwd))
search_path = ['.'] # set to None to see all modules importable from sys.path
all_modules = [x[1] for x in pkgutil.iter_modules(path=search_path)]
_LOGGER.info('All Available Modules in working dir: {0}\r'.format(all_modules))
# test toUnixPath
# assumes the current working directory (cwd) is <ly>\\dev\\Gems\\DccScriptingInterface
test_path = Path(cwd, 'LyPy', 'si_shared', 'common', 'core_utils.py')
safeTest = to_unix_path(test_path)
_LOGGER.info("Unix format: '{0}'".format(safeTest))
_LOGGER.info('')
# test dirTrimFollowingSlash
short_path = Path(cwd)
_LOGGER.info("Original: '{0}'".format(short_path))
short_path = to_unix_path(short_path)
_LOGGER.info("Unix: '{0}'".format(short_path))
trimTest = dir_trim_following_slash(short_path)
_LOGGER.info("Trimmed: '{0}'".format(trimTest))
_LOGGER.info('')
# test gather_paths_of_type_from_dir
extTest = '*.py'
fileList = gather_paths_of_type_from_dir(os.getcwd(), extTest, use_spinner=True)
_LOGGER.info('Found {0}: {1}'.format(extTest, len(fileList)))
_LOGGER.info('')
# test weAreFrozen
# none
# test modulePath
modulePathTest = module_path()
_LOGGER.info("This Module: '{0}'".format(modulePathTest))
modulePathTest = to_unix_path(modulePathTest)
_LOGGER.info("This module unix: '{0}'".format(modulePathTest))
_LOGGER.info('')
# test checkstub_getpath
stubTest = get_stub_check_path(__file__)
_LOGGER.info("Stub Path: '{0}'".format(stubTest))
# reorderSysPaths test
pkgTestPath = Path(cwd, 'LyPy', 'si_shared', 'packagetest')
site.addsitedir(pkgTestPath)
anotherTestPath = Path(cwd, 'LyPy', 'si_shared', 'dev')
site.addsitedir(anotherTestPath)
# pass in the previous list we retreived earlier
_KNOWN_SITEDIR_PATHS = reorder_sys_paths(_KNOWN_SITEDIR_PATHS) # I think this is broken,
# I get back this as one of the paths:
# G:\depot\gallowj_PC1_lrgWrlds\dev\Gems\DccScriptingInterface\Shared\Python\LyPyCommon\PYTHONPATH
_LOGGER.info('done')
pass
# checkPathExists test
# pathSplitAll test
# walkUp test
# test synthesize
# to do: write test
# test findArg
# to do: write test
# test setSynthArgKwarg
# to do: write test
|
from asmdot import * # pylint: disable=W0614
@handle_command_line()
class NimEmitter(Emitter):
    """asmdot Emitter that renders the intermediate representation as Nim
    source code: one assembler module per architecture plus a test file."""

    @property
    def language(self):
        # Target-language identifier used by the asmdot driver.
        return 'nim'

    @property
    def filename(self):
        # Destination of the generated assembler module for this arch.
        return f'asmdot/private/{self.arch}.nim'

    @property
    def test_filename(self):
        # Destination of the generated test suite for this arch.
        return f'test/test{self.arch}.nim'

    def get_operator(self, op: Operator) -> str:
        """Return the Nim spelling of an IR operator.

        Bitwise and logical operators share the same keyword in Nim;
        anything not in the table falls back to the operator's own symbol.
        """
        dic = {
            OP_BITWISE_AND: 'and',
            OP_BITWISE_OR : 'or',
            OP_BITWISE_XOR: 'xor',
            OP_AND: 'and',
            OP_OR : 'or',
            OP_XOR: 'xor',
            OP_SHL: 'shl',
            OP_SHR: 'shr'
        }
        if op in dic:
            return dic[op]
        else:
            return op.op

    def get_function_name(self, function: Function) -> str:
        # Capitalize names that would collide with Nim keywords.
        if function.initname in ('and', 'div', 'or', 'xor'):
            return function.initname.capitalize()
        else:
            return function.initname

    def get_builtin_name(self, builtin: Builtin) -> str:
        # The x86 prefix builtin has a dedicated Nim proc name.
        if builtin is BUILTIN_X86_PREFIX:
            return 'getPrefix'
        else:
            return builtin.name

    def write_footer(self):
        """Emit a stub string-dispatch ``assemble`` proc at module end."""
        self.writeline('proc assemble*(buf: var seq[byte], opcode: string, params: varargs[Any]): bool =')
        self.indent += 1
        self.writelinei('return false')
        # for fun in self.functions:
        #   args = ', '.join([ f'' for name, ])
        self.indent -= 1

    def write_expr(self, expr: Expression):
        """Recursively render one IR expression as Nim source."""
        if isinstance(expr, Binary):
            self.write('(', expr.l, ' ', expr.op, ' ', expr.r, ')')
        elif isinstance(expr, Unary):
            self.write(expr.op, expr.v)
        elif isinstance(expr, Ternary):
            # Nim conditional expression: (if c: a else: b).
            self.write('(if ', expr.condition, ': ', expr.consequence, ' else: ', expr.alternative, ')')
        elif isinstance(expr, Var):
            self.write(expr.name)
        elif isinstance(expr, Call):
            self.write(expr.builtin, '(', join_any(', ', expr.args), ')')
        elif isinstance(expr, Literal):
            # Map the literal's type id to a Nim numeric suffix
            # (uintN -> 'uN, intN -> 'iN); 'nop' means no suffix.
            t = replace_pattern({ r'uint(\d+)': r'u\1', r'int(\d+)': r'i\1', r'.+': 'nop' },
                                expr.type.under.id)
            if t == 'nop':
                self.write(expr.value)
            else:
                self.write(expr.value, '\'', t)
        else:
            raise UnsupportedExpression(expr)

    def write_stmt(self, stmt: Statement):
        """Recursively render one IR statement as Nim source."""
        if isinstance(stmt, Assign):
            self.writelinei(stmt.variable, ' = ', stmt.value)
        elif isinstance(stmt, Conditional):
            self.writelinei('if ', stmt.condition, ':')
            with self.indent.further():
                self.write_stmt(stmt.consequence)
            if stmt.alternative:
                self.writelinei('else:')
                with self.indent.further():
                    self.write_stmt(stmt.alternative)
        elif isinstance(stmt, Block):
            for s in stmt.statements:
                self.write_stmt(s)
        elif isinstance(stmt, Set):
            if stmt.type.under in (TYPE_U8, TYPE_I8):
                # Single bytes can be appended to the buffer directly.
                self.writelinei('buf.add ', stmt.value)
            else:
                # Multi-byte values go through the endian-aware writers.
                endian = 'writeBE' if self.bigendian else 'writeLE'
                self.writelinei('buf.', endian, ' cast[', stmt.type.under, '](', stmt.value, ')')
        elif isinstance(stmt, Define):
            self.writelinei(f'var {stmt.name} = ', stmt.value)
        else:
            raise UnsupportedStatement(stmt)

    def write_function(self, fun: Function):
        """Emit one exported assembler proc for *fun*."""
        name = fun.name
        self.write(f'proc {name}*(buf: var seq[byte]')
        needs_underlying = False
        # NOTE(review): the loop variable shadows ``name`` above; harmless
        # here since ``name`` is not used after this point.
        for name, typ, _ in fun.params:
            self.write(f', {name}: {typ}')
            if typ.underlying:
                needs_underlying = True
        self.write(') = \n')
        self.indent += 1
        if needs_underlying:
            # Re-declare params under their usage type inside the proc.
            self.write('var\n', indent=True)
            with self.indent.further():
                for name, _, usagetyp in fun.params:
                    self.write(f'{name} = {usagetyp} {name}\n', indent=True)
            self.write('\n')
        for condition in fun.conditions:
            self.write('assert ', condition, '\n', indent=True)
        for stmt in fun.body:
            self.write_stmt(stmt)
        self.write('\n\n')
        self.indent -= 1

    def write_decl(self, decl: Declaration):
        """Emit a type declaration (pure enum or distinct type) + helpers."""
        if isinstance(decl, Enumeration):
            self.write('type ', decl.type, '* {.pure.} = enum ## ', decl.descr, '\n')
            for name, value, descr, _ in decl.members:
                self.write(' ', name, ' = ', value, ' ## ', descr, '\n')
            self.write('\n\n')
            # Extra members become templates on the enum type.
            for name, value, descr, _ in decl.additional_members:
                self.write('template ', name, '*(typ: type ', decl.type, '): ', decl.type, ' =\n')
                self.write(' ## ', descr, '\n')
                self.write(' ', value, '\n\n')
            if decl.flags:
                # Flag enums get +, and, or helpers on the byte value.
                self.write('proc `+`*(a, b: ', decl.type, '): ', decl.type, ' =\n')
                self.write(' ', decl.type, '(byte(a) + byte(b))\n')
                self.write('proc `and`*(a, b: ', decl.type, '): ', decl.type, ' =\n')
                self.write(' ', decl.type, '(byte(a) and byte(b))\n')
                self.write('proc `or`*(a, b: ', decl.type, '): ', decl.type, ' =\n')
                self.write(' ', decl.type, '(byte(a) or byte(b))\n\n')
        elif isinstance(decl, DistinctType):
            self.write('type ', decl.type, '* = distinct ', decl.type.underlying, ' ## ', decl.descr, '\n\n')
            if decl.constants:
                self.write('const\n')
                for name, value in decl.constants:
                    self.write(' ', name, '* = ', decl.type, ' ', value, '\n')
                self.write('\n\n')
        else:
            raise UnsupportedDeclaration(decl)

    def write_test_header(self):
        """Emit the unittest suite preamble with a fresh buffer per test."""
        self.write(f'import sequtils, unittest, ../asmdot/{self.arch}\n\n')
        self.write(f'suite "test {self.arch} assembler":\n')
        self.indent += 1
        self.writelinei('setup:')
        with self.indent.further():
            self.writelinei('var')
            with self.indent.further():
                self.writelinei('buf = newSeqOfCap[byte](100)')
        self.writeline()

    def write_test(self, test: TestCase):
        """Emit one test case: a series of calls plus an expected-bytes check."""
        self.writelinei(f'test "{test.name}":')
        self.indent += 1

        def arg_str(arg: TestCaseArgument):
            # Render a test-call argument as Nim source.
            if isinstance(arg, ArgConstant):
                return arg.const.name
            if isinstance(arg, ArgEnumMember):
                return arg.member.name
            elif isinstance(arg, ArgInteger):
                return str(arg.value)
            else:
                raise UnsupportedTestArgument(arg)

        for func, args in test.calls:
            args_str = ', '.join([ arg_str(arg) for arg in args ])
            self.writelinei('buf.', func.name, '(', args_str, ')')
        self.writeline()
        self.writelinei('check cast[seq[char]](buf) == toSeq("', test.expected_string, '".items)')
        self.writeline()
        self.indent -= 1
|
<filename>escpos/showcase.py
# -*- coding: utf-8 -*-
#
# escpos/showcase.py
#
# Copyright 2020 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import unicode_literals
import math
from datetime import datetime
from . import barcode
from .impl import epson
def showcase(printer, **kwargs):
    """All printing showcases in one call."""
    # Banner: model name expanded, vendor below it, both centered.
    printer.justify_center()
    printer.set_expanded(True)
    printer.text(printer.model.name)
    printer.set_expanded(False)
    printer.text(printer.model.vendor)
    printer.justify_left()
    printer.lf()
    # Run every section in order, cutting the paper between sections.
    # The second tuple member records whether the section takes **kwargs.
    sections = (
        (fonts_showcase, True),
        (modes_and_alignment_showcase, True),
        (text_size_showcase, True),
        (rulers_showcase, False),
        (receipt_showcase, False),
        (barcode_showcase, True),
        (qrcode_showcase, True),
    )
    for section, forwards_kwargs in sections:
        if forwards_kwargs:
            section(printer, **kwargs)
        else:
            section(printer)
        printer.cut()
def fonts_showcase(printer, **kwargs):
    """A showcase of available fonts."""
    _header(printer, 'Fonts')
    # Callers may narrow the set via the 'font_set' keyword.
    available = kwargs.get('font_set', epson.AVAILABLE_FONTS)
    for param, label in available:
        printer.init()
        printer.set_font(param)
        printer.text(label)
    printer.lf()
def modes_and_alignment_showcase(printer, **kwargs):
    """A showcase for font modes (normal, condensed, emphasized and expanded)
    and alignment (left, centered and right alignment).

    BUGFIX: now accepts (and ignores) arbitrary keyword arguments --
    :func:`showcase` forwards its ``**kwargs`` to every section, which
    previously raised TypeError here whenever any keyword was passed.
    """
    _header(printer, 'Modes and Alignment')

    def print_modes(title):
        # Print the section title expanded, then one line per font mode.
        printer.set_expanded(True)
        printer.text(title)
        printer.set_expanded(False)
        printer.text('Normal mode')
        printer.set_condensed(True)
        printer.text('Condensed mode')
        printer.set_condensed(False)
        printer.set_emphasized(True)
        printer.text('Emphasized mode')
        printer.set_emphasized(False)

    printer.init()
    printer.justify_right()
    print_modes('Right aligned')
    printer.lf()
    printer.justify_center()
    print_modes('Centered')
    printer.lf()
    printer.justify_left()
    print_modes('Left aligned')
    printer.lf()
def text_size_showcase(printer, **kwargs):
    """A showcase of various text sizes.

    :param str text: Any text eight characters long. If its longer than
        eight chars it will be truncated. If less than eight chars it will
        be completed with "X"s.
    """
    text = kwargs.get('text', 'SPAMEGGS')
    letters = text[:8].ljust(8, 'X')
    _header(printer, 'Text Size')
    printer.init()
    # Sweep the width 0..7 at maximum height.
    for w, c in zip(range(8), letters):
        printer.set_text_size(w, 7)
        printer.textout(c)
    printer.lf()
    # Sweep the height 0..7 at maximum width.
    # BUGFIX(clarity): the original reused the leaked loop variable ``w``
    # (which happened to equal 7 after the first loop); make it explicit.
    for h, c in zip(range(8), letters):
        printer.set_text_size(7, h)
        printer.textout(c)
    printer.lf(2)
def rulers_showcase(printer):
    """A showcase of various column widths."""
    cols = printer.feature.columns
    # Build one ruler long enough for the widest mode, then slice it.
    widest = max(cols.normal, cols.condensed, cols.expanded)
    ruler = '....:....!' * widest
    _header(printer, 'Rulers')
    printer.init()
    printer.text('Normal ({:d} columns)'.format(cols.normal))
    printer.text(ruler[:cols.normal])
    printer.lf()
    printer.text('Condensed ({:d} columns)'.format(cols.condensed))
    printer.set_condensed(True)
    printer.text(ruler[:cols.condensed])
    printer.set_condensed(False)
    printer.lf()
    printer.text('Expanded ({:d} columns)'.format(cols.expanded))
    printer.set_expanded(True)
    printer.text(ruler[:cols.expanded])
    printer.set_expanded(False)
    printer.lf()
def receipt_showcase(printer):
    """A showcase of a fictional POS receipt."""
    single = _get_ruler(printer)
    double = _get_ruler(printer, '=')
    printer.init()
    # Receipt header
    printer.text(double)
    printer.set_expanded(True)
    printer.justify_center()
    printer.text('RECEIPT #5678')
    printer.justify_left()
    printer.set_expanded(False)
    printer.text(single)
    printer.text('{:%x %X} Session #{:d}'.format(datetime.now(), 42))
    # Item table, printed condensed through a column-format mask.
    mask = _build_item_mask(
        printer.feature.columns.condensed,
        alignments='><>^>>',
        column_widths=[0.1, 0.4, 0.15, 0.05, 0.15, 0.15],
    )
    rows = (
        ('ID', 'Product', 'Qty', '', 'Price', 'Subtotal'),
        ('1234', 'SAMPLE', '2', 'x', '0.25', '0.50'),
        ('1235', 'OTHER SAMPLE', '1', 'x', '1.50', '1.50'),
        ('1237', 'ANOTHER ONE', '3', 'x', '0.75', '2.25'),
    )
    printer.set_condensed(True)
    for row in rows:
        printer.text(mask.format(*row))
    printer.set_condensed(False)
    # Totals footer
    printer.text(single)
    printer.set_emphasized(True)
    printer.text('TOTAL 4.25')
    printer.set_emphasized(False)
    printer.text(double)
    printer.lf()
def barcode_showcase(printer, **kwargs):
    """A showcase of 1-dimensional barcodes."""
    height = kwargs.get('barcode_height', 120)
    width = kwargs.get('barcode_width', barcode.BARCODE_NORMAL_WIDTH)
    hri = kwargs.get('barcode_hri', barcode.BARCODE_HRI_BOTTOM)
    samples = (
        ('EAN-8', 'ean8', '12345670'),
        ('EAN-13', 'ean13', '1234567890128'),
        ('Code128-A', 'code128', '12345'),
    )
    _header(printer, 'Barcodes')
    printer.init()
    for title, method_name, payload in samples:
        printer.set_emphasized(True)
        printer.text(title)
        printer.set_emphasized(False)
        # dispatch to the printer's symbology method by name
        render = getattr(printer, method_name)
        render(
            payload,
            barcode_hri=hri,
            barcode_height=height,
            barcode_width=width
        )
    printer.lf()
def qrcode_showcase(printer, **kwargs):
    """A showcase of QRCodes in various configurations.

    FIX: the kwargs collector was misspelled ``kwags``; renamed for
    consistency with the other sections (no caller impact -- callers pass
    keywords, not the collector name).
    """
    data = kwargs.get('data', 'https://github.com/base4sistemas/pyescpos')
    _header(printer, 'QRCode')
    printer.init()
    # showcase all default values
    printer.text('QRCode (all defaults)')
    printer.qrcode(data)
    printer.lf()

    # showcase all possible module size variations (ECC fixed at L)
    def _qrcode_ecc_level_l(module_size, title):
        printer.text('QRCode')
        printer.text('Module size: {!r} ({})'.format(module_size, title))
        printer.text(' ECC level: L')
        printer.qrcode(
            data,
            qrcode_module_size=module_size,
            qrcode_ecc_level=barcode.QRCODE_ERROR_CORRECTION_L
        )
        printer.lf()

    for value, title in barcode.QRCODE_MODULE_SIZES:
        _qrcode_ecc_level_l(value, title)

    # showcase all possible error correction level variations (size fixed at 4)
    def _qrcode_module_size_4(ecc_level, ecc_title):
        printer.text('QRCode')
        printer.text('Module size: 4')
        printer.text(' ECC level: {!r} ({})'.format(ecc_level, ecc_title))
        printer.qrcode(
            data,
            qrcode_module_size=barcode.QRCODE_MODULE_SIZE_4,
            qrcode_ecc_level=ecc_level
        )
        printer.lf()

    for value, title in barcode.QRCODE_ERROR_CORRECTION_LEVELS:
        _qrcode_module_size_4(value, title)
def _header(printer, title):
    """Print a centered section *title* between two full-width rulers."""
    ruler = _get_ruler(printer)
    printer.init()
    printer.text(ruler)
    printer.justify_center()
    printer.text(title)
    printer.justify_left()
    printer.text(ruler)
def _get_ruler(printer, char='-'):
    """Return a full-width horizontal ruler made of *char*."""
    width = printer.feature.columns.normal
    return char * width
def _build_item_mask(width, alignments=None, column_widths=None, gap=1):
    """Build a ``str.format`` mask laying columns across *width* chars.

    :param width: total character width available (e.g. condensed columns).
    :param alignments: one format-alignment char per column, for example
        "<>^" (left, right, center).
    :param column_widths: fraction of the usable width per column
        (floats summing to at most 1.0 -- see the receipt_showcase caller).
    :param gap: spaces inserted between columns.
    :return: a format mask such as ``'{:>4s} {:<16s} ...'``.
    :raises ValueError: on mismatched specs or widths summing over 1.0.
    """
    if len(alignments) != len(column_widths):
        raise ValueError('Alignment spec and number of columns must match')
    # BUGFIX: widths are *fractions* (the only caller passes values summing
    # to 1.0); the original compared against 100, so the guard could never
    # trigger for fractional input.
    if sum(column_widths) > 1:
        raise ValueError('Sum of column widths must not be greater than 100%')
    # Reserve the inter-column gaps (plus one trailing gap).
    width = width - (len(alignments) * gap) - gap
    columns = []
    for i, perc in enumerate(column_widths):
        col_len = int(math.ceil(perc * width))
        columns.append('{{:{:s}{:d}s}}'.format(alignments[i], col_len))
    return (' ' * gap).join(columns)
|
<gh_stars>10-100
import pickle
from pathlib import Path
import numpy as np
import pandas as pd
import dill
from os import listdir
import json
from tqdm import tqdm
import re
import subprocess
import random
import time
from multiprocessing import Pool
from shutil import rmtree
from os import path
import sys
sys.path.append("TiramisuCodeGenerator")
from TiramisuCodeGenerator import code_generator
class cluster_utilities():
def __init__(self, data_path, generator_script, wrappers_script, execute_script, log_path, batchName, nb_nodes, tmp_files_dir):
self.batchName = batchName
self.data_path = Path(data_path)
self.generator_script = Path(generator_script)
self.wrappers_script = Path(wrappers_script)
self.execute_script = Path(execute_script)
self.log_path = Path(log_path)
self.nb_nodes=nb_nodes
self.tmp_files_dir = tmp_files_dir
self.compile_jobs_ids = self.wrap_jobs_ids = self.exec_jobs_ids = '1'
if path.exists(self.log_path):
rmtree(self.log_path)
if path.exists(self.tmp_files_dir+ "job_files_"+self.batchName):
rmtree(self.tmp_files_dir+ "job_files_"+self.batchName)
def generate_prog_list(self):
"""
Create the list of programs that will be executed.
The result is a list of tuples of the following format :
(function_id, schedule_id)
Example:
(function524, function524_schedule_125)
"""
# Path to where to store the list of programs
dst_path = Path(self.tmp_files_dir + "progs_list_"+self.batchName+".pickle")
self.progs_list = []
for func_path in self.data_path.iterdir():
if (str(func_path.parts[-1]).startswith('.')):
continue
# We discard programs that have no schedule.
# We don't need to execute those programs as they just have a speedup of 1,
# and they have no programs with schedules.
# If you want them in the dataset, just include them with speedup = 1.
if len(list(func_path.iterdir())) <= 2:
rmtree(str(func_path))
# func_path = func_path.rename(str(self.data_path)+'/_'+str(func_path.parts[-1]))
continue
for sched_path in func_path.iterdir():
if not sched_path.is_dir():
continue
if (str(sched_path.parts[-1]).startswith('.')):
continue
func_id = func_path.parts[-1]
sched_id = sched_path.parts[-1]
self.progs_list.append((func_id, sched_id))
random.Random(42).shuffle(self.progs_list) # shuffling the prog list for having a similar exec time per node
with open(dst_path, "wb") as f:
pickle.dump(self.progs_list, f)
print("Total number of schedules generated " + str(len(self.progs_list)) )
def generate_compile_jobs(self):
"""
Generate the job files needed by the sbatch command.
Here's an example of a job file :
#!/bin/bash
#SBATCH --job-name=comp2
#SBATCH --output=log/log_comp_2_6842_10263
#SBATCH -N 1
#SBATCH --exclusive
#SBATCH -p research
srun python3 compile_tiramisu_code.py 6842 10263 2
"""
# Path to the list of programs
self.progs_list_path = Path(self.tmp_files_dir + "progs_list_"+self.batchName+".pickle")
# Path where to store the job files
dst_path = Path(self.tmp_files_dir+ "job_files_"+self.batchName)
dst_path.mkdir(parents=True, exist_ok=True)
# Path to the script that will be distributed
# self.generator_script = Path("/data/scratch/mmerouani/data_scripts/compile_tiramisu_code.py")
# Path to where to store the logs of the jobs
self.log_path = Path(self.tmp_files_dir + "log_"+self.batchName+"/")
self.log_path.mkdir(parents=True, exist_ok=True)
# Content of the job files
job_file_content = "\
#!/bin/bash\n\
#SBATCH --job-name=comp{2}_{3}\n\
#SBATCH --output=%s/log_comp_{2}_{0}_{1}\n\
#SBATCH -N 1\n\
#SBATCH --exclusive\n\
#SBATCH -p lanka-v3\n\
#SBATCH --exclude=lanka21,lanka33\n\
srun python3 %s {0} {1} {3}" % (str(self.log_path), str(self.generator_script)) # This replaces the %s
# SBATCH --exclude=lanka21,lanka04\n\
# Content of the submit script
submit_script_content = '\
#!/bin/bash\n\
\n\
for file in %sjob_files_%s/compile_job*\n\
do\n\
sbatch "$file"\n\
done' % (self.tmp_files_dir, self.batchName) # This replaces the %s
with open(self.tmp_files_dir + 'submit_compile_jobs_' + self.batchName + '.sh', "w") as f:
f.write(submit_script_content)
with open(self.progs_list_path, "rb") as f:
self.progs_list = pickle.load(f)
nb_progs = len(self.progs_list)
progs_per_node = nb_progs // self.nb_nodes
for i in range(self.nb_nodes):
# Each node will process the programs in the range progs_list[start, end)
start = i * progs_per_node
if i < self.nb_nodes - 1:
end = (i + 1) * progs_per_node
else:
end = nb_progs
with open(dst_path / ("compile_job_%s_%s.batch" % (start, end)), "w") as f:
f.write(job_file_content.format(start, end, i, self.batchName))
def submit_compile_jobs(self):
exec_output = subprocess.check_output(['sh', self.tmp_files_dir + 'submit_compile_jobs_' + self.batchName + '.sh'])
print(exec_output.decode("utf-8"))
self.compile_jobs_ids = ','.join(re.findall(r'\d+',exec_output.decode("utf-8")))
def check_compile_progress(self):
print(subprocess.check_output(["squeue --format='%.18i %.9P %.15j %.8u %.2t %.10M %.6D %R'| grep 'comp\|JOBID' "], shell=True).decode("utf-8"))
print(subprocess.check_output(['tail -n 3 '+ str(self.log_path) + '/log_comp*'], shell=True).decode("utf-8"))
def generate_wrapper_jobs(self):
"""
Generate the job files needed by the sbatch command.
Two type of job files are generated :
- One for editing and compiling the wrappers.
- The other for executing the compiled wrappers and measuring execution time.
Here's an example of a job file of type execute :
#!/bin/bash
#SBATCH --job-name=exec17
#SBATCH --output=/data/scratch/k_abdous/autoscheduling_tiramisu/execute_all/log/log_exec_17_1096959_1161486
#SBATCH -N 1
#SBATCH --exclusive
srun python3 /data/scratch/k_abdous/autoscheduling_tiramisu/execute_all/execute_programs.py 1096959 1161486 17
"""
# Path to the list of programs
self.progs_list_path = Path(self.tmp_files_dir + "progs_list_"+self.batchName+".pickle")
# Path where to store the job files
# This script will use two subdirectories (don't forget to create them first) : wrappers and execute
dst_path = Path(self.tmp_files_dir + "job_files_"+self.batchName)
Path(dst_path / "wrappers").mkdir(parents=True, exist_ok=True)
Path(dst_path / "execute").mkdir(parents=True, exist_ok=True)
# Path to the scrip "wrappers"t that edits and compiles the wrappers
# If your wrappers are already in the good format, point this script to compile_tiramisu_wrappers.py
# wrappers_script = Path("/data/scratch/mmerouani/data_scripts/compile_tiramisu_wrappers.py")
# Path to the script that execute the compiled wrappers
# execute_script = Path("/data/scratch/mmerouani/data_scripts/execute_programs.py")
# Path to where to store the logs of the jobs
log_path = Path(self.tmp_files_dir + "log_"+self.batchName+"/")
log_path.mkdir(parents=True, exist_ok=True)
# Content of the job files of type wrappers
wrappers_job = "\
#!/bin/bash\n\
#SBATCH --job-name=wrap{2}_{3}\n\
#SBATCH --output=%s/log_wrap_{2}_{0}_{1}\n\
#SBATCH -N 1\n\
#SBATCH --exclusive\n\
#SBATCH -p lanka-v3\n\
#SBATCH --exclude=lanka21,lanka33\n\
srun python3 %s {0} {1} {3}" % (str(self.log_path), str(self.wrappers_script)) # This replaces the %s
#SBATCH --exclude=lanka21,lanka04\n\
# Content of the job files of type execute
execute_job = "\
#!/bin/bash\n\
#SBATCH --job-name=exec{2}_{3}\n\
#SBATCH --output=%s/log_exec_{2}_{0}_{1}\n\
#SBATCH -N 1\n\
#SBATCH --exclusive\n\
#SBATCH -p lanka-v3\n\
#SBATCH --exclude=lanka21,lanka33\n\
srun python3 %s {0} {1} {2} {3}" % (str(self.log_path), str(self.execute_script)) # This replaces the %s
#SBATCH --exclude=lanka21,lanka04\n\
submit_wrap_script_content='\
#!/bin/bash \n\
\n\
for file in %sjob_files_%s/wrappers/wrappers_job*\n\
do\n\
sbatch --dependency=afterok:%s "$file"\n\
done' % (self.tmp_files_dir, self.batchName, self.compile_jobs_ids) # This replaces the %s
with open(self.tmp_files_dir + 'submit_wrapper_jobs_' + self.batchName + '.sh', "w") as f:
f.write(submit_wrap_script_content)
with open(self.progs_list_path, "rb") as f:
self.progs_list = pickle.load(f)
nb_progs = len(self.progs_list)
progs_per_node = nb_progs // self.nb_nodes
for i in range(self.nb_nodes):
# Each node will process the programs in the range progs_list[start, end)
start = i * progs_per_node
if i < self.nb_nodes - 1:
end = (i + 1) * progs_per_node
else:
end = nb_progs
with open(dst_path / "wrappers" / ("wrappers_job_%s_%s.batch" % (start, end)), "w") as f:
f.write(wrappers_job.format(start, end, i, self.batchName))
with open(dst_path / "execute" / ("execute_job_%s_%s.batch" % (start, end)), "w") as f:
f.write(execute_job.format(start, end, i, self.batchName))
def submit_wrapper_compilation_jobs(self):
wrap_cmd_output = subprocess.check_output(['sh', self.tmp_files_dir + 'submit_wrapper_jobs_' + self.batchName + '.sh'])
print(wrap_cmd_output.decode("utf-8"))
self.wrap_jobs_ids = ','.join(re.findall(r'\d+',wrap_cmd_output.decode("utf-8")))
def check_wrapper_compilation_progress(self):
print(subprocess.check_output(["squeue --format='%.18i %.9P %.15j %.8u %.2t %.10M %.6D %R' | grep 'wrap\|JOBID' "], shell=True).decode("utf-8"))
print(subprocess.check_output(['tail -n 3 '+ str(self.log_path) + '/log_wrap*'], shell=True).decode("utf-8"))
def generate_execution_slurm_script(self):
submit_exec_script_content='\
#!/bin/bash \n\
\n\
for file in %sjob_files_%s/execute/execute_job*\n\
do\n\
sbatch --dependency=afterok:%s "$file"\n\
done' % (self.tmp_files_dir, self.batchName, self.wrap_jobs_ids) # This replaces the %s
with open(self.tmp_files_dir + 'submit_execute_jobs_' + self.batchName + '.sh', "w") as f:
f.write(submit_exec_script_content)
def submit_execution_jobs(self):
exec_cmd_output = subprocess.check_output(['sh', self.tmp_files_dir + 'submit_execute_jobs_' + self.batchName + '.sh'])
print(exec_cmd_output.decode("utf-8"))
self.exec_jobs_ids = ','.join(re.findall(r'\d+',exec_cmd_output.decode("utf-8")))
def check_execution_progress(self):
    """Print the SLURM queue entries and the log tails of the execution jobs."""
    queue_cmd = "squeue --format='%.18i %.9P %.15j %.8u %.2t %.10M %.6D %R' | grep 'exec\|JOBID' "
    tail_cmd = 'tail -n 3 ' + str(self.log_path) + '/log_exec*'
    # shell=True so that the pipe / glob are expanded by the shell
    print(subprocess.check_output([queue_cmd], shell=True).decode("utf-8"))
    print(subprocess.check_output([tail_cmd], shell=True).decode("utf-8"))
class annotation_utilities():
    """Regex-based parser for Tiramisu C++ source files.

    Extracts constants, inputs, computations, iterators and scheduling
    information (tiling, interchange, unrolling, computation ordering) from
    the raw source text, and serializes them to JSON annotation strings used
    by the dataset-generation pipeline.

    NOTE(review): all parsing is done with regular expressions on raw source,
    so only the code patterns the regexes anticipate are supported (one
    declaration per line, no nested parentheses in accesses, ...).
    """

    def __init__(self):
        pass

    def get_function_annotation(self, filename):
        """Parse an (unscheduled) Tiramisu source file and return a JSON string.

        The JSON describes the function: its constants, iterators, inputs and
        computations (including per-access matrices and operation counts).
        """
        with open(filename,'r') as file:
            code = file.read()
        # Constants
        cst_lines=re.findall(r'\n\s*constant\s+.*;',code) # list of lines [constant c0("c0", 64), c1("c1", 128);]
        cst_def=[]
        for line in cst_lines:
            cst_def.extend(re.findall(r'\w+\s*\(\s*\"\w+\s*\"\s*,\s*\d+\s*\)',line)) # list ['c0("c0", 64)','c1("c1", 128)','c20("c20", 64)'...
        constants_dict=dict()
        for cst in cst_def:
            name= re.findall(r'(\w+)\s*\(', cst)[0] #the name before parenthesis
            # name2= re.findall(r'\"\w+\"', cst)[0][1:-1] #the name between ""
            value= re.findall(r'(\d+)\s*\)', cst)[0] #the value before )
            constants_dict[name]=int(value)
        # Inputs
        input_lines = re.findall(r'\n\s*input\s+.*;',code) # gets the lines where inputs are defined
        input_defs = []
        buffer_id=1  # ids are assigned in declaration order: inputs first, then computations
        for line in input_lines:
            input_defs.extend(re.findall(r'\w*\s*\(\s*\"\w*\"\s*,\s*{\s*\w*\s*(?:,\s*\w*\s*)*}\s*,\s*\w*\s*\)',line)) # get the input buffers definition
        inputs_dict = dict()
        for inp in input_defs:
            name = re.findall(r'(\w*)\s*\(', inp)[0]
            inp_interators = re.findall(r'\w+',re.findall(r'{([\w\s,]*)}',inp)[0]) # gets a list of iterators
            inp_type = re.findall(r',\s*(\w+)\s*\)',inp)[0] # gets data type of the input
            inputs_dict[name]=dict()
            inputs_dict[name]['id'] = buffer_id
            buffer_id += 1
            inputs_dict[name]['iterators_list']=inp_interators
            inputs_dict[name]['data_type']=inp_type
        # Computations
        comp_lines = re.findall(r'\n\s*computation\s+.*;',code)
        computation_dict=dict()
        for comp_def in comp_lines: #assuming that each computation is declared in a separate line
            computation_is_reduction = False #by default the computation is not a reduction
            name = re.findall(r'(\w*)\s*\(\s*\"',comp_def)[0]
            comp_interators = re.findall(r'\w+',re.findall(r'{([\w\s,]*)}',comp_def)[0]) # gets a list of iterators
            comp_assingment = re.findall(r'}\s*,\s*(.*)\);', comp_def)[0] # gets the assignment expression
            comp_accesses = re.findall(r'\w*\((?:\s*[^()]\s*,?)+\)',comp_assingment) # gets the list of buffer accesses from the assignment
            if (comp_accesses == []): #if assignment is not specified in computation definition, search for assignment with the .set_expression expression
                comp_assingment = re.findall(r''+name+'\.set_expression\(\s*(.*)\);',code)# gets the assignment expression
                if (comp_assingment!=[]): # if the .set_expression found
                    comp_assingment = comp_assingment[0]
                    comp_accesses = re.findall(r'\w*\((?:\s*[^()]\s*,?)+\)',comp_assingment)# gets the list of buffer accesses from the assignment
            if (comp_assingment != []):
                nb_addition = len(re.findall(r'\)\s*\+|\+\s*\w*[^)]\(',comp_assingment)) #gets the number of additions in assignment, supports input + input , cst + input, input + cst
                nb_multiplication = len(re.findall(r'\)\s*\*|\*\s*\w*[^)]\(',comp_assingment)) #gets the number of multiplications in assignment
                nb_division = len(re.findall(r'\)\s*\/|\/\s*\w*[^)]\(',comp_assingment)) #gets the number of divisions in assignment
                nb_subtraction = len(re.findall(r'\)\s*\-|\-\s*\w*[^)]\(',comp_assingment)) #gets the number of subtractions in assignment
            else:
                nb_addition=0
                nb_multiplication=0
                nb_division=0
                nb_subtraction=0
            accesses_list=[]
            for access in comp_accesses:
                access_is_reduction = False #by default the access is not a reduction
                accessed_buf_name = re.findall(r'(\w*)\s*\(',access)[0] # gets the accessed input buffer name
                if (accessed_buf_name == name): # if the computation itself is used in assignment, we assume that it is a reduction
                    access_is_reduction = True
                    computation_is_reduction = True
                    accessed_buf_id = buffer_id  # the computation's own (not-yet-assigned) buffer id
                    access_matrix = np.zeros([len(comp_interators), len(comp_interators)+1], dtype = int) # initialises the access matrix
                else: #the accessed is an input buffer
                    accessed_buf_id = inputs_dict[accessed_buf_name]['id'] # gets the accessed input buffer id
                    access_matrix = np.zeros([len(inputs_dict[accessed_buf_name]['iterators_list']), len(comp_interators)+1], dtype = int) # initialises the access matrix
                dim_accesses = re.findall(r'[\w\s\+\-\*\/]+', re.findall(r'\(([\w\s,+\-*/]*)\)',access)[0]) # returns a list where n'th element is the access to the n'th dimension of the buffer
                for i, dim_acc in enumerate(dim_accesses):
                    left_coef_iter1, used_iterator_iter1, right_coef_iter1, left_coef_iter2, used_iterator_iter2, right_coef_iter2 = re.findall(r'(?:(\d+)\s*\*\s*)?([A-Za-z]\w*)(?:\s*\*\s*(\d+))?(?:\s*[\+\*\-]\s*(?:(\d+)\s*\*\s*)?([A-Za-z]\w*)(?:\s*\*\s*(\d+))?)?', dim_acc)[0] # gets iterator and coeficients, only supports patterns like 4*i1*2 + 6*i2*5 + 1
                    cst_shift = re.findall(r'(?:\s*[\-\+]\s*\d+)+',dim_acc) # gets the '+ cst' of the access
                    # NOTE(review): eval on source-derived text — assumes trusted input files
                    cst_shift= 0 if ( not(cst_shift) ) else eval(cst_shift[0])
                    left_coef_iter1 = 1 if (left_coef_iter1=='') else int(left_coef_iter1)
                    right_coef_iter1 = 1 if (right_coef_iter1=='') else int(right_coef_iter1)
                    coef_iter1 =left_coef_iter1*right_coef_iter1
                    j = comp_interators.index(used_iterator_iter1)
                    access_matrix[i,j]=coef_iter1
                    access_matrix[i,-1]=cst_shift
                    if (used_iterator_iter2 != ''): # a second iterator appears in this dimension's access
                        left_coef_iter2 = 1 if (left_coef_iter2=='') else int(left_coef_iter2)
                        right_coef_iter2 = 1 if (right_coef_iter2=='') else int(right_coef_iter2)
                        coef_iter2 =left_coef_iter2*right_coef_iter2
                        j = comp_interators.index(used_iterator_iter2)
                        access_matrix[i,j]=coef_iter2
                access_dict=dict()
                access_dict['access_is_reduction']=access_is_reduction
                access_dict['buffer_id']=accessed_buf_id
                access_dict['buffer_name']=accessed_buf_name
                access_dict['access_matrix']=access_matrix.tolist()
                accesses_list.append(access_dict)
            real_dimensions = re.findall(r'\w+',re.findall(r''+name+'\.store_in\s*\(\s*&\w*\s*,?\s*{?([\w\s,]*)}?',code)[0]) #get the list of this computation's dimensions that are saved to a buffer
            computation_dict[name]=dict()
            computation_dict[name]['id']=buffer_id
            buffer_id +=1
            computation_dict[name]['absolute_order'] = None # None means not explicitly ordered, this value is updated if computation ordering is specified
            computation_dict[name]['iterators']=comp_interators
            if (real_dimensions!=[]): # if dimensions are explicitly specified in the store_in instruction
                computation_dict[name]['real_dimensions'] = real_dimensions
            else: # else by default the real dimensions are the iterators
                computation_dict[name]['real_dimensions'] = comp_interators
            computation_dict[name]['comp_is_reduction']=computation_is_reduction
            computation_dict[name]['number_of_additions']=nb_addition
            computation_dict[name]['number_of_subtraction']=nb_subtraction
            computation_dict[name]['number_of_multiplication']=nb_multiplication
            computation_dict[name]['number_of_division']=nb_division
            computation_dict[name]['accesses']=accesses_list
        # Iterators
        iterator_lines=re.findall(r'\n\s*var\s+.*;',code) #list of lines [var i0("i0", 0, c0), i1("i1", 0, c1),......]
        iterator_def = []
        iterator_def_no_bounds = []
        for line in iterator_lines:
            iterator_def.extend(re.findall(r'\w+\s*\(\s*\"\w+\s*\"\s*,\s*\d+\s*,(?:\s*\w*\s*[+\-\*/]?\s*)*\w+\s*\)',line)) # find iterators with lower and upper bound defined [i0("i0", 0, c0), i1("i1", 0, c1 - 2)...]
            iterator_def_no_bounds.extend(re.findall(r'\w+\s*\(\s*\"\w+\s*\"\s*\)', line)) #finds iterators with bounds not defined [i01("i01"), i02("i02"), i03("i03") ....]
        iterators_dict=dict()
        for iterator in iterator_def:
            name=re.findall(r'(\w+)\s*\(',iterator)[0]
            lower_bound = re.findall(r',((?:\s*\w*\s*[+\-\*/]?\s*)*\w+\s*),',iterator)[0] # gets the lower bound of the iterator
            upper_bound = re.findall(r',((?:\s*\w*\s*[+\-\*/]?\s*)*\w+\s*)\)',iterator)[0] #gets the upper bound of the iterator
            iterators_dict[name]=dict()
            # NOTE(review): eval with constants_dict as locals resolves bound
            # expressions like 'c1 - 2'; assumes trusted input files
            iterators_dict[name]['lower_bound'] = eval(lower_bound,{},constants_dict) #evaluates the bounds in case it's an expression
            iterators_dict[name]['upper_bound'] = eval(upper_bound,{},constants_dict)
            iterators_dict[name]['parent_iterator'] = None # None means no parent, hierarchy attributes are set later
            iterators_dict[name]['child_iterators'] = []
            iterators_dict[name]['iterator_order'] = None # None means not explicitly ordered, this value is updated if computation ordering is specified
            iterators_dict[name]['computations_list'] = []
        for iterator in iterator_def_no_bounds:
            name=re.findall(r'(\w+)\s*\(',iterator)[0]
            iterators_dict[name]=dict()
            iterators_dict[name]['lower_bound']=None
            iterators_dict[name]['upper_bound']=None
            iterators_dict[name]['parent_iterator'] = None
            iterators_dict[name]['child_iterators'] = []
            iterators_dict[name]['iterator_order'] = None
            iterators_dict[name]['computations_list'] = []
        # Setting hierarchy attributes (parent/child links follow each computation's iterator nesting)
        for comp in computation_dict:
            for i in range(1, len(computation_dict[comp]['iterators'])):
                iterators_dict[computation_dict[comp]['iterators'][i]]['parent_iterator'] = computation_dict[comp]['iterators'][i-1]
            for i in range(0, len(computation_dict[comp]['iterators'])-1):
                iterators_dict[computation_dict[comp]['iterators'][i]]['child_iterators'].append(computation_dict[comp]['iterators'][i+1])
            iterators_dict[computation_dict[comp]['iterators'][-1]]['computations_list'].append(comp)
        # Removing duplicates in lists
        for iterator in iterators_dict:
            iterators_dict[iterator]['child_iterators'] = list(set(iterators_dict[iterator]['child_iterators']))
            iterators_dict[iterator]['computations_list'] = list(set(iterators_dict[iterator]['computations_list']))
        # Ordering
        orderings_list = self.get_ordering_sequence(code)
        # for ordering in orderings_list:
        ordering = orderings_list
        for rank, ord_tuple in enumerate(ordering):
            computation_dict[ord_tuple[0]]['absolute_order'] = rank + 1
            # update iterator order
            if (rank != 0): # if not first comp
                interator_index = computation_dict[ord_tuple[0]]['iterators'].index(ord_tuple[1])
                if (interator_index<len(computation_dict[ord_tuple[0]]['iterators']) - 1):
                    iterators_dict[computation_dict[ord_tuple[0]]['iterators'][interator_index+1]]['iterator_order'] = rank+1
                if (iterators_dict[ord_tuple[1]]['iterator_order'] == None):
                    iterators_dict[ord_tuple[1]]['iterator_order'] = rank+1
            else: # if first comp
                # NOTE(review): ordering[rank+1] assumes at least two entries in the
                # ordering sequence; raises IndexError for a single-entry ordering —
                # TODO confirm upstream guarantee
                interator_index = computation_dict[ord_tuple[0]]['iterators'].index(ordering[rank+1][1])
                if (interator_index<len(computation_dict[ord_tuple[0]]['iterators']) - 1):
                    iterators_dict[computation_dict[ord_tuple[0]]['iterators'][interator_index+1]]['iterator_order'] = rank+1
        # Creating Json
        function_name = re.findall(r'tiramisu::init\s*\(\s*\"(\w*)\"\s*\)', code)[0]
        function_json = {
            'function_name': function_name,
            'constants': constants_dict,
            'iterators': iterators_dict,
            'inputs': inputs_dict,
            'computations': computation_dict
        }
        json_dump = json.dumps(function_json, indent = 4)
        def format_array(array_str):
            # re.sub callback: collapse a multi-line JSON array onto a single line for readability
            array_str = array_str.group()
            array_str = array_str.replace('\n','')
            array_str = array_str.replace('\t','')
            array_str = array_str.replace(' ','')
            array_str = array_str.replace(',',', ')
            return array_str
        json_dump = re.sub(r'\[[\s*\w*,\"]*\]',format_array, json_dump)
        return json_dump

    def get_ordering_sequence(self, code): #returns the ordering instructions as one sequence
        """Collect all '.then(comp, iter)' ordering chains in *code* and merge them
        into a single ordered list of (computation_name, fuse_iterator) tuples;
        the very first tuple carries '' as its iterator."""
        #Ordering
        comps_order_instructions = re.findall(r'\w+(?:\s*\n*\s*\.\s*then\(\s*\w+\s*,\s*\w+\s*\))+;',code) # gets the lines where the computations are ordered eg: comp3.then(comp4, i1).then(comp0, i1020).then(comp2, i1).then(comp1, i1);
        orderings_list=[]
        for comps_order_instr in comps_order_instructions:
            first_comp_name = re.findall(r'(\w+)\s*\n*\s*.then', comps_order_instr)[0] # gets the name of the first computation {comp3} .then(...).then..
            ordered_comps_tuples = re.findall(r'.\s*then\s*\(\s*(\w+)\s*,\s*(\w+)\s*\)', comps_order_instr) # gets an ordered list (of tuples) of the rest of the computations ordering eg: comp3.then( {comp4, i1} ).then( {comp0, i1020} )
            ordered_comps_tuples.insert(0, (first_comp_name, ''))
            if (len(orderings_list)==0): #first ordering line
                orderings_list.extend(ordered_comps_tuples)
            elif (ordered_comps_tuples[0][0]==orderings_list[-1][0]): #the new ordering line starts at the end of the previous
                ordered_comps_tuples.remove(ordered_comps_tuples[0])
                orderings_list.extend(ordered_comps_tuples)
            elif (ordered_comps_tuples[-1][0]==orderings_list[0][0]): #the new ordering line ends at the beginning of the previous
                orderings_list.remove(orderings_list[0])
                orderings_list= ordered_comps_tuples + orderings_list
            else: # WARNING: this can cause an infinite loop if the input code is incoherent
                comps_order_instructions.append(comps_order_instr) # if we can't yet chain it with the other instructions, postpone the instruction till later
            # print(orderings_list)
            # print(ordered_comps_tuples)
            # raise Exception('This case of ordering is not yet supported by the parser ', comps_order_instructions,re.findall(r'tiramisu::init\s*\(\s*\"(\w*)\"\s*\)', code))
        return orderings_list

    def get_schedule_annotation(self, filename):
        """Parse a scheduled Tiramisu source file and return a JSON string describing
        the applied schedule: per-computation tiling, interchange and unrolling,
        the 'unfuse' iterators, and the loop-nest tree implied by the ordering."""
        with open(filename,'r') as file:
            code = file.read()
        tile_2d_list = re.findall(r'(\w*)\s*.\s*tile\s*\(\s*(\w*)\s*,\s*(\w*)\s*,\s*(\d*)\s*,\s*(\d*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*\);',code) # gets tuples comp0.tile(i0, i1, 32, 64, i01, i02, i03, i04); --> (comp0,i0, i1, 32, 64, i01, i02, i03, i04)
        tile_3d_list = re.findall(r'(\w*)\s*.\s*tile\s*\(\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\d*)\s*,\s*(\d*)\s*,\s*(\d*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*,\s*(\w*)\s*\);',code) # same as previous with 3d tiling
        interchange_list = re.findall(r'(\w*)\s*.\s*interchange\s*\(\s*(\w*)\s*,\s*(\w*)\s*\);', code) #gets tuples comp0.interchange(i1, i2); --> (comp0, i1, i2);
        unroll_list = re.findall(r'(\w*)\s*.\s*unroll\s*\(\s*(\w*)\s*,\s*(\d*)\s*\);', code) # gets tuples comp3.unroll(i1100, 8); --> (comp3, i1100, 8)
        schedules_dict = dict()
        schedule_name = re.findall(r'tiramisu::init\s*\(\s*\"(\w*)\"\s*\)', code)[0]
        schedules_dict['schedule_name'] = schedule_name
        #get all the computation names
        computations_list = []
        comp_lines = re.findall(r'\n\s*computation\s+.*;',code)
        for comp_def in comp_lines: #assuming that each computation is declared in a separate line
            computations_list.append(re.findall(r'(\w*)\s*\(\s*\"',comp_def)[0])
        for comp in computations_list:
            schedules_dict[comp] = {'interchange_dims':[], 'tiling' : {}, 'unrolling_factor' : None }
        for tile_2d_tuple in tile_2d_list:
            schedules_dict[tile_2d_tuple[0]]['tiling']['tiling_depth'] = 2
            schedules_dict[tile_2d_tuple[0]]['tiling']['tiling_dims'] = [tile_2d_tuple[1], tile_2d_tuple[2]]
            schedules_dict[tile_2d_tuple[0]]['tiling']['tiling_factors'] = [tile_2d_tuple[3], tile_2d_tuple[4]]
        for tile_3d_tuple in tile_3d_list:
            schedules_dict[tile_3d_tuple[0]]['tiling']['tiling_depth'] = 3
            schedules_dict[tile_3d_tuple[0]]['tiling']['tiling_dims'] = [tile_3d_tuple[1], tile_3d_tuple[2], tile_3d_tuple[3]]
            schedules_dict[tile_3d_tuple[0]]['tiling']['tiling_factors'] = [tile_3d_tuple[4], tile_3d_tuple[5], tile_3d_tuple[6]]
        for interchange_tuple in interchange_list :
            schedules_dict[interchange_tuple[0]]['interchange_dims']= [interchange_tuple[1], interchange_tuple[2]]
        for unroll_tuple in unroll_list:
            schedules_dict[unroll_tuple[0]]['unrolling_factor'] = unroll_tuple[2]
        #Computation ordering
        #get the computations iterators first
        comp_lines = re.findall(r'\n\s*computation\s+.*;',code)
        computation_dict=dict()
        for comp_def in comp_lines: #assuming that each computation is declared in a separate line
            name = re.findall(r'(\w*)\s*\(\s*\"',comp_def)[0]
            comp_iterators = re.findall(r'\w+',re.findall(r'{([\w\s,]*)}',comp_def)[0]) # gets a list of iterators
            computation_dict[name]=dict()
            computation_dict[name]['iterators']=comp_iterators
        #get the ordering seq
        orderings_list = self.get_ordering_sequence(code)
        unfuse_iterators=[]
        if len(orderings_list)>0:# Computation ordering is defined
            for i in range(1,len(orderings_list)):
                fused_at = orderings_list[i][1]
                for depth in range(min(len(computation_dict[orderings_list[i][0]]['iterators']),len(computation_dict[orderings_list[i-1][0]]['iterators']))):
                    if computation_dict[orderings_list[i][0]]['iterators'][depth]==computation_dict[orderings_list[i-1][0]]['iterators'][depth]:
                        deepest_shared_loop = computation_dict[orderings_list[i][0]]['iterators'][depth]
                    else:
                        break
                if deepest_shared_loop!=fused_at: #the fuse iterator is not the deepest shared loop between the two computations
                    unfuse_iterators.append(fused_at)
        schedules_dict['unfuse_iterators']=unfuse_iterators
        # constructing the tree structure from the ordering instruction
        tree = dict()
        last_path = []
        if orderings_list==[] : #no ordering is defined, we assume that there is only one computation
            dummy_ord_tuple = (list(computation_dict.keys())[0], '')
            orderings_list.append(dummy_ord_tuple)
        for comp_name,fuse_at in orderings_list:
            if fuse_at == '': # first computation
                fuse_at = computation_dict[comp_name]['iterators'][0]
                tree['loop_name']=fuse_at+'_c'+comp_name[-1] # the tree root is the first loop, adding suffix for readability
                tree['computations_list']=[]
                # index_lastpath = 0
                last_path=[fuse_at+'_c'+comp_name[-1]]
            for index,i in enumerate(last_path):
                if i.startswith(fuse_at):
                    index_lastpath=index
                    break
            tree_browser = tree
            index_comp_iterators = computation_dict[comp_name]['iterators'].index(fuse_at)
            del last_path[index_lastpath+1:]
            for itr in computation_dict[comp_name]['iterators'][index_comp_iterators+1:]: # update last_path with the new computation's iterators
                last_path.append(itr+'_c'+comp_name[-1])
            # print(comp_name,fuse_at,last_path,last_path[:index_lastpath+1])
            for itr in last_path[:index_lastpath+1]: #browse until the node of fuse_at
                if tree_browser['loop_name'] == itr: #for the first computation, TODO: check if this instruction is necessary
                    continue
                # print(filename, itr, tree_browser['loop_name'])
                for child in tree_browser['child_list']:
                    if child['loop_name']==itr:
                        tree_browser=child
                        break
            for itr in computation_dict[comp_name]['iterators'][index_comp_iterators+1:]: #build the rest of the branch
                new_tree = dict()
                new_tree['loop_name'] = itr+'_c'+comp_name[-1]
                new_tree['computations_list']=[]
                # new_tree['has_comps'] = False
                tree_browser['child_list'] = tree_browser.get('child_list',[])
                tree_browser['child_list'].append(new_tree)
                tree_browser = new_tree
            # tree_browser['has_comps'] = True
            tree_browser['computations_list'] = tree_browser.get('computations_list', [])
            tree_browser['computations_list'].append(comp_name)
            tree_browser['child_list'] = tree_browser.get('child_list',[])
        schedules_dict['tree_structure'] = tree
        # NOTE(review): the block below repeats the tree construction above verbatim
        # and recomputes an identical 'tree_structure' value — it looks like a
        # leftover duplicate; kept as-is to preserve behavior.
        tree = dict()
        last_path = []
        for comp_name,fuse_at in orderings_list:
            if fuse_at == '': # first computation
                fuse_at = computation_dict[comp_name]['iterators'][0]
                tree['loop_name']=fuse_at+'_c'+comp_name[-1] # the tree root is the first loop, adding suffix for readability
                tree['computations_list']=[]
                # index_lastpath = 0
                last_path=[fuse_at+'_c'+comp_name[-1]]
            for index,i in enumerate(last_path):
                if i.startswith(fuse_at):
                    index_lastpath=index
                    break
            tree_browser = tree
            index_comp_iterators = computation_dict[comp_name]['iterators'].index(fuse_at)
            del last_path[index_lastpath+1:]
            for itr in computation_dict[comp_name]['iterators'][index_comp_iterators+1:]: # update last_path with the new computation's iterators
                last_path.append(itr+'_c'+comp_name[-1])
            # print(comp_name,fuse_at,last_path,last_path[:index_lastpath+1])
            for itr in last_path[:index_lastpath+1]: #browse until the node of fuse_at
                if tree_browser['loop_name'] == itr: #for the first computation, TODO: check if this instruction is necessary
                    continue
                # print(filename, itr, tree_browser['loop_name'])
                for child in tree_browser['child_list']:
                    if child['loop_name']==itr:
                        tree_browser=child
                        break
            for itr in computation_dict[comp_name]['iterators'][index_comp_iterators+1:]: #build the rest of the branch
                new_tree = dict()
                new_tree['loop_name'] = itr+'_c'+comp_name[-1]
                new_tree['computations_list']=[]
                # new_tree['has_comps'] = False
                tree_browser['child_list'] = tree_browser.get('child_list',[])
                tree_browser['child_list'].append(new_tree)
                tree_browser = new_tree
            # tree_browser['has_comps'] = True
            tree_browser['computations_list'] = tree_browser.get('computations_list', [])
            tree_browser['computations_list'].append(comp_name)
            tree_browser['child_list'] = tree_browser.get('child_list',[])
        schedules_dict['tree_structure'] = tree
        schedules_json = schedules_dict
        sched_json_dump = json.dumps(schedules_json, indent = 4)
        def format_array(array_str):
            # re.sub callback: collapse a multi-line JSON array onto a single line for readability
            array_str = array_str.group()
            array_str = array_str.replace('\n','')
            array_str = array_str.replace('\t','')
            array_str = array_str.replace(' ','')
            array_str = array_str.replace(',',', ')
            return array_str
        sched_json_dump = re.sub(r'\[[\s*\w*,\"]*\]',format_array, sched_json_dump)
        return sched_json_dump

    def generate_json_annotations(self, programs_folder):
        """For every program folder: write the function annotation JSON, then one
        schedule annotation JSON per schedule subfolder (sequential version)."""
        program_names = listdir(programs_folder)
        # keep only real program folders (skip json files, hidden/dash/underscore entries)
        program_names = sorted(filter(lambda x:not (x.endswith('.json') or x.startswith('.') or x.startswith('-') or x.startswith('_')), program_names))
        for program in tqdm(program_names):
            function_annotation = self.get_function_annotation(programs_folder + '/' + program + '/' + program + '_no_schedule' + '/' + program + '_no_schedule.cpp')
            with open(programs_folder + '/' + program + '/' + program + '_fusion_v3.json', 'w') as file:
                file.write(function_annotation)
            schedule_names = sorted(filter(lambda x:not (x.endswith('.json') or x.startswith('.') or x.startswith('-') or x.startswith('_')), listdir(programs_folder + '/' + program)))
            for schedule in schedule_names:
                schedule_annotation = self.get_schedule_annotation(programs_folder + '/' + program + '/' + schedule + '/' + schedule +'.cpp')
                with open(programs_folder + '/' + program + '/' + schedule + '/' + schedule + '_fusion_v3.json', 'w') as file:
                    file.write(schedule_annotation)

    def generate_json_annotations_parallel(self, programs_folder,nb_threads=24): # same as previous but parallelized
        """Parallel version of generate_json_annotations: fan the per-program work
        out to a worker pool of *nb_threads* workers."""
        program_names = listdir(programs_folder)
        program_names = sorted(filter(lambda x:not (x.endswith('.json') or x.startswith('.') or x.startswith('-') or x.startswith('_')), program_names))
        programs_folder_list = [programs_folder]*len(program_names)
        args_tuples = list(zip(programs_folder_list,program_names))
        with Pool(nb_threads) as pool:
            pool.map(self.write_annots_parallel, args_tuples)

    def write_annots_parallel(self, args_tuple):
        """Worker for generate_json_annotations_parallel: annotate one program folder.

        args_tuple is (programs_folder, program_name)."""
        programs_folder, program = args_tuple
        function_annotation = self.get_function_annotation(programs_folder + '/' + program + '/' + program + '_no_schedule' + '/' + program + '_no_schedule.cpp')
        with open(programs_folder + '/' + program + '/' + program + '_fusion_v3.json', 'w') as file:
            file.write(function_annotation)
        schedule_names = sorted(filter(lambda x:not (x.endswith('.json') or x.startswith('.') or x.startswith('-') or x.startswith('_')), listdir(programs_folder + '/' + program)))
        for schedule in schedule_names:
            schedule_annotation = self.get_schedule_annotation(programs_folder + '/' + program + '/' + schedule + '/' + schedule +'.cpp')
            with open(programs_folder + '/' + program + '/' + schedule + '/' + schedule + '_fusion_v3.json', 'w') as file:
                file.write(schedule_annotation)
class dataset_utilities():
    """Utilities to post-process raw execution-time measurements and package
    them, together with the JSON annotations, into pickled dataset files."""

    def __init__(self):
        pass

    # Path to the files generated by the execution jobs
    # example src_path = "/data/scratch/mmerouani/time_measurement/results_batch11001-11500/parts/"
    def post_process_exec_times(self, partial_exec_times_folder):
        """
        Fuse the files generated by the execution jobs into a single dict.

        Each partial file (name starting with 'final_exec_times') is a pickled
        list of (func_name, sched_name, exec_times_list) tuples. Returns a
        dictionary with this format:
        {
            'func_name1':{
                'sched_name1': {
                    'exec_time':<median of the measurements, divided by 1000>
                    'speedup':<no_schedule median / this schedule's median>
                }
                'sched_name2': { ... }
                ...
            }
            ...
        }
        """
        src_path = Path(partial_exec_times_folder)
        final_exec_times = []
        # Fuse all execution times to a single list
        for file_path in src_path.iterdir():
            if file_path.name.startswith("final_exec_times"):
                # 'with' guarantees the handle is closed even if unpickling fails
                with open(file_path, "rb") as f:
                    final_exec_times.extend(pickle.load(f))
        print(f'nb schedules {len(final_exec_times)}')
        # Compute the median of the repeated measurements of each (function, schedule)
        final_exec_times_median = []
        for func_id, sched_id, e in final_exec_times:
            final_exec_times_median.append((func_id, sched_id, e, np.median(e)))
        # Reference (unscheduled) execution time per function, used as speedup baseline
        ref_progs = dict()
        for func_id, sched_id, _, median in final_exec_times_median:
            if sched_id.endswith("no_schedule"):
                if (func_id in ref_progs):
                    print("duplicate found, taking non zero "+ str((func_id, ref_progs[func_id],median )))
                    if (median==0):
                        continue # if zero keep the old value
                ref_progs[func_id] = median
        # Compute the speedups
        final_exec_times_median_speedup = []
        for func_id, sched_id, e, median in final_exec_times_median:
            if (median == 0):
                # zero median: execution time below timer resolution; mark as NaN.
                # np.nan instead of np.NaN: the NaN alias was removed in NumPy 2.0.
                speedup = np.nan
            else:
                if not (func_id in ref_progs):
                    print("error ref_prog", func_id," not found")
                    continue
                speedup = float(ref_progs[func_id] / median)
            final_exec_times_median_speedup.append((func_id, sched_id, e, median, speedup))
        # Transform to dict; on duplicate schedule names keep the latest non-zero measurement
        final_exec_times_median_speedup = sorted(final_exec_times_median_speedup, key=lambda x: x[1])
        programs = dict()
        for l in final_exec_times_median_speedup:
            programs[l[0]] = programs.get(l[0], dict())
            if (l[1] in programs[l[0]]): #duplicate found
                if (l[3]==0): # if the new one is zero, don't change the value
                    print("duplicate found, taking the oldest"+str((l[1], programs[l[0]][l[1]]['exec_time'],l[3])))
                    continue
                else:
                    print("duplicate found, taking the latest"+str((l[1], programs[l[0]][l[1]]['exec_time'],l[3])))
            programs[l[0]][l[1]] = dict()
            # presumably measurements are in milliseconds and stored in seconds — TODO confirm units
            programs[l[0]][l[1]]['exec_time'] = l[3]/1000
            programs[l[0]][l[1]]['speedup'] = l[4]
        return programs

    def merge_datasets(self, datasets_file_list, result_dataset_file):
        '''
        Merges multiple dataset files into a single one, eg:
        merge_datasets(['/data/scratch/mmerouani/processed_datasets/dataset_batch250000-254999.pkl',
                        '/data/scratch/mmerouani/processed_datasets/dataset_batch255000-259999.pkl',
                        '/data/scratch/mmerouani/processed_datasets/dataset_batch260000-264999.pkl'],
                       result_dataset_file='/data/scratch/mmerouani/processed_datasets/dataset_batch250000-264999.pkl')
        On duplicate function names, later files in the list win (dict.update).
        '''
        merged_programs_dict = dict()
        for dataset_filename in tqdm(datasets_file_list):
            # context managers close the handles even if unpickling raises
            with open(dataset_filename, 'rb') as f:
                programs_dict = pickle.load(f)
            merged_programs_dict.update(programs_dict)
        with open(result_dataset_file, 'wb') as f:
            pickle.dump(merged_programs_dict, f)

    def save_pkl_dataset(self, programs_folder, partial_exec_times_folder, output_filename):
        '''
        Creates and dumps a dataset as a dictionary with this format
        programs_dict={
            'func_name1':{
                'json':<function annotation file content>
                'schedules':{
                    'schedule_name1':{
                        'json':<schedule annotation file content>
                        'exec_time': <value>
                        'speedup': <value>
                    }
                    'schedule_name2':{ ... }
                    ...
                }
            }
            ...
        }
        Programs/schedules with no measured execution time are reported and skipped.
        '''
        exec_dict = self.post_process_exec_times(partial_exec_times_folder)
        program_names = listdir(programs_folder)
        # skip hidden/dash/underscore entries; the rest are program folders
        program_names = sorted(filter(lambda x:not (x.startswith('.') or x.startswith('-') or x.startswith('_')),program_names))
        programs_dict = dict()
        for program in tqdm(program_names):
            programs_dict[program] = dict()
            with open(programs_folder + '/' + program + '/' + program + '_fusion_v3.json', 'r') as f:
                programs_dict[program]['json'] = json.load(f)
            schedule_names = sorted(filter(lambda x:not (x.endswith('.json') or x.startswith('.') or x.startswith('-') or x.startswith('_')), listdir(programs_folder + '/' + program)))
            programs_dict[program]['schedules'] = dict()
            if not (program in exec_dict):
                print('error ',program,' not found in exec_dict')
                continue
            for schedule in schedule_names:
                if not (schedule in exec_dict[program]):
                    print ("error schedules ",schedule," not found in exec_dict")
                    continue
                programs_dict[program]['schedules'][schedule] = dict()
                with open(programs_folder + '/' + program + '/' + schedule + '/' + schedule + '_fusion_v3.json', 'r') as f:
                    programs_dict[program]['schedules'][schedule]['json'] = json.load(f)
                programs_dict[program]['schedules'][schedule]['exec_time'] = exec_dict[program][schedule]['exec_time']
                programs_dict[program]['schedules'][schedule]['speedup'] = exec_dict[program][schedule]['speedup']
        with open(output_filename, 'wb') as f:
            pickle.dump(programs_dict, f)

    def get_dataset_df(self, dataset_filename):
        """Load a pickled dataset and return a pandas DataFrame with columns
        ['function', 'schedule', 'exec_time', 'speedup'], printing summary stats.

        Schedules with NaN speedup (didn't run) or zero speedup (exec time below
        one micro-second) are skipped and counted in the "didn't run" percentage.
        """
        with open(dataset_filename, 'rb') as f:
            programs_dict = pickle.load(f)
        Y = []
        program_names = []
        schedule_names = []
        exec_time = []
        nb_nan = 0
        for function_name in tqdm(programs_dict):
            for schedule_name in programs_dict[function_name]['schedules']:
                sched_entry = programs_dict[function_name]['schedules'][schedule_name]
                # NaN value means the schedule didn't run, zero means exec time<1 micro-second, skip them
                if np.isnan(sched_entry['speedup']) or sched_entry['speedup'] == 0:
                    nb_nan += 1
                    continue
                Y.append(sched_entry['speedup'])
                program_names.append(function_name)
                schedule_names.append(schedule_name)
                exec_time.append(sched_entry['exec_time'])
        print('Dataset location: ',dataset_filename)
        print(f'Number of schedules {len(Y)}')
        df = pd.DataFrame(data=list(zip(program_names,schedule_names,exec_time,Y)),
                          columns=['function', 'schedule', 'exec_time', 'speedup'])
        print("Schedules that didn't run: {:.2f}%".format(nb_nan/len(Y)*100) )
        print('Speedups >1 :{:.2f}%'.format(len(df['speedup'][df['speedup']>1])/len(df['speedup'])*100))
        print('Speedups >2 :{:.2f}%'.format(len(df['speedup'][df['speedup']>2])/len(df['speedup'])*100))
        print('Speedups <0.1 :{:.2f}%'.format(len(df['speedup'][df['speedup']<0.1])/len(df['speedup'])*100))
        print('Speedups 0.9<s<1.1 :{:.2f}%'.format((len(df['speedup'][df['speedup']<1.1])-len(df['speedup'][df['speedup']<0.9]))/len(df['speedup'])*100))
        print('Mean speedup: {:.2f}'.format( df['speedup'].mean()))
        print('Median speedup: {:.2f}'.format( np.median(df['speedup'])))
        print('Max speedup: {:.2f}'.format(df['speedup'].max()))
        print('Min speedup: {:.3f}'.format(df['speedup'].min()))
        print('Speedup variance : {:.3f}'.format((df['speedup']).var()))
        print('Mean execution time: {:.3f}s'.format(df.exec_time.mean()/1000))
        print('Max execution time: {:.3f}s'.format(df.exec_time.max()/1000))
        return df
<filename>plugins/modules/oci_opsi_summarize_exadata_insight_resource_forecast_trend_facts.py
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_opsi_summarize_exadata_insight_resource_forecast_trend_facts
short_description: Fetches details about one or multiple SummarizeExadataInsightResourceForecastTrend resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple SummarizeExadataInsightResourceForecastTrend resources in Oracle Cloud Infrastructure
- Get historical usage and forecast predictions for an exadata system with breakdown by databases, hosts or storage servers.
Additionally resources can be filtered using databaseInsightId, hostInsightId or storageServerName query parameters.
Top five resources are returned if total exceeds the limit specified.
Valid values for ResourceType DATABASE are CPU,MEMORY,IO and STORAGE. Database name is returned in name field. DatabaseInsightId , cdbName and hostName
query parameter applies to ResourceType DATABASE.
Valid values for ResourceType HOST are CPU and MEMORY. HostName s returned in name field. HostInsightId and hostName query parameter applies to
ResourceType HOST.
Valid values for ResourceType STORAGE_SERVER are STORAGE, IOPS and THROUGHPUT. Storage server name is returned in name field for resourceMetric IOPS and
THROUGHPUT
and asmName is returned in name field for resourceMetric STORAGE. StorageServerName query parameter applies to ResourceType STORAGE_SERVER.
Valid value for ResourceType DISKGROUP is STORAGE. Comma delimited (asmName,diskgroupName) is returned in name field.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
resource_type:
description:
- Filter by resource.
Supported values are HOST , STORAGE_SERVER and DATABASE
type: str
required: true
resource_metric:
description:
- Filter by resource metric.
Supported values are CPU , STORAGE, MEMORY, IO, IOPS, THROUGHPUT
type: str
required: true
exadata_insight_id:
description:
- L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of exadata insight resource.
type: str
required: true
analysis_time_interval:
description:
- Specify time period in ISO 8601 format with respect to current time.
Default is last 30 days represented by P30D.
If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
Examples P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months), . Maximum value allowed is 25 months prior to
current time (P25M).
type: str
time_interval_start:
description:
- Analysis start time in UTC in ISO 8601 format(inclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
The minimum allowed value is 2 years prior to the current day.
timeIntervalStart and timeIntervalEnd parameters are used together.
If analysisTimeInterval is specified, this parameter is ignored.
type: str
time_interval_end:
description:
- Analysis end time in UTC in ISO 8601 format(exclusive).
Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
timeIntervalStart and timeIntervalEnd are used together.
If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
type: str
database_insight_id:
description:
- Optional list of database insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
host_insight_id:
description:
- Optional list of host insight resource L(OCIDs,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
type: list
elements: str
storage_server_name:
description:
- Optional storage server name on an exadata system.
type: list
elements: str
exadata_type:
description:
- Filter by one or more Exadata types.
Possible value are DBMACHINE, EXACS, and EXACC.
type: list
elements: str
statistic:
description:
- Choose the type of statistic metric data to be used for forecasting.
type: str
choices:
- "AVG"
- "MAX"
forecast_start_day:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_days:
description:
- Number of days used for utilization forecast analysis.
type: int
forecast_model:
description:
- "Choose algorithm model for the forecasting.
Possible values:
- LINEAR: Uses linear regression algorithm for forecasting.
- ML_AUTO: Automatically detects best algorithm to use for forecasting.
- ML_NO_AUTO: Automatically detects seasonality of the data for forecasting using linear or seasonal algorithm."
type: str
choices:
- "LINEAR"
- "ML_AUTO"
- "ML_NO_AUTO"
cdb_name:
description:
- Filter by one or more cdb name.
type: list
elements: str
host_name:
description:
- Filter by hostname.
type: list
elements: str
confidence:
description:
- This parameter is used to change data's confidence level, this data is ingested by the
forecast algorithm.
Confidence is the probability of an interval to contain the expected population parameter.
Manipulation of this value will lead to different results.
If not set, default confidence value is 95%.
type: int
sort_order:
description:
- The sort order to use, either ascending (`ASC`) or descending (`DESC`).
type: str
choices:
- "ASC"
- "DESC"
sort_by:
description:
- The order in which resource Forecast trend records are listed
type: str
choices:
- "id"
- "name"
- "daysToReachCapacity"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ]
"""
EXAMPLES = """
- name: List summarize_exadata_insight_resource_forecast_trends
oci_opsi_summarize_exadata_insight_resource_forecast_trend_facts:
# required
resource_type: resource_type_example
resource_metric: resource_metric_example
exadata_insight_id: "ocid1.exadatainsight.oc1..xxxxxxEXAMPLExxxxxx"
# optional
analysis_time_interval: analysis_time_interval_example
time_interval_start: 2013-10-20T19:20:30+01:00
time_interval_end: 2013-10-20T19:20:30+01:00
database_insight_id: [ "$p.getValue()" ]
host_insight_id: [ "$p.getValue()" ]
storage_server_name: [ "$p.getValue()" ]
exadata_type: [ "$p.getValue()" ]
statistic: AVG
forecast_start_day: 56
forecast_days: 56
forecast_model: LINEAR
cdb_name: [ "$p.getValue()" ]
host_name: [ "$p.getValue()" ]
confidence: 56
sort_order: ASC
sort_by: id
"""
RETURN = """
summarize_exadata_insight_resource_forecast_trends:
description:
- List of SummarizeExadataInsightResourceForecastTrend resources
returned: on success
type: complex
contains:
id:
description:
- The L(OCID,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the database insight resource.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
name:
description:
- The name of the resource.
returned: on success
type: str
sample: name_example
days_to_reach_capacity:
description:
- Days to reach capacity for a storage server
returned: on success
type: int
sample: 56
pattern:
description:
- Time series patterns used in the forecasting.
returned: on success
type: str
sample: LINEAR
historical_data:
description:
- Time series data used for the forecast analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
projected_data:
description:
- Time series data result of the forecasting analysis.
returned: on success
type: complex
contains:
end_timestamp:
description:
- The timestamp in which the current sampling period ends in RFC 3339 format.
returned: on success
type: str
sample: "2020-05-01T00:00:00.000Z"
usage:
description:
- Total amount used of the resource metric type (CPU, STORAGE).
returned: on success
type: float
sample: 34.5
high_value:
description:
- Upper uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
low_value:
description:
- Lower uncertainty bound of the current usage value.
returned: on success
type: float
sample: 1.2
sample: [{
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"name": "name_example",
"days_to_reach_capacity": 56,
"pattern": "LINEAR",
"historical_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5
}],
"projected_data": [{
"end_timestamp": "2020-05-01T00:00:00.000Z",
"usage": 34.5,
"high_value": 1.2,
"low_value": 1.2
}]
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.opsi import OperationsInsightsClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class SummarizeExadataInsightResourceForecastTrendFactsHelperGen(
    OCIResourceFactsHelperBase
):
    """Supported operations: list"""

    def get_required_params_for_list(self):
        # Module parameters that must be supplied before a list call can be
        # made; validation is performed by the facts-helper base class.
        return [
            "resource_type",
            "resource_metric",
            "exadata_insight_id",
        ]

    def list_resources(self):
        # All optional module parameters understood by the underlying SDK
        # operation; only the ones the user actually set are forwarded.
        optional_list_method_params = [
            "analysis_time_interval",
            "time_interval_start",
            "time_interval_end",
            "database_insight_id",
            "host_insight_id",
            "storage_server_name",
            "exadata_type",
            "statistic",
            "forecast_start_day",
            "forecast_days",
            "forecast_model",
            "cdb_name",
            "host_name",
            "confidence",
            "sort_order",
            "sort_by",
            "name",
        ]
        # Keep only parameters with a non-None value so SDK defaults apply
        # for everything the user left unset.
        optional_kwargs = dict(
            (param, self.module.params[param])
            for param in optional_list_method_params
            if self.module.params.get(param) is not None
        )
        # list_all_resources wraps the SDK call and handles pagination.
        return oci_common_utils.list_all_resources(
            self.client.summarize_exadata_insight_resource_forecast_trend,
            resource_type=self.module.params.get("resource_type"),
            resource_metric=self.module.params.get("resource_metric"),
            exadata_insight_id=self.module.params.get("exadata_insight_id"),
            **optional_kwargs
        )
SummarizeExadataInsightResourceForecastTrendFactsHelperCustom = get_custom_class(
"SummarizeExadataInsightResourceForecastTrendFactsHelperCustom"
)
class ResourceFactsHelper(
    SummarizeExadataInsightResourceForecastTrendFactsHelperCustom,
    SummarizeExadataInsightResourceForecastTrendFactsHelperGen,
):
    # Final helper type: the custom class is listed first so any hand-written
    # overrides take precedence over the generated implementation via the MRO.
    pass
def main():
    """Ansible module entry point: validate arguments, run the list
    operation and return the results via exit_json."""
    # Common OCI arguments (auth, region, ...) plus this module's options.
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(
            resource_type=dict(type="str", required=True),
            resource_metric=dict(type="str", required=True),
            exadata_insight_id=dict(type="str", required=True),
            analysis_time_interval=dict(type="str"),
            time_interval_start=dict(type="str"),
            time_interval_end=dict(type="str"),
            database_insight_id=dict(type="list", elements="str"),
            host_insight_id=dict(type="list", elements="str"),
            storage_server_name=dict(type="list", elements="str"),
            exadata_type=dict(type="list", elements="str"),
            statistic=dict(type="str", choices=["AVG", "MAX"]),
            forecast_start_day=dict(type="int"),
            forecast_days=dict(type="int"),
            forecast_model=dict(
                type="str", choices=["LINEAR", "ML_AUTO", "ML_NO_AUTO"]
            ),
            cdb_name=dict(type="list", elements="str"),
            host_name=dict(type="list", elements="str"),
            confidence=dict(type="int"),
            sort_order=dict(type="str", choices=["ASC", "DESC"]),
            sort_by=dict(type="str", choices=["id", "name", "daysToReachCapacity"]),
            name=dict(type="str"),
        )
    )
    module = AnsibleModule(argument_spec=module_args)
    # The OCI Python SDK is a hard requirement for all OCI modules.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="summarize_exadata_insight_resource_forecast_trend",
        service_client_class=OperationsInsightsClient,
        namespace="opsi",
    )
    result = []
    # This facts module only supports the list operation; anything else
    # is a usage error.
    if resource_facts_helper.is_list():
        result = resource_facts_helper.list()
    else:
        resource_facts_helper.fail()
    module.exit_json(summarize_exadata_insight_resource_forecast_trends=result)
if __name__ == "__main__":
main()
|
<reponame>ChuckCottrill/hack-rank
abs(num)
any(iterable)
all(iterable)
ascii(object)
bin(num)
bool(value)
bytearray() # bytearray(source, encoding, errors), mutable
bytes() # bytes(source, encoding, errors), immutable
callable(object)
chr(num) # num in [0,1_114_111], returns unicode codepoint
compile() # compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
complex() # complex([real[, imag]])
hasattr() # hasattr(object, name)
getattr() # getattr(object, name[, default])
delattr() # delattr(object, name)
setattr() # setattr(object, name, value)
dict() # class dict(**kwarg)
# class dict(mapping, **kwarg)
# class dict(iterable, **kwarg)
list() # list([iterable])
set() # set(iterable)
tuple() # tuple(iterable)
dir() # dir([object])
divmod() # divmod(x,y)
enumerate() # enumerate(iterable, start=0)
eval() # eval(expression, globals=None, locals=None)
exec() # exec(object, globals, locals)
float() # float([x])
format() # format(value[, format_spec])
# see: https://www.programiz.com/python-programming/methods/built-in/format
frozenset() # frozenset([iterable])
help() # help(object)
hex() # hex(x)
oct() # oct(x)
ord() # ord(ch)
hash() # hash(object)
input() # input([prompt])
id() # id(object)
isinstance() # isinstance(object, classinfo)
int() # int(x=0, base=10)
issubclass() # issubclass(class, classinfo)
iter() # iter(object, sentinel)
len() # len(s)
# to find the largest item in an iterable
max() # max(iterable, *iterables, key, default)
# to find the largest item between two or more objects
max() # max(arg1, arg2, *args, key)
# to find the smallest item in an iterable
min() # min(iterable, *iterables, key, default)
# to find the smallest item between two or more objects
min() # min(arg1, arg2, *args, key)
filter() # filter(function, iterable)
map() # map(function, iterable, ...)
next() # next(iterator, default)
memoryview() # memoryview(object)
object() # object()
open() # open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
pow() # pow(x, y, z)
print() # print(*objects, sep=' ', end='\n', file=sys.stdout, flush=False)
property() # property(fget=None, fset=None, fdel=None, doc=None)
range() # range(stop)
# range(start, stop[, step])
repr() # repr(obj)
reversed() # reversed(seq)
round() # round(number, ndigits)
slice() # slice(start, stop, step)
sorted() # sorted(iterable, key=None, reverse=False)
# sorted(iterable, key=len)
str() # str(object, encoding='utf-8', errors='strict')
sum() # sum(iterable, start)
type() # type(object)
# type(name, bases, dict)
vars() # vars(object)
zip() # zip(*iterables)
super() # super()
__import__() # __import__(name, globals=None, locals=None, fromlist=(), level=0)
globals() # globals() dictionary of global symbol table
locals() # locals() dictionary of local symbol table
classmethod() # classmethod(function) (unpythonic)
@classmethod
def func(cls, args...)
# see: https://www.programiz.com/python-programming/methods/built-in/classmethod
staticmethod() # staticmethod(function)
@staticmethod
def func(args, ...)
# see: https://www.programiz.com/python-programming/methods/built-in/staticmethod
|
import html
import os
import sys
import threading
import flask
from flask import Flask, request
from flask_socketio import SocketIO, emit, join_room
from lyrics_launcher import run_launcher
app = Flask('lyrics')
socketio = SocketIO(app)
if hasattr(sys, '_MEIPASS'):
root_dir = sys._MEIPASS
else:
root_dir = os.path.dirname(os.path.abspath(__file__))
def get_data_file(rel):
    """Return the path of a bundled data file; root_dir points either at the
    source tree or at the PyInstaller bundle (sys._MEIPASS) — see module top."""
    return os.path.join(root_dir, 'data', rel)
cached_lyrics = {}
lyrics_mutex = threading.Lock()
def get_lyrics(file):
    """Return the paragraph list for *file*, loading and caching on first use.

    Paragraphs are built by splitting ./config/lyrics/<file>.txt on blank
    lines; each paragraph is HTML-escaped with newlines turned into <br>.
    Returns [] when the file does not exist.
    """
    file = str(file)
    if file not in cached_lyrics:
        # NOTE(review): membership test runs outside the lock, so two threads
        # can both miss and parse the same file; the result is identical, but
        # confirm this best-effort caching is intentional.
        physical_file = os.path.join('.', 'config', 'lyrics', file + '.txt')
        if os.path.isfile(physical_file):
            lyrics = ['']  # lyrics[-1] accumulates the current paragraph
            with open(physical_file) as fp:
                for line in fp:
                    # A blank line ends the paragraph, but only if the current
                    # one is non-empty (runs of blanks create no empties).
                    if not line.strip() and lyrics[-1]:
                        lyrics.append('')
                    else:
                        lyrics[-1] += html.escape(line).replace('\n', '<br>')
            with lyrics_mutex:
                cached_lyrics[file] = lyrics
        else:
            return []
    with lyrics_mutex:
        lyric_list = cached_lyrics[file]
    return lyric_list
def get_lyric(file, paragraph):
    """Return a single paragraph from the requested lyrics file."""
    paragraphs = get_lyrics(file)
    return paragraphs[paragraph]
def clear_lyrics_cache():
    """Drop every cached lyrics file so the next request re-reads from disk.

    Bug fix: the previous implementation rebound a *local* name
    (``cached_lyrics = {}`` with no ``global`` statement), so the
    module-level cache was never actually cleared. Clearing the shared
    dict in place empties it for every reader.
    """
    with lyrics_mutex:
        cached_lyrics.clear()
@socketio.on('join controller')
def on_join_controller():
    """Register the connecting socket in the 'controller' room."""
    join_room('controller')
@socketio.on('join')
def on_join_client():
    """Register the connecting socket in the 'client' room (display pages).

    Renamed from a duplicate ``on_join_controller`` definition that shadowed
    the handler above at module level; the socketio event registration
    ('join' -> this function) is unchanged.
    """
    join_room('client')
@socketio.on('set server lyrics')
def on_set_lyrics(json):
    """Look up the requested paragraph and push it to every client.

    json: dict with 'file' (lyrics file stem) and 'paragraph' (int index).
    The parameter name shadows the stdlib json module name, but that module
    is not imported in this file, so it is harmless here.
    """
    file = json['file']
    paragraph = json['paragraph']
    text = get_lyric(file, paragraph)
    emit('set client lyrics', text, room='client')
@socketio.on('clear lyrics')
def on_clear_lyrics():
    """Blank the lyrics on every connected client.

    Renamed from a duplicate ``on_set_lyrics`` definition (three handlers
    shared that name); event registration is unchanged.
    """
    emit('set client lyrics', '', room='client')
@socketio.on('set server style')
def on_set_style(file):
    """Tell every client to switch to the given stylesheet name.

    Renamed from a duplicate ``on_set_lyrics`` definition (three handlers
    shared that name); event registration is unchanged.
    """
    emit('set client style', file, room='client')
@app.route('/list-files')
def list_files():
    """JSON array of available lyrics files (extension stripped, escaped)."""
    names = os.listdir('./config/lyrics')
    stems = (os.path.splitext(name)[0] for name in names)
    return flask.jsonify([html.escape(stem) for stem in stems])
@app.route('/list-styles')
def list_styles():
    """JSON array of available stylesheet file names (HTML-escaped)."""
    names = os.listdir('./config/styles')
    return flask.jsonify([html.escape(name) for name in names])
@app.route('/get-file', methods=['GET', 'POST'])
def get_file():
    """Return the paragraphs of ?file=<stem> as a JSON array ([] if missing)."""
    file = request.args.get('file')
    return flask.jsonify(get_lyrics(file))
def ensure_dirs():
    """Create the ./config directory tree on first run.

    The styles directory is seeded with the two bundled default
    stylesheets; lyrics and backgrounds start out empty.
    """
    if not os.path.exists('./config/styles'):
        import shutil
        os.makedirs('./config/styles')
        for css in ('default.css', 'bottom.css'):
            shutil.copy(get_data_file(css), './config/styles/' + css)
    for directory in ('./config/lyrics', './config/backgrounds'):
        if not os.path.exists(directory):
            os.makedirs(directory)
@app.route('/styles/<fname>.css')
def stylesheet(fname):
    """Serve a user stylesheet, or an empty body when it doesn't exist."""
    path = './config/styles/%s.css' % fname
    if not os.path.isfile(path):
        return ''
    return flask.send_file(path)
@app.route('/')
def client():
    """Serve the lyrics display page."""
    return flask.send_file(get_data_file('client.html'))
@app.route('/controller')
def controller():
    """Serve the operator control page."""
    return flask.send_file(get_data_file('controller.html'))
def main(have_launcher=True):
    """Parse CLI arguments, make sure the config dirs exist, optionally
    start the launcher thread, then serve the socketio app."""
    import argparse

    def bind_addr_type(value: str):
        # "host:port" -> (host, port); a bare "host" falls back to 59742.
        if ':' in value:
            host, port = value.rsplit(':', 1)
        else:
            host, port = value, 59742
        return host, int(port)

    parser = argparse.ArgumentParser(description='Renders lyrics on screen')
    parser.add_argument('bind_addr', metavar='HOST:PORT', nargs='?',
                        default='127.0.0.1:59742', type=bind_addr_type)
    parser.add_argument('--no-launcher', dest='have_launcher', action='store_false')
    args = parser.parse_args()

    ensure_dirs()

    if args.have_launcher:
        threading.Thread(
            target=run_launcher,
            kwargs={
                'interrupt_main': True,
                'clear_lyrics_cache': clear_lyrics_cache,
            },
            daemon=True,
        ).start()

    socketio.run(app, host=args.bind_addr[0], port=args.bind_addr[1])
if __name__ == '__main__':
main('--no-launcher' not in sys.argv)
|
<reponame>MrSometimeswinmid/DoAn
from team_config import *
from PyQt5.QtWidgets import QWidget, QComboBox, QPushButton, QListWidget, QApplication, QDialog
from PyQt5 import uic
from class_convertType import ConvertThisQObject
import sys
import json
class FillMajorData:
    """Read-only view over a semester-info JSON file.

    The file is expected to be a list of major records of the form:
        [{"major": str,
          "semester_info": [{"semester": str,
                             "subjects": [{"id": str, "name": str}, ...]},
                            ...]},
         ...]
    """

    def __init__(self, file):
        self.file = file
        with open(file, 'r', encoding='utf-8') as f:
            self.data = json.load(f)

    def getListMajor(self) -> list:
        """Return every major name, in file order."""
        # Fix: the original loop variable was named `dict`, shadowing the
        # builtin; rewritten as a comprehension (behavior unchanged).
        return [record['major'] for record in self.data]

    def getListSemesterInfo(self, index) -> list:
        """Return the semester names of the major at *index*."""
        major = self.data[index]
        return [semester['semester'] for semester in major['semester_info']]

    def getSubject(self, indexMajor, indexSemester) -> list:
        """Return '<id>: <name>' display strings for every subject in the
        given major/semester."""
        subjects = self.data[indexMajor]['semester_info'][indexSemester]['subjects']
        return [subject['id'] + ': ' + subject['name'] for subject in subjects]
class PredictSubject(QDialog):
    """Dialog that lets the user pick a major and semester and lists the
    subjects of that semester (data comes from FillMajorData)."""

    def __init__(self, jsonPath) -> None:
        # Bug fix: the original called super(QWidget, self).__init__(), which
        # starts the MRO lookup *after* QWidget and therefore skips QDialog's
        # own initializer. Plain super() initializes the QDialog base.
        super().__init__()
        self.jsonPath = jsonPath
        self.filldata = FillMajorData(jsonPath)
        uic.loadUi("guis/predict_subject.ui", self)
        #uic.loadUi(team_config.FOLDER_UI+"/"+team_config.PREDICT_SUBJECT, self)
        self.button_find = ConvertThisQObject(self, QPushButton, 'pushButtonFind').toQPushButton()
        self.button_sort = ConvertThisQObject(self, QPushButton, 'pushButtonSort').toQPushButton()
        self.comboBox_major = ConvertThisQObject(self, QComboBox, 'comboBoxMajor').toQComboBox()
        self.comboBox_semester = ConvertThisQObject(self, QComboBox, 'comboBoxSemester').toQComboBox()
        self.listWidget_listSubject = ConvertThisQObject(self, QListWidget, 'listWidget_listSubject').toQListWidget()
        self.connectSignal()
        self.fillDataToMajorComboBox()

    def connectSignal(self):
        """Wire UI signals to their slots."""
        self.button_find.clicked.connect(self.fillSubjectToListWidget)
        self.comboBox_major.currentIndexChanged.connect(self.fillDataToSemesterCombobox)

    def fillDataToMajorComboBox(self):
        """Populate the major combo box from the JSON data."""
        # Bug fix: currentIndex is a method; the original printed the bound
        # method object instead of the index value.
        print('major', self.comboBox_major.currentIndex())
        data = self.filldata.getListMajor()
        self.comboBox_major.addItems(data)

    def fillDataToSemesterCombobox(self, index):
        """Refill the semester combo box when the selected major changes."""
        # Bug fix: call currentIndex() instead of printing the method object.
        print('major', index, 'semester', self.comboBox_semester.currentIndex())
        self.comboBox_semester.clear()
        data = self.filldata.getListSemesterInfo(index)
        self.comboBox_semester.addItems(data)

    def fillSubjectToListWidget(self):
        """Show the subjects of the currently selected major/semester."""
        self.listWidget_listSubject.clear()
        indexMajor = self.comboBox_major.currentIndex()
        indexSemester = self.comboBox_semester.currentIndex()
        data = self.filldata.getSubject(indexMajor, indexSemester)
        self.listWidget_listSubject.addItems(data)
'''class AutoSortingClass:
def __init__(self, file):
self.file = file
with open(file, 'r', encoding='utf-8') as f:
self.data = json.load(f)
def getListMajor(self) -> list:
listMajor = []
for dict in self.data:
major = dict['major']
listMajor.append(major)
print(listMajor)
return listMajor
def getListSemesterInfo(self, index) -> list:
major = self.data[index]
semesterInfo = major['semester_info']
listSemesterInfo = []
for semester in semesterInfo:
name = semester['semester']
listSemesterInfo.append(name)
print(listSemesterInfo)
return listSemesterInfo
def getSubjectID(self, indexMajor, indexSemester) -> list:
major = self.data[indexMajor]
listSubject = major['semester_info'][indexSemester]['subjects']
listSubjectID = []
for subject in listSubject:
listSubjectID.append(subject['id'])
print(listSubjectID)
return listSubjectID
'''
# Launch the dialog as a standalone application.
app = QApplication(sys.argv)
window = PredictSubject('semester_info.json')
window.show()
sys.exit(app.exec_())
|
<filename>freeze_model.py<gh_stars>10-100
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.tools.optimize_for_inference_lib import optimize_for_inference as opt_inference
from tensorflow.python.training import saver as saver_lib
import data_util
# modified from https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
def freeze_graph(layer, unit, input_names, output_names, accuracy):
    """Freeze a trained LSTM checkpoint into .pb graph files.

    Restores data/{layer}layer{unit}unit.ckpt, converts all variables to
    constants, optimizes the graph for inference, and writes both a binary
    and a text protobuf under data/. With -O (``not __debug__``) it also
    dumps the individual weight/bias tensors to CSV files.

    Parameters:
        layer: number of LSTM layers in the trained model.
        unit: hidden units per layer (used to locate the checkpoint).
        input_names / output_names: comma-separated graph node names.
        accuracy: string appended to the text protobuf file name.
    """
    frozen_model_path = "data/{}layer{}unit.pb".format(layer, unit)
    checkpoint_file = "data/{}layer{}unit.ckpt".format(layer, unit)
    if not saver_lib.checkpoint_exists(checkpoint_file):
        print("Checkpoint file '" + checkpoint_file + "' doesn't exist!")
        exit(-1)
    print("begin loading model")
    saver = tf.train.import_meta_graph(checkpoint_file + '.meta', clear_devices=True)
    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint_file)
        print("model loaded")
        # export network weights and biases to text files
        if not __debug__:
            output_nodes = "w_in,b_in,w_out,b_out"
            # Bug fix: iterate over the `layer` parameter, not the
            # module-level `args.layer`, which only exists when this file
            # is run as a script.
            rnn_nodes = [",rnn/multi_rnn_cell/cell_{}/basic_lstm_cell/weights,"
                         "rnn/multi_rnn_cell/cell_{}/basic_lstm_cell/biases".format(i, i)
                         for i in range(layer)]
            weights = output_nodes + "".join(rnn_nodes)
            for name in weights.split(","):
                v = sess.run("{}:0".format(name))
                var_file_name = "data/{}.csv".format(name.replace("/", "_"))
                print("save {} to file: {}".format(name, var_file_name))
                np.savetxt(var_file_name, v, delimiter=",")
        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            input_graph_def,  # The graph_def is used to retrieve the nodes
            output_node_names=output_names.split(",")  # The output node names are used to select the useful nodes
        )
        # optimize graph
        output_graph_def = opt_inference(output_graph_def, input_names.split(","),
                                         output_names.split(","), dtypes.float32.as_datatype_enum)
        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(frozen_model_path, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("frozen graph binary saved to: {}".format(frozen_model_path))
        frozen_model_text_path = "data/{}layer{}unit{}.pb.txt".format(layer, unit, accuracy)
        with tf.gfile.FastGFile(frozen_model_text_path, "wb") as f:
            f.write(str(output_graph_def))
        print("frozen graph text saved to: {}".format(frozen_model_text_path))
        print("%d ops in the final graph." % len(output_graph_def.node))
def freeze_data(data_size=500, data_filename="phone_data"):
    """Freeze a random sample of test data into protobuf and text files.

    Picks *data_size* random test samples (seeded, so deterministic), writes
    them as text under data/, freezes the input/label tensors into
    data/data.pb, verifies the round trip, and zips the artifacts into
    model/<data_filename>.zip.
    """
    input_name = "x"
    label_name = "y"
    np.random.seed(0)  # deterministic sample selection
    x, y = data_util.get_data("test")
    samples = np.random.randint(0, len(y), data_size)
    print("use {} data samples".format(data_size))
    if not __debug__:
        print("use samples: {}".format(samples))
    x = x[samples]
    y = np.argmax(y[samples], axis=1) + 1  # one-hot -> 1-based class ids
    print("{} shape: {}".format(input_name, x.shape))
    print("{} shape: {}".format(label_name, y.shape))
    print("save {} to text file at: {}".format(input_name, "data/data.{}.txt".format(input_name)))
    print("save {} to text file at: {}".format(label_name, "data/data.{}.txt".format(label_name)))
    # Bug fix: '/' produces a float under Python 3 and np.reshape requires
    # integer dimensions; use floor division instead.
    np.savetxt("data/data.{}.txt".format(input_name),
               np.reshape(x, [data_size, np.prod(x.shape) // data_size]), '%.7e')
    np.savetxt("data/data.{}.txt".format(label_name), y, '%d')
    frozen_data_path = "data/data.pb"
    frozen_data_text_path = "data/data.pb.txt"
    input_const = tf.constant(x, dtype=tf.float32, shape=x.shape, name=input_name)
    label_const = tf.constant(y, dtype=tf.int32, shape=y.shape, name=label_name)
    graph = tf.get_default_graph()
    with tf.Session() as sess:
        sess.run(input_const)
        sess.run(label_const)
    with tf.gfile.GFile(frozen_data_path, "wb") as f:
        f.write(graph.as_graph_def().SerializeToString())
    print("frozen {} and {} to binary file at: {}".format(input_name, label_name, frozen_data_path))
    with tf.gfile.FastGFile(frozen_data_text_path, "wb") as f:
        f.write(str(graph.as_graph_def()))
    print("frozen {} and {} to text file at: {}".format(input_name, label_name, frozen_data_text_path))
    # Sanity check: reload the frozen graph and make sure both constants
    # round-trip exactly.
    with tf.gfile.GFile(frozen_data_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, input_map=None, return_elements=None,
                            name="", op_dict=None, producer_op_list=None)
    session = tf.Session(graph=graph)
    input_op = graph.get_operation_by_name(input_name).outputs[0]
    label_op = graph.get_operation_by_name(label_name).outputs[0]
    input_op_result = session.run(input_op)
    label_op_result = session.run(label_op)
    assert input_op_result.shape == x.shape
    assert label_op_result.shape == y.shape
    assert np.allclose(x, input_op_result)
    assert np.allclose(y, label_op_result)
    data_util.zip_files("model/{}.zip".format(data_filename), "data/data.*")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Fix: "lay size" -> "layer size" in the user-facing help text.
    parser.add_argument("--layer", type=int, default=2,
                        help="layer size of the LSTM model")
    parser.add_argument("--unit", type=int, default=64,
                        help="hidden unit of the LSTM model")
    parser.add_argument("--input_names", type=str, default="input",
                        help="Input node names, comma separated")
    parser.add_argument("--output_names", type=str, default="output",
                        help="The name of the output nodes, comma separated")
    parser.add_argument("--accuracy", type=str, default="",
                        help="accuracy of the LSTM model")
    args = parser.parse_args()
    # Graph freezing is currently disabled; only the data sample is frozen.
    # freeze_graph(args.layer, args.unit, args.input_names, args.output_names, args.accuracy)
    freeze_data()
|
<filename>utils.py
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
from preprocessing import warp_image
# output image folder
output_folder = 'output_images/'
def display_images(images, labels, fname='', path=output_folder, figsize=None, cmap=None):
    """Show a grid of images (3 per row) with titles; optionally save it.

    Parameters:
        images, labels: equal-length sequences; labels[i] titles images[i].
        fname: if non-empty, the figure is saved to os.path.join(path, fname).
        figsize: forwarded to plt.figure when given.
        cmap: forwarded to plt.imshow (e.g. 'gray' for single-channel data).
    """
    assert len(images) > 0
    assert len(images) == len(labels)
    if figsize is not None:
        plt.figure(figsize=figsize)
    else:
        plt.figure()
    for idx in range(len(images)):
        # 3 columns; enough rows for every image.
        plt.subplot((len(images)//3)+1, 3, idx+1)
        plt.title(labels[idx])
        if cmap is not None:
            plt.imshow(images[idx], cmap=cmap)
        else:
            plt.imshow(images[idx])
    if fname:
        plt.savefig(os.path.join(path, fname), bbox_inches='tight')
    plt.show()
def plot_lane_lines(img, left_points, left_fit, right_points, right_fit, left_fit_smooth=None, right_fit_smooth=None):
    '''
    highlights lane pixels and plot lane lines on the image

    img: single-channel image (assumed binary 0/1 — it is scaled by 255
    below; TODO confirm with callers). left_points/right_points: (x, y)
    pixel index arrays. *_fit: 2nd-order polynomial coefficients x = f(y).
    Returns an RGB uint8 image with the fitted lines drawn on top.
    '''
    assert len(img.shape) == 2
    # Replicate to 3 channels and scale to 0-255.
    output = np.dstack((img, img, img)) * 255
    # Zero channel 0 at left-lane pixels and channel 2 at right-lane pixels
    # so the two lane pixel sets render in different colors.
    output[left_points[1], left_points[0], :1] = 0
    output[right_points[1], right_points[0], 2:] = 0
    # plot lines: evaluate each polynomial at every row, then draw polylines.
    ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    cv2.polylines(output, [np.vstack((left_fitx,ploty)).astype(np.int32).T], False, (255, 0, 0), 10)
    cv2.polylines(output, [np.vstack((right_fitx,ploty)).astype(np.int32).T], False, (255, 0, 0), 10)
    # Optionally overlay the temporally-smoothed fits in a thinner stroke.
    if left_fit_smooth is not None:
        left_smooth_fitx = left_fit_smooth[0]*ploty**2 + left_fit_smooth[1]*ploty + left_fit_smooth[2]
        cv2.polylines(output, [np.vstack((left_smooth_fitx,ploty)).astype(np.int32).T], False, (175, 16, 204), 5)
    if right_fit_smooth is not None:
        right_smooth_fitx = right_fit_smooth[0]*ploty**2 + right_fit_smooth[1]*ploty + right_fit_smooth[2]
        cv2.polylines(output, [np.vstack((right_smooth_fitx,ploty)).astype(np.int32).T], False, (175, 16, 204), 5)
    return output.astype(np.uint8)
def draw_lanes(img, left_fit, right_fit, M_inv):
    '''
    draw lane lines

    Fills the region between the two fitted lane polynomials (x = f(y))
    with green in warped space, then unwarps it back to the original image
    perspective using M_inv. Returns the unwarped overlay image.
    '''
    ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # Create an image to draw the lines on
    color_warp = np.zeros_like(img)
    # Recast the x and y points into usable format for cv2.fillPoly()
    # (right side is flipped so the polygon outline is a closed loop).
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = warp_image(color_warp, M_inv)
    return newwarp
def embed_image(img1, img2, size=(240, 135), offset=(50, 50)):
    '''
    embeds a scaled image into another image

    size is the (width, height) of the inset; offset is the (x, y) of its
    top-left corner within img1. Returns the composited copy of img1.
    '''
    canvas = np.copy(img1)
    # scale second image to the requested inset size
    inset = cv2.resize(img2, (size[0], size[1]))
    if len(img2.shape) == 2:
        # single-channel input: scale to 0-255 and replicate to 3 channels
        inset = np.dstack((inset * 255, inset * 255, inset * 255))
    # embed image at the given offset
    x0, y0 = offset
    canvas[y0:y0 + inset.shape[0], x0:x0 + inset.shape[1]] = inset
    return canvas
<filename>RiboCode/RiboCode_onestep.py
#!/usr/bin/env python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# -*- coding:UTF-8 -*-
__author__ = '<NAME>'
"""
One-step script for running RiboCode program.
Usage:
RiboCode_onestep.py -g <gtf.txt> -f <genome.fa> -r <ribo_toTranscript.bam> -o <result.txt>
Details:
Type "RiboCode_onestep.py -h" to get the help information.
"""
from .prepare_transcripts import *
import argparse
def main():
    """
    Master function to call different functionalities of RiboCode
    """
    import sys
    import os
    from .parsing_opts import parsing_ribo_onestep
    args = parsing_ribo_onestep()
    #1. prepare transcript annotation
    verboseprint("Preparing annotation files ...")
    if not os.path.exists("annot"):
        try:
            os.mkdir("annot")
        except OSError as e:
            raise e
    gene_dict, transcript_dict = processTranscripts(args.genomeFasta,args.gtfFile,"annot")
    verboseprint("The step of preparing transcript annotation has been completed!")
    #2. determine p-sites locations
    from . import metaplots
    # An ArgumentParser instance is (ab)used here as a plain attribute bag
    # to carry the metaplots settings; no parsing is performed on it.
    args_metaplot = argparse.ArgumentParser(description="Storing the arguments for metaplots analysis")
    args_metaplot.rpf_mapping_file = args.rpf_mapping_file
    args_metaplot.stranded = args.stranded
    args_metaplot.minLength = args.minLength
    args_metaplot.maxLength = args.maxLength
    args_metaplot.frame0_percent = args.frame0_percent
    args_metaplot.pvalue1_cutoff = args_metaplot.pvalue2_cutoff = 0.001
    args_metaplot.outname = "metaplots"
    metaplots.meta_analysis(gene_dict,transcript_dict,args_metaplot)
    #3 detectORF
    # reading the bam file
    from . import process_bam
    from .loadconfig import LoadConfig
    # read the config file (written by the metaplots step above)
    configIn = LoadConfig(args_metaplot.outname + "_pre_config.txt")
    tpsites_sum, total_psites_number = process_bam.psites_count(configIn.configList,transcript_dict,thread_num=1)
    from . import detectORF
    if args.longest_orf == "yes":
        longest_orf = True
    else:
        longest_orf = False
    # NOTE(review): if args.start_codon (or args.stop_codon) is empty/None,
    # START_CODON / STOP_CODON_LIST are never bound and the detectORF.main
    # call below raises NameError. Confirm the option parser always
    # supplies non-empty defaults for these options.
    if args.start_codon:
        START_CODON = args.start_codon.strip().split(",")
    if args.alternative_start_codons:
        ALTERNATIVE_START_CODON_LIST = args.alternative_start_codons.strip().split(",")
    else:
        ALTERNATIVE_START_CODON_LIST = None
    if args.stop_codon:
        STOP_CODON_LIST = args.stop_codon.strip().split(",")
    output_gtf = args.output_gtf
    output_bed = args.output_bed
    if args.dependence_test == "none":
        dependence_test = False
    else:
        dependence_test = args.dependence_test
    stouffer_adj = args.stouffer_adj
    pval_adj = args.pval_adj
    detectORF.main(gene_dict=gene_dict, transcript_dict=transcript_dict, annot_dir = "annot",
                   tpsites_sum=tpsites_sum, total_psites_number=total_psites_number,
                   pval_cutoff = args.pval_cutoff, only_longest_orf=longest_orf, START_CODON=START_CODON,
                   ALTERNATIVE_START_CODON_LIST=ALTERNATIVE_START_CODON_LIST, STOP_CODON_LIST=STOP_CODON_LIST,
                   MIN_AA_LENGTH=args.min_AA_length, outname=args.output_name,
                   output_gtf=output_gtf, output_bed=output_bed, dependence_test=dependence_test,
                   stouffer_adj = stouffer_adj, pval_adj = pval_adj)
if __name__ == "__main__":
    # Script entry point: run the full one-step RiboCode pipeline.
    # NOTE(review): this banner prints before the annotation-preparation step
    # that runs inside main() -- confirm the message ordering is intended.
    verboseprint("Detecting ORFs ...")
    main()
    verboseprint("Finished !")
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------------------------------------------------------------
# includes
# 2+3 compat
from __future__ import absolute_import, division, print_function, unicode_literals
# standards
import json
from os import path
# alcazar
from alcazar.husker import HuskerNotUnique, JmesPathHusker, ListHusker, ScalarHusker, TextHusker
# tests
from .plumbing import AlcazarTest
#----------------------------------------------------------------------------------------------------------------------------------
class JmesPathHuskerTest(AlcazarTest):
    """Shared fixture: loads fixtures/comprehensive.json into a JmesPathHusker."""

    def setUp(self):
        fixture_path = path.join(path.dirname(__file__), 'fixtures', 'comprehensive.json')
        with open(fixture_path, 'rb') as file_in:
            raw_bytes = file_in.read()
        self.data = json.loads(raw_bytes.decode('UTF-8'))
        self.husker = JmesPathHusker(self.data)
#----------------------------------------------------------------------------------------------------------------------------------
class ComprehensiveJmesPathTests(JmesPathHuskerTest):
    """Exercises JmesPathHusker.one/.all over every value shape in the
    comprehensive fixture, checking both the returned value and the husker
    type it is wrapped in."""

    ### simple scalars

    def test_one_simple_string(self):
        self.assertEqual(self.husker.one("string"), "oon")

    def test_one_simple_int(self):
        self.assertEqual(self.husker.one("int"), 1)

    def test_one_simple_boolean(self):
        self.assertFalse(self.husker.one("boolean"))

    def test_all_simple_string(self):
        self.assertEqual(self.husker.all("string"), ["oon"])

    def test_all_simple_int(self):
        self.assertEqual(self.husker.all("int"), [1])

    def test_all_simple_boolean(self):
        self.assertEqual(self.husker.all("boolean"), [False])

    ### list_of_ints

    def test_list_of_ints(self):
        fetched = self.husker.one("list_of_ints")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [1, 2, 3])

    def test_list_of_ints_element(self):
        fetched = self.husker.one("list_of_ints[0]")
        self.assertIsInstance(fetched, ScalarHusker)
        self.assertEqual(fetched, 1)

    def test_list_of_ints_getitem_external(self):
        fetched = self.husker.one("list_of_ints")[0]
        self.assertIsInstance(fetched, ScalarHusker)
        self.assertEqual(fetched, 1)

    def test_list_of_ints_one_star(self):
        # `one` must refuse a wildcard that matches several elements
        with self.assertRaises(HuskerNotUnique):
            self.husker.one("list_of_ints[*]")

    def test_list_of_ints_all_star(self):
        self.assertEqual(self.husker.all("list_of_ints[*]"), [1, 2, 3])

    ### list_of_strings

    def test_list_of_strings(self):
        fetched = self.husker.one("list_of_strings")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, ["one", "too", "tree"])

    def test_list_of_strings_element(self):
        fetched = self.husker.one("list_of_strings[0]")
        self.assertIsInstance(fetched, TextHusker)
        self.assertEqual(fetched, "one")

    def test_list_of_strings_getitem_external(self):
        fetched = self.husker.one("list_of_strings")[0]
        self.assertIsInstance(fetched, TextHusker)
        self.assertEqual(fetched, "one")

    def test_list_of_strings_one_star(self):
        with self.assertRaises(HuskerNotUnique):
            self.husker.one("list_of_strings[*]")

    def test_list_of_strings_all_star(self):
        self.assertEqual(self.husker.all("list_of_strings[*]"), ["one", "too", "tree"])

    ### list_of_bools

    def test_list_of_bools(self):
        fetched = self.husker.one("list_of_bools")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [True, False, True])

    def test_list_of_bools_element(self):
        fetched = self.husker.one("list_of_bools[1]")
        self.assertIsInstance(fetched, ScalarHusker)
        self.assertFalse(fetched)

    def test_list_of_bools_getitem_external(self):
        fetched = self.husker.one("list_of_bools")[1]
        self.assertIsInstance(fetched, ScalarHusker)
        self.assertFalse(fetched)

    def test_list_of_bools_one_star(self):
        with self.assertRaises(HuskerNotUnique):
            self.husker.one("list_of_bools[*]")

    def test_list_of_bools_all_star(self):
        self.assertEqual(self.husker.all("list_of_bools[*]"), [True, False, True])

    ### list_of_lists_of_ints

    def test_list_of_lists_of_ints(self):
        fetched = self.husker.one("list_of_lists_of_ints")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [[1, 2, 3], [4, 5, 6], [7, 8, 9]])

    def test_list_of_lists_of_ints_element(self):
        fetched = self.husker.one("list_of_lists_of_ints[1]")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [4, 5, 6])

    def test_list_of_lists_of_ints_getitem_external(self):
        fetched = self.husker.one("list_of_lists_of_ints")[1]
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [4, 5, 6])

    def test_list_of_lists_one_star(self):
        with self.assertRaises(HuskerNotUnique):
            self.husker.one("list_of_lists_of_ints[*]")

    def test_list_of_lists_all_star(self):
        self.assertEqual(
            self.husker.all("list_of_lists_of_ints[*]"),
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
        )

    ### list_of_dicts_of_ints

    def test_list_of_dicts_of_ints(self):
        fetched = self.husker.one("list_of_dicts_of_ints")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [
            {"a": 1, "b": 2},
            {"b": 2, "c": 3},
            {"c": 3, "d": 4},
        ])

    def test_list_of_dicts_of_ints_element(self):
        fetched = self.husker.one("list_of_dicts_of_ints[1]")
        self.assertIsInstance(fetched, JmesPathHusker)
        self.assertEqual(fetched, {"b": 2, "c": 3})

    def test_list_of_dicts_of_ints_getitem_external(self):
        fetched = self.husker.one("list_of_dicts_of_ints")[1]
        self.assertIsInstance(fetched, JmesPathHusker)
        self.assertEqual(fetched, {"b": 2, "c": 3})

    def test_list_of_dicts_one_star(self):
        with self.assertRaises(HuskerNotUnique):
            self.husker.one("list_of_dicts_of_ints[*]")

    def test_list_of_dicts_all_star(self):
        self.assertEqual(
            self.husker.all("list_of_dicts_of_ints[*]"),
            [
                {"a": 1, "b": 2},
                {"b": 2, "c": 3},
                {"c": 3, "d": 4},
            ],
        )

    def test_list_of_dicts_of_ints_element_property(self):
        fetched = self.husker.one("list_of_dicts_of_ints[1].b")
        self.assertIsInstance(fetched, ScalarHusker)
        self.assertEqual(fetched, 2)

    def test_list_of_dicts_of_ints_one_star_property(self):
        # the wildcard projection matches a single "d" value, so `one` accepts it
        fetched = self.husker.one("list_of_dicts_of_ints[*].d")
        self.assertIsInstance(fetched, ScalarHusker)
        self.assertEqual(fetched, 4)

    def test_list_of_dicts_of_ints_all_star_property(self):
        # NB requires `all`, returns 2 matches
        fetched = self.husker.all("list_of_dicts_of_ints[*].b")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(fetched, [2, 2])

    ### dict_of_ints

    def test_dict_of_ints_keys(self):
        fetched = self.husker.all("dict_of_ints | keys(@)")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(sorted(fetched.raw), ["one", "too", "tree"])

    def test_dict_of_ints_values(self):
        fetched = self.husker.all("dict_of_ints | values(@)")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(sorted(fetched.raw), [1, 2, 3])

    def test_dict_of_ints_to_array(self):
        fetched = self.husker.all("dict_of_ints | values(@) | to_array(@)")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(sorted(fetched.raw), [1, 2, 3])

    def test_dict_of_ints_sort(self):
        fetched = self.husker.all("dict_of_ints | values(@) | sort(@)")
        self.assertIsInstance(fetched, ListHusker)
        self.assertEqual(sorted(fetched.raw), [1, 2, 3])

    # disabled upstream -- escaping of special characters in keys is unresolved
    # def test_simple_string_funny_chars(self):
    #     self.assertEqual(
    #         self.husker.one("`funny chars in key: '\\\"\\`!#$%^&*()_=-+[{}];:\\|/?,<>.`"),
    #         "boom",
    #     )
#----------------------------------------------------------------------------------------------------------------------------------
|
<gh_stars>0
import json
import re
import requests
import argparse
import en_core_web_sm
from flask import jsonify, request
from nltk.tokenize import sent_tokenize
import numpy as np
import pandas as pd
from scipy import spatial
TOPK = 3
"""
This is a simple script for invoking summarization server.
It needs a running OpenNMT server with extractive model 0 and abstractive sumamrizator as 1
"""
class USPServer():
    """Client for a two-stage (extractive then abstractive) summarization
    pipeline served by an OpenNMT translation server, with a TF-Serving
    Universal Sentence Encoder (USE) endpoint used to rank candidate
    summaries.
    """

    # parameters are id's of the models in available_models/conf.json
    def __init__(self, extractive_id=0, abstractive_id=1, host="0.0.0.0", port=5000, server_path="translator/translate"):
        self.extractive_id = extractive_id
        self.abstractive_id = abstractive_id
        self.nlp = en_core_web_sm.load()
        # field separator used by the tagged model-input format
        self.SEPARATOR = "│" #│|
        self.host = host
        self.port = port
        self.server_path = server_path
        # splits sentences glued together without a space ("...end.Next...")
        self.splitregex = re.compile(r"(?<!^)\.(?=[A-Z])")
        # characters removed from reviews before tagging
        self.review_clean_regex = re.compile(r'[\'\-\(\)&]')
        # TO DO move these constants to config
        self.HOST_USE = "0.0.0.0"
        self.PORT_USE = 8501
        self.PATH_USE = "v1/models/universal_encoder:predict"

    # add features for abstractive summarization input
    def tag(self, reviews):
        """Convert raw review sentences into the SEPARATOR-delimited
        lemma|POS|dep|NER|IOB token format the abstractive model expects.
        Returns one line of tagged tokens per review."""
        ret = ""
        for r in reviews:
            # BUG FIX: re.sub returns the cleaned string -- the original
            # discarded the result, so the cleanup regex never took effect.
            r = self.review_clean_regex.sub(' ', r)
            doc = self.nlp(r)
            word_id = -1
            for word in doc:
                ner = "_"
                if word.ent_type_ != "":
                    ner = word.ent_type_
                word_id += 1
                lemma = word.lemma_
                if lemma == "-PRON-":
                    # spaCy lemmatizes pronouns to "-PRON-"; keep the surface form
                    lemma = word.text_with_ws.strip().lower()
                if lemma != "":
                    ret += lemma + self.SEPARATOR + word.pos_ + self.SEPARATOR + word.dep_ + self.SEPARATOR + ner + self.SEPARATOR + word.ent_iob_ + " "
            ret += "\n"
        return ret

    def _summarize_single(self, text):
        """Run the full pipeline on one sentence-tokenized document: extract
        the best sentences, then abstractively rewrite each extract."""
        extractive_tagged, extractive_original, scores, pred_vec = self._extractive_summary(text)
        abstractive = []
        for tagged in extractive_tagged:
            abstractive.append(self._query_server(tagged, self.abstractive_id)[0])
        return abstractive, extractive_original, scores, pred_vec

    def _query_server(self, text, model_id):
        """POST *text* to the OpenNMT server for *model_id*; return parallel
        lists of predictions, source representations and prediction scores."""
        url = 'http://' + self.host + ":" + str(self.port) + "/" + self.server_path
        data = [{"src": text, "id": model_id}]
        headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
        r = requests.post(url, data=json.dumps(data), headers=headers)
        obj = json.loads(r.text) #[0][0]["tgt"]
        ret = []
        ret_src = []
        ret_scores = []
        if len(obj) > 0 and isinstance(obj, list):
            for o in obj[0]:
                ret.append(o["tgt"])
                ret_src.append((o["src"]))
                ret_scores.append((o["pred_score"]))
        else:
            print("PROBLEM "+str(obj))
        return ret, ret_src, ret_scores

    def get_USE_vector(self, texts):
        """Fetch Universal Sentence Encoder embeddings for *texts* from the
        TF-Serving endpoint; returns the decoded JSON response."""
        url = 'http://' + self.HOST_USE + ":" + str(self.PORT_USE) + "/" + self.PATH_USE
        data = {"instances": [texts]}
        headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
        r = requests.post(url, data=json.dumps(data), headers=headers)
        ret = json.loads(r.text)
        return ret

    def _extractive_summary(self, data):
        """Select, for each target produced by the extractive model, the TOPK
        input sentences closest (cosine distance) to the predicted vector.

        Returns (tagged extracts, untagged extracts, per-target best
        distances, raw prediction vectors)."""
        distances_pred = []
        predicted_vector, srcs, scores = self._query_server(data, self.extractive_id)
        predicted_vector = np.array(predicted_vector)
        srcs = np.array(srcs[0][0])
        all_sets = data
        # ascending sort: smaller cosine distance == better match
        sort_descending = False
        ret_tagged = []
        ret_untagged = []
        ret_best_scores = []
        for target_id in range(predicted_vector.shape[1]):
            distances_pred.append([])
            sent_id = 0.0
            for vec in srcs:
                v = np.array(vec)
                dist = spatial.distance.cosine(v, predicted_vector[0, target_id])
                distances_pred[target_id].append(dist)  # + sent_id/100
                sent_id += 1.0
            sortd = sorted(zip(distances_pred[target_id], all_sets, range(len(all_sets))), reverse=sort_descending)
            index = 0
            best_sents = []
            best_scores = []
            while len(best_sents) < TOPK:
                if index >= len(sortd):
                    break
                curr = sortd[index]
                best_sents.append(curr[1])
                best_scores.append(curr[0])
                index += 1
            ret_best_scores.append(best_scores)
            local_review_string = ""
            untagged = best_sents[0:TOPK]
            for best in untagged[0:TOPK]:
                local_review_string += self.tag([best])
            ret_tagged.append(local_review_string)
            ret_untagged += untagged
        return ret_tagged, ret_untagged, ret_best_scores, predicted_vector

    def select_best_summary(self, in_summaries, extractive_texts, pred_vec=None):
        """Return the candidate summary whose USE embedding is closest to
        pred_vec.

        NOTE(review): extractive_texts is currently unused -- kept for
        interface compatibility; confirm whether it should factor in.
        """
        target_vec = np.array(pred_vec)
        dists = []
        # cosine distance is bounded by 2.0, so this sentinel always loses
        best_dist = 10.3
        best_str = ""
        for s in in_summaries:
            vec = np.array(self.get_USE_vector(s)["predictions"])
            dist = spatial.distance.cosine(target_vec, vec)
            # print(s+" "+str(dist))
            dists.append((s, dist))
            if dist < best_dist:
                best_str = s
                best_dist = dist
        # dists.append((dist))
        # if best_str=="":
        #     print(dists)
        return best_str

    def clean_inputs(self, inputs):
        """Split glued-together sentences ("end.Next") in each input string."""
        ret = []
        for inp in inputs:
            r = self.splitregex.split(inp)
            for r2 in r:
                ret.append(r2)
        return ret

    def summarize(self, inputs):
        """End-to-end summarization: one best abstractive summary plus the
        extracted sentences, per input document."""
        out_summaries = []
        out_extracted = []
        for input in inputs:
            input = sent_tokenize(input)
            summary, extracted, scores, pred_vec = self._summarize_single(input)
            summary = self.select_best_summary(summary[0][0], extracted, pred_vec)
            out_summaries.append(summary)
            out_extracted.append(extracted)
        return out_summaries, out_extracted

    def extractive(self, inputs):
        """Extractive-only summarization: the selected sentences per document."""
        out_extracted = []
        out_scores = []
        for input in inputs:
            input = sent_tokenize(input)
            extracted_tagged, extracted, scores, pred_vec = self._extractive_summary(input)
            out_extracted.append(extracted)
            out_scores.append(scores)
        return out_extracted
def read_sources(in_path):
    """Read the file at in_path and return its lines (newlines included)."""
    with open(in_path) as source_file:
        return source_file.readlines()
def save_text(summaries, extracted, file_name):
    """Write summaries/extracted as a ';'-separated CSV at file_name.

    When summaries is None only the Extracted column is written.
    """
    if summaries is not None:
        columns = {"Summaries": summaries, "Extracted": extracted}
    else:
        columns = {"Extracted": extracted}
    pd.DataFrame(columns).to_csv(file_name, sep=';', quotechar='"', encoding='utf-8')
if __name__ == "__main__":
    # CLI entry point: read one document per line, summarize, dump to CSV.
    parser = argparse.ArgumentParser(description="USE summarization inference")
    parser.add_argument("--server_ip", "-ip", type=str, default="0.0.0.0", help="Summarization server ip address")
    parser.add_argument("--port", type=int, default=5000, help="Summarization server port")
    parser.add_argument("--path", type=str, default="translator/translate", help="Summarization server path")
    parser.add_argument("--input_file", "-i", type=str, default="data/sample/src-test-txt.txt", help="File containing input for summarization")
    parser.add_argument("--output_file", "-o", type=str, default="data/sample/out.csv", help="File where the output is stored")
    parser.add_argument("--type", "-t", type=str, default="e2e", choices=['extractive','e2e'], help='type of summarization. Available options are extractive and e2e . e2e stands for end-to-end summarization.')
    cli_args = parser.parse_args()

    source_lines = read_sources(cli_args.input_file)
    server = USPServer(extractive_id=0, abstractive_id=1, host=cli_args.server_ip, port=cli_args.port, server_path=cli_args.path)
    if cli_args.type == "extractive":
        extracted = server.extractive(source_lines)
        summaries = None
    else:
        summaries, extracted = server.summarize(source_lines)
    save_text(summaries=summaries, extracted=extracted, file_name=cli_args.output_file)
|
<reponame>Sajjad2117/FastAPI-OAuth2-JWT-based<filename>main.py
from datetime import timedelta, datetime

from fastapi import FastAPI, Depends, HTTPException, status
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
import jwt
from passlib.hash import bcrypt
from tortoise import fields
from tortoise.contrib.fastapi import register_tortoise
from tortoise.contrib.pydantic import pydantic_model_creator
from tortoise.exceptions import DoesNotExist
from tortoise.models import Model
app = FastAPI()
JWT_SECRET = 'myjwtsecret'
class User(Model):
    # Tortoise ORM model backing the auth endpoints.
    id = fields.IntField(pk=True)  # auto-incrementing primary key
    username = fields.CharField(32, unique=True)
    password_hash = fields.CharField(128)  # bcrypt hash, never the raw password

    def verify_password(self, password):
        # Check a plaintext password against the stored bcrypt hash.
        return bcrypt.verify(password, self.password_hash)
# Pydantic schemas generated from the ORM model: User_Pydantic for responses,
# UserIn_Pydantic (without read-only fields such as id) for request bodies.
User_Pydantic = pydantic_model_creator(User, name='User')
UserIn_Pydantic = pydantic_model_creator(User, name='UserIn', exclude_readonly=True)
# Extracts the bearer token from incoming requests.
# NOTE(review): tokenUrl is 'token' but the token-issuing route below is
# '/api/auth/api_key/' -- confirm which one the OpenAPI docs should advertise.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl='token')
async def authenticate_user(username: str, password: str):
    """Return the matching User when username/password are valid, else False.

    BUG FIX: User.get() raises DoesNotExist for an unknown username, which
    would bubble up as a 500 instead of reaching the falsy check below;
    get_or_none() returns None so the failure path is taken as intended.
    """
    user = await User.get_or_none(username=username)
    if not user:
        return False
    if not user.verify_password(password):
        return False
    return user
@app.post('/api/auth/api_key/')
async def generate_token(form_data: OAuth2PasswordRequestForm = Depends()):
    """Issue a JWT for credentials posted as an OAuth2 password form.

    Raises:
        HTTPException: 401 when the username/password pair is invalid.
    """
    user = await authenticate_user(form_data.username, form_data.password)
    if not user:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Invalid username or password'
        )
    user_obj = await User_Pydantic.from_tortoise_orm(user)
    # SECURITY FIX: sign only the claims the API needs. The original encoded
    # the whole user object -- including password_hash -- into the token, and
    # a JWT payload is merely base64-encoded, i.e. readable by anyone holding
    # the token.
    token = jwt.encode({'id': user_obj.id, 'username': user_obj.username}, JWT_SECRET)
    # TODO(review): consider adding an 'exp' claim; tokens currently never expire.
    return {'access_token': token, 'token_type': 'bearer'}
async def get_current_user(token: str = Depends(oauth2_scheme)):
    """Resolve the bearer token to its User, or raise 401.

    Raises:
        HTTPException: 401 when the token is invalid or the user is unknown.
    """
    try:
        payload = jwt.decode(token, JWT_SECRET, algorithms=['HS256'])
        user = await User.get(id=payload.get('id'))
    # Catch only the expected failures (malformed/forged token, unknown user
    # id); the original bare `except:` also swallowed programming errors and
    # even KeyboardInterrupt.
    except (jwt.PyJWTError, DoesNotExist):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail='Invalid username or password'
        )
    return await User_Pydantic.from_tortoise_orm(user)
@app.post('/auth', response_model=User_Pydantic)
async def create_user(user: UserIn_Pydantic):
    """Create a user, storing a bcrypt hash of the submitted password.

    BUG FIX: the hashing call had been replaced by a `<PASSWORD>` scrub
    placeholder, which is a NameError at runtime; restore bcrypt.hash().
    Note the incoming field is named password_hash by the generated schema
    but carries the plaintext password -- it is hashed before persisting.
    """
    user_obj = User(username=user.username, password_hash=bcrypt.hash(user.password_hash))
    await user_obj.save()
    return await User_Pydantic.from_tortoise_orm(user_obj)
@app.get('/api/auth/me/', response_model=User_Pydantic)
async def get_user(user: User_Pydantic = Depends(get_current_user)):
    """Return the profile of the currently authenticated user."""
    return user
# Bind Tortoise ORM to the FastAPI app: file-based SQLite DB, models
# discovered in this module, schema auto-created on startup, and ORM
# exceptions mapped to HTTP error responses.
register_tortoise(
    app,
    db_url='sqlite://db.sqlite3',
    modules={'models': ['main']},
    generate_schemas=True,
    add_exception_handlers=True
)
# class User(BaseModel):
# username: str
# password: str
#
# @validator('password')
# def password_alphanumeric(cls, v):
# assert v.isalnum(), 'must be alphanumeric'
# return v
#
#
# @app.post("/api/auth/api_key/")
# def api_key():
# return {"Hello": "World"}
#
#
# @app.get("/api/auth/me/")
# def read_me(item_id: int, q: Optional[str] = None):
# return {"item_id": item_id, "q": q}
#
#
# @app.put("/api/users/reset_password/")
# def reset_password(item_id: int, item: Item):
# return {"item_name": item.name, "item_id": item_id}
|
<filename>common/python_common/sql_manipulate.py
# -*- coding: utf-8 -*-
#
# python_common/sql_manipulate.py
#
# Sep/04/2017
#
# --------------------------------------------------------
import sys
import string
import datetime
from time import localtime,strftime
#
from to_utf8 import to_utf8_proc
#
# --------------------------------------------------------
def sql_to_dict_proc(cursor):
    """Load cities rows whose id starts with "t" into a dict.

    Returns {id: {'name': ..., 'population': ..., 'date_mod': str}}.
    Expects a dict-style cursor (rows indexable by column name).
    """
    query = u"select id, name, population, date_mod from cities order by id"
    cursor.execute(query)
    result = {}
    for record in cursor.fetchall():
        # only ids beginning with "t" are kept
        if record['id'][0] == "t":
            result[record['id']] = {
                'name': record['name'],
                'population': record['population'],
                'date_mod': str(record['date_mod']),
            }
    return result
#
# --------------------------------------------------------
def sql_display_proc(cursor):
    """Print id, name, population and date_mod for every cities row."""
    print("*** display start ***")
    query = u"select id, name, population, date_mod from cities order by id"
    cursor.execute(query)
    for row in cursor.fetchall():
        print(row[0], row[1], row[2], row[3])
    print("*** display end ***")
#
# --------------------------------------------------------
def sql_update_proc(cursor_aa, id_in, ipop_in):
    """Update population (and date_mod) of the city whose id is id_in."""
    statement = sql_update_string_gen_proc(id_in, ipop_in)
    cursor_aa.execute(statement)
# --------------------------------------------------------
def sql_update_string_gen_proc(id_in, ipop_in):
    """Build (and echo) the UPDATE statement for one city row.

    date_mod is stamped with today's local date.
    NOTE(review): values are interpolated straight into the SQL text; fine
    for this trusted maintenance script, but use parameterized queries if
    id_in ever comes from untrusted input.
    """
    date_mod = strftime("%Y-%m-%d", localtime())
    sql_str = (u"update cities set population = %d, " % ipop_in
               + u"date_mod='%s' " % date_mod
               + u"where id = '%s'" % id_in)
    print(sql_str)
    return sql_str
# --------------------------------------------------------
def sql_delete_proc(cursor_aa, key_in):
    """Delete the cities row whose id equals key_in."""
    print("*** sql_delete_proc ***")
    cursor_aa.execute(sql_delete_string_gen_proc(key_in))
# --------------------------------------------------------
def sql_delete_string_gen_proc(key_in):
    """Build (and echo) the DELETE statement for one city id."""
    sql_str = u"delete from cities where id = '%s'" % key_in
    print(sql_str)
    return sql_str
# --------------------------------------------------------
def table_insert_proc(cursor, dict_aa):
    """Insert every entry of dict_aa ({id: unit-dict}) into cities.

    Failures are logged to stderr per row; the loop keeps going so one bad
    record does not abort the whole load.
    """
    for key in dict_aa:
        unit = dict_aa[key]
        try:
            sql_insert_proc(cursor, key, unit['name'],
                            unit['population'], unit['date_mod'])
        except Exception as ee:
            sys.stderr.write("*** error *** table_insert_proc ***\n")
            sys.stderr.write(str(ee) + "\n")
            sys.stderr.write(key + "\n")
#
# --------------------------------------------------------
def sql_insert_proc(cursor_aa, id_in, name_in, ipop_in, date_mod_in):
    """Insert one row into the cities table."""
    sql_str = ("insert into cities (id,name,population,date_mod) values ("
               + "'%s','%s',%d,'%s')" % (id_in, name_in, ipop_in, date_mod_in))
    cursor_aa.execute(sql_str)
#
#
# --------------------------------------------------------
def create_table_proc(cursor_aa):
    """Create the cities table, logging (not raising) on failure.

    A failure is expected and tolerated when the table already exists.
    """
    sql_str = ("create table cities (id varchar(10) primary key, name varchar(20),"
               + " population int, date_mod date)")
    try:
        cursor_aa.execute(sql_str)
    except Exception as ee:
        sys.stderr.write("*** error *** create_table_proc ***\n")
        sys.stderr.write(str(ee) + "\n")
#
# --------------------------------------------------------
def drop_table_proc(cursor_aa):
    """Drop the cities table, logging (not raising) on failure.

    A failure is expected and tolerated when the table does not exist.
    """
    sql_str = u"drop table cities"
    try:
        cursor_aa.execute(sql_str)
    except Exception as ee:
        sys.stderr.write("*** error *** drop_table_proc ***\n")
        sys.stderr.write(str(ee) + "\n")
#
# --------------------------------------------------------
|
<reponame>U8NWXD/vivarium<filename>vivarium/library/dict_utils.py
from __future__ import absolute_import, division, print_function
import collections
import copy
from functools import reduce
import operator
tuple_separator = '___'
def merge_dicts(dicts):
    """Shallow-merge an iterable of dicts; later entries win on key conflicts."""
    combined = {}
    for mapping in dicts:
        combined.update(mapping)
    return combined
def deep_merge_check(dct, merge_dct):
    '''
    Recursive dict merge, which throws exceptions for conflicting values

    This mutates dct - the contents of merge_dct are added to dct (which is also returned).
    If you want to keep dct you could call it like deep_merge(dict(dct), merge_dct)'''
    # BUG FIX: collections.Mapping was removed in Python 3.10;
    # collections.abc is the correct home for the ABC on all Python 3.
    from collections.abc import Mapping
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(merge_dct[k], Mapping)):
            try:
                deep_merge_check(dct[k], merge_dct[k])
            # narrowed from the original bare `except:`, which would also
            # trap SystemExit/KeyboardInterrupt
            except Exception:
                raise Exception('dict merge mismatch: key "{}" has values {} AND {}'.format(k, dct[k], merge_dct[k]))
        elif k in dct and (dct[k] is not merge_dct[k]):
            # NOTE(review): this is an identity (`is not`) check, so two equal
            # but distinct values also count as a conflict -- confirm intended.
            raise Exception('dict merge mismatch: key "{}" has values {} AND {}'.format(k, dct[k], merge_dct[k]))
        else:
            dct[k] = merge_dct[k]
    return dct
def deep_merge_combine_lists(dct, merge_dct):
    '''
    Recursive dict merge, combines values that are lists

    This mutates dct - the contents of merge_dct are added to dct (which is also returned).
    If you want to keep dct you could call it like deep_merge(dict(dct), merge_dct)'''
    # BUG FIX: collections.Mapping was removed in Python 3.10;
    # collections.abc is the correct home for the ABC on all Python 3.
    from collections.abc import Mapping
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(merge_dct[k], Mapping)):
            # NOTE(review): nested dicts are merged with plain deep_merge, so
            # lists below the top level are overwritten rather than combined
            # -- confirm that is intended.
            deep_merge(dct[k], merge_dct[k])
        elif k in dct and isinstance(dct[k], list) and isinstance(v, list):
            # top-level list values are concatenated instead of replaced
            dct[k].extend(v)
        else:
            dct[k] = merge_dct[k]
    return dct
def deep_merge(dct, merge_dct):
    '''
    Recursive dict merge

    This mutates dct - the contents of merge_dct are added to dct (which is also returned).
    If you want to keep dct you could call it like deep_merge(dict(dct), merge_dct)'''
    # BUG FIX: collections.Mapping was removed in Python 3.10;
    # collections.abc is the correct home for the ABC on all Python 3.
    from collections.abc import Mapping
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(merge_dct[k], Mapping)):
            deep_merge(dct[k], merge_dct[k])
        else:
            dct[k] = merge_dct[k]
    return dct
def flatten_port_dicts(dicts):
    '''
    Input:
        dicts (dict): embedded state dictionaries with the {'port_id': {'state_id': state_value}}
    Return:
        merge (dict): flattened dictionary with {'state_id_port_id': value}
    '''
    return {
        state + '_' + port: value
        for port, states_dict in dicts.items()
        for state, value in states_dict.items()
    }
def tuplify_port_dicts(dicts):
    '''
    Input:
        dicts (dict): embedded state dictionaries with the {'port_id': {'state_id': state_value}}
    Return:
        merge (dict): tuplified dictionary with {('port_id', 'state_id'): value}
    '''
    tuplified = {}
    for port, states_dict in dicts.items():
        if not states_dict:
            # skip ports with no states (empty dict or None)
            continue
        for state, value in states_dict.items():
            tuplified[(port, state)] = value
    return tuplified
def flatten_timeseries(timeseries):
    '''Flatten a timeseries in the style of flatten_port_dicts'''
    flat = {}
    for port, store_dict in timeseries.items():
        if port == 'time':
            # the time axis is passed through unflattened
            flat[port] = timeseries[port]
        else:
            for variable_name, values in store_dict.items():
                flat["{}_{}".format(port, variable_name)] = values
    return flat
def tuple_to_str_keys(dictionary):
    """Return a deep copy of *dictionary* in which every tuple key
    (recursively) has been joined into a tuple_separator-delimited string."""
    converted = copy.deepcopy(dictionary)
    make_str_dict(converted)
    return converted
def make_str_dict(dictionary):
    """In place, replace every tuple key (recursively) with the
    tuple_separator-joined string; tuples inside list values are converted
    too. Returns the mutated dictionary."""
    # recurse to the leaves first so nested structures are converted
    for key, value in dictionary.items():
        if isinstance(value, dict):
            make_str_dict(value)
        if isinstance(value, list):
            for idx, item in enumerate(value):
                if isinstance(item, tuple):
                    value[idx] = tuple_separator.join(item)
                if isinstance(item, dict):
                    make_str_dict(item)
    # then rewrite this level's tuple keys
    for tuple_key in [k for k in dictionary.keys() if isinstance(k, tuple)]:
        dictionary[tuple_separator.join(tuple_key)] = dictionary[tuple_key]
        del dictionary[tuple_key]
    return dictionary
def str_to_tuple_keys(dictionary):
    """In place, split every tuple_separator-delimited string key
    (recursively) back into a tuple; matching strings inside list values are
    converted too. Returns the mutated dictionary."""
    # recurse to the leaves first so nested structures are converted
    for key, value in dictionary.items():
        if isinstance(value, dict):
            str_to_tuple_keys(value)
        if isinstance(value, list):
            for idx, item in enumerate(value):
                if isinstance(item, str) and tuple_separator in item:
                    value[idx] = tuple(item.split(tuple_separator))
                if isinstance(item, dict):
                    str_to_tuple_keys(item)
    # then rewrite this level's delimited string keys
    for str_key in [k for k in dictionary.keys() if isinstance(k, str) and tuple_separator in k]:
        dictionary[tuple(str_key.split(tuple_separator))] = dictionary[str_key]
        del dictionary[str_key]
    return dictionary
def keys_list(d):
    """Return the keys of *d* as a list (insertion order preserved)."""
    return [key for key in d]
def value_in_embedded_dict(data, timeseries=None):
    '''converts data from a single time step into an embedded dictionary with lists of values

    BUG FIX: the default used to be the mutable literal `{}`, which Python
    evaluates once, so successive calls without an explicit timeseries all
    accumulated into the same shared dict. Use the None sentinel instead.
    '''
    if timeseries is None:
        timeseries = {}
    for key, value in data.items():
        if isinstance(value, dict):
            if key not in timeseries:
                timeseries[key] = {}
            timeseries[key] = value_in_embedded_dict(value, timeseries[key])
        else:
            # leaf values are appended to a growing per-key list
            if key not in timeseries:
                timeseries[key] = []
            timeseries[key].append(value)
    return timeseries
def get_path_list_from_dict(dictionary):
    """Return every leaf path in *dictionary* as a tuple of keys."""
    paths = []
    for key, value in dictionary.items():
        if isinstance(value, dict):
            # prefix this key onto each path found in the sub-dict
            paths.extend((key,) + sub for sub in get_path_list_from_dict(value))
        else:
            paths.append((key,))
    return paths
def get_value_from_path(dictionary, path):
    """Follow *path* (an iterable of keys) down into nested *dictionary*."""
    node = dictionary
    for key in path:
        node = node[key]
    return node
def make_path_dict(embedded_dict):
    '''converts embedded dict to a flat dict with path names as keys'''
    return {
        path: get_value_from_path(embedded_dict, path)
        for path in get_path_list_from_dict(embedded_dict)
    }
|
<reponame>microsoft/ATAC
import os
import time
from collections import namedtuple
from dowel import logger, tabular
import numpy as np
from garage.trainer import Trainer as garageTrainer
from garage.trainer import TrainArgs, NotSetupError
from garage.experiment.experiment import dump_json
from .utils import read_attr_from_csv
class Trainer(garageTrainer):
""" A modifed version of the Garage Trainer.
This subclass adds
1) a light saving mode to minimze the stroage usage (only saving the
networks, not the trainer and the full algo.)
2) a ignore_shutdown flag for running multiple experiments.
3) a return_attr option.
4) a cpu data collection mode.
5) logging of sampling time.
6) logging of current epoch index.
"""
# Add a light saving mode to minimze the stroage usage.
# Add return_attr, return_mode options.
# Add a cpu data collection mode.
def setup(self, algo, env,
          force_cpu_data_collection=False,
          save_mode='light',
          return_mode='average',
          return_attr='Evaluation/AverageReturn'):
    """Set up trainer for algorithm and environment.

    This method saves algo and env within trainer and creates a sampler.

    Note:
        After setup() is called all variables in session should have been
        initialized. setup() respects existing values in session so
        policy weights can be loaded before setup().

    Args:
        algo (RLAlgorithm): An algorithm instance. If this algo want to use
            samplers, it should have a `_sampler` field.
        env (Environment): An environment instance.
        force_cpu_data_collection (bool): Collect data on cpu.
        save_mode (str): 'light' or 'full'
        return_mode (str): 'full', 'average', or 'last'
        return_attr (str): the name of the logged attribute

    Raises:
        ValueError: If save_mode or return_mode has an unrecognized value.
    """
    super().setup(algo, env)
    # Validate with real exceptions rather than `assert`, which is silently
    # stripped when Python runs with -O.
    if save_mode not in ('light', 'full'):
        raise ValueError("save_mode must be 'light' or 'full', got {!r}".format(save_mode))
    if return_mode not in ('full', 'average', 'last'):
        raise ValueError("return_mode must be 'full', 'average' or 'last', got {!r}".format(return_mode))
    self.save_mode = save_mode
    self.return_mode = return_mode
    self.return_attr = return_attr
    self.force_cpu_data_collection = force_cpu_data_collection
    self._sampling_time = 0.
# Add a light saving mode (which saves only policy and value functions of an algorithm)
def save(self, epoch):
    """Save snapshot of current batch.

    Args:
        epoch (int): Epoch.

    Raises:
        NotSetupError: if save() is called before the trainer is set up.
    """
    if not self._has_setup:
        raise NotSetupError('Use setup() to setup trainer before saving.')
    logger.log('Saving snapshot...')
    params = dict()
    # Save arguments
    params['seed'] = self._seed
    params['train_args'] = self._train_args
    params['stats'] = self._stats
    if self.save_mode=='light':
        # Only save networks
        networks = self._algo.networks
        keys = []
        values = []
        # collect the algo attribute names whose values are networks, so the
        # snapshot records each network under its original attribute name
        for k, v in self._algo.__dict__.items():
            if v in networks:
                keys.append(k)
                values.append(v)
        # pack the networks into an ad-hoc namedtuple (named <Algo>Networks)
        # so the snapshot carries an introspectable type instead of the full
        # algo object; rename=True guards against invalid field names
        AlgoData = namedtuple(type(self._algo).__name__+'Networks',
                              field_names=keys,
                              defaults=values,
                              rename=True)
        params['algo'] = AlgoData()
    elif self.save_mode=='full':
        # Default behavior: save everything
        # Save states
        params['env'] = self._env
        params['algo'] = self._algo
        params['n_workers'] = self._n_workers
        params['worker_class'] = self._worker_class
        params['worker_args'] = self._worker_args
    else:
        raise ValueError('Unknown save_mode.')
    self._snapshotter.save_itr_params(epoch, params)
    logger.log('Saved')
# Include ignore_shutdown flag
def train(self,
          n_epochs,
          batch_size=None,
          plot=False,
          store_episodes=False,
          pause_for_plot=False,
          ignore_shutdown=False):
    """Start training.

    Args:
        n_epochs (int): Number of epochs.
        batch_size (int or None): Number of environment steps in one batch.
        plot (bool): Visualize an episode from the policy after each epoch.
        store_episodes (bool): Save episodes in snapshot.
        pause_for_plot (bool): Pause for plot.
        ignore_shutdown (bool): If True, skip shutting down the sampler
            workers after training (useful when the trainer is reused).

    Raises:
        NotSetupError: If train() is called before setup().
        NotImplementedError: If ``return_mode`` is not one of 'full',
            'average', or 'last'.

    Returns:
        float: Score derived from the logged progress according to
            ``return_mode`` (the last epoch's return for 'last').
    """
    if not self._has_setup:
        raise NotSetupError(
            'Use setup() to setup trainer before training.')
    # Save arguments for restore
    self._train_args = TrainArgs(n_epochs=n_epochs,
                                 batch_size=batch_size,
                                 plot=plot,
                                 store_episodes=store_episodes,
                                 pause_for_plot=pause_for_plot,
                                 start_epoch=0)
    self._plot = plot
    self._start_worker()
    log_dir = self._snapshotter.snapshot_dir
    if self.save_mode != 'light':
        summary_file = os.path.join(log_dir, 'experiment.json')
        dump_json(summary_file, self)
    # Train the agent
    last_return = self._algo.train(self)
    # Ignore shutdown, if needed
    if not ignore_shutdown:
        self._shutdown_worker()
    # Return other statistics from logged data
    csv_file = os.path.join(log_dir, 'progress.csv')
    progress = read_attr_from_csv(csv_file, self.return_attr)
    progress = progress if progress is not None else 0
    if self.return_mode == 'average':
        score = np.mean(progress)
    elif self.return_mode == 'full':
        score = progress
    elif self.return_mode == 'last':
        score = last_return
    else:
        # BUG FIX: the original referenced NotImplementedError without
        # raising it, silently falling through with `score` unbound.
        raise NotImplementedError(
            'Unknown return_mode: {}'.format(self.return_mode))
    return score
# Add a cpu data collection mode
def obtain_episodes(self,
                    itr,
                    batch_size=None,
                    agent_update=None,
                    env_update=None):
    """Obtain one batch of episodes.

    Args:
        itr (int): Index of iteration (epoch).
        batch_size (int): Number of steps in batch. This is a hint that the
            sampler may or may not respect.
        agent_update (object): Value which will be passed into the
            `agent_update_fn` before doing sampling episodes. If a list is
            passed in, it must have length exactly `factory.n_workers`, and
            will be spread across the workers.
        env_update (object): Value which will be passed into the
            `env_update_fn` before sampling episodes. If a list is passed
            in, it must have length exactly `factory.n_workers`, and will
            be spread across the workers.

    Raises:
        ValueError: If the trainer was initialized without a sampler, or
            batch_size wasn't provided here or to train.

    Returns:
        EpisodeBatch: Batch of episodes.
    """
    if self._sampler is None:
        raise ValueError('trainer was not initialized with `sampler`. '
                         'the algo should have a `_sampler` field when'
                         '`setup()` is called')
    # BUG FIX: `_train_args` is only populated by train(); guard against
    # it still being None so callers get the documented ValueError
    # instead of an AttributeError.
    if batch_size is None and (self._train_args is None
                               or self._train_args.batch_size is None):
        raise ValueError(
            'trainer was not initialized with `batch_size`. '
            'Either provide `batch_size` to trainer.train, '
            ' or pass `batch_size` to trainer.obtain_samples.')
    episodes = None
    if agent_update is None:
        policy = getattr(self._algo, 'exploration_policy', None)
        if policy is None:
            # This field should exist, since self.make_sampler would have
            # failed otherwise.
            policy = self._algo.policy
        agent_update = policy.get_param_values()
    # Move the tensors to cpu before shipping them to sampler workers.
    # Assumes agent_update is a dict of torch tensors here — the
    # force_cpu_data_collection flag is only meaningful for that case.
    if self.force_cpu_data_collection:
        for k, v in agent_update.items():
            if v.device.type != 'cpu':
                agent_update[k] = v.to('cpu')
    # Time data collection so log_diagnostics can report SamplingTime.
    _start_sampling_time = time.time()
    episodes = self._sampler.obtain_samples(
        itr, (batch_size or self._train_args.batch_size),
        agent_update=agent_update,
        env_update=env_update)
    self._sampling_time = time.time() - _start_sampling_time
    self._stats.total_env_steps += sum(episodes.lengths)
    return episodes
# Log sampling time and Epoch
def log_diagnostics(self, pause_for_plot=False):
    """Log diagnostics: wall-clock times, env steps, and epoch number.

    Args:
        pause_for_plot (bool): Pause for plot.
    """
    logger.log('Time %.2f s' % (time.time() - self._start_time))
    logger.log('EpochTime %.2f s' % (time.time() - self._itr_start_time))
    logger.log('SamplingTime %.2f s' % (self._sampling_time))
    tabular.record('TotalEnvSteps', self._stats.total_env_steps)
    tabular.record('Epoch', self.step_itr)
    logger.log(tabular)
    if self._plot:
        self._plotter.update_plot(self._algo.policy,
                                  self._algo.max_episode_length)
        if pause_for_plot:
            # BUG FIX: the prompt previously contained a stray '" "'
            # left over from a broken line continuation.
            input('Plotting evaluation run: Press Enter to continue...')
class BatchTrainer(Trainer):
    """Trainer variant for batch (offline) algorithms.

    Environment sampling is disabled: requests for new samples yield
    nothing, so the algorithm trains purely on pre-collected data.
    """

    def obtain_samples(self,
                       itr,
                       batch_size=None,
                       agent_update=None,
                       env_update=None):
        """Return an empty list; no environment interaction occurs."""
        return []
# -*- coding: utf-8 -*-
import logging
import datetime
from flask import request, render_template
from flask_jwt_extended import (
create_access_token,
decode_token
)
from jwt.exceptions import DecodeError
from flasgger import swag_from
from http import HTTPStatus
from pathlib import Path
from sqlalchemy.orm.exc import NoResultFound
from vantage6.common import logger_name
from vantage6.server import db
from vantage6.server.resource import (
ServicesResources
)
module_name = logger_name(__name__)
log = logging.getLogger(module_name)
def setup(api, api_base, services):
    """Register the password-recovery resources with the API.

    Adds the ``/reset`` and ``/lost`` endpoints under this module's path.
    """
    path = "/".join([api_base, module_name])
    log.info(f'Setting up "{path}" and subdirectories')
    # Register both resources with identical settings, table-driven.
    for resource, suffix, endpoint in (
            (ResetPassword, '/reset', "reset_password"),
            (RecoverPassword, '/lost', 'recover_password')):
        api.add_resource(
            resource,
            path + suffix,
            endpoint=endpoint,
            methods=('POST',),
            resource_class_kwargs=services
        )
# ------------------------------------------------------------------------------
# Resources / API's
# ------------------------------------------------------------------------------
class ResetPassword(ServicesResources):
    """User can use a recovery token to reset their password."""

    @swag_from(str(Path(r"swagger/post_reset_password.yaml")),
               endpoint='reset_password')
    def post(self):
        """Set a new password using a recovery token.

        Expects a JSON body with ``reset_token`` (issued by the
        RecoverPassword endpoint) and the new ``password``.
        """
        # BUG FIX: the original docstring had a quadruple quote and
        # described the token-request endpoint instead of this one.
        body = request.get_json()
        reset_token = body.get("reset_token")
        password = body.get("password")
        if not reset_token or not password:
            return {"msg": "reset token and/or password is missing!"}, \
                HTTPStatus.BAD_REQUEST
        # obtain user id from the (signed) token
        try:
            user_id = decode_token(reset_token)['identity'].get('id')
        except DecodeError:
            return {"msg": "Invalid recovery token!"}, HTTPStatus.BAD_REQUEST
        log.debug(user_id)
        user = db.User.get(user_id)
        # Robustness: a syntactically valid token may reference a user
        # that no longer exists — fail gracefully rather than crashing.
        # (Assumes db.User.get returns a falsy value for unknown ids —
        # TODO confirm against the db layer.)
        if not user:
            return {"msg": "Invalid recovery token!"}, HTTPStatus.BAD_REQUEST
        # set password
        user.set_password(password)
        user.save()
        log.info(f"Successfull password reset for '{user.username}'")
        return {"msg": "password successfully been reset!"}, \
            HTTPStatus.OK
class RecoverPassword(ServicesResources):
    """Send a mail containing a recover token."""

    @swag_from(str(Path(r"swagger/post_recover_password.yaml")),
               endpoint='recover_password')
    def post(self):
        """Generate a recovery token for a username/email and mail it."""
        # Reply identically whether or not the account exists, so this
        # endpoint cannot be used to probe for registered users.
        ret = {"msg": "If the username or email is our database you "
                      "will soon receive an email"}

        # obtain username/email from the request body
        body = request.get_json()
        username = body.get("username")
        email = body.get("email")
        if not (email or username):
            return {"msg": "No username or email provided!"}, \
                HTTPStatus.BAD_REQUEST

        # look up the account; if it is unknown we silently stop
        try:
            user = (db.User.get_by_username(username) if username
                    else db.User.get_by_email(email))
        except NoResultFound:
            return ret

        log.info(f"Password reset requested for '{user.username}'")

        # issue a short-lived token that authorizes a password reset
        token_lifetime = datetime.timedelta(hours=1)
        reset_token = create_access_token(
            {"id": str(user.id)}, expires_delta=token_lifetime
        )

        self.mail.send_email(
            "password reset",
            sender="<EMAIL>",
            recipients=[user.email],
            text_body=render_template("mail/reset_password_token.txt",
                                      token=reset_token),
            html_body=render_template("mail/reset_password_token.html",
                                      token=reset_token)
        )
        return ret
|

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/FibonacciNumbers/fibonacci-numbers.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# Fibonacci Numbers
The **Fibonacci** sequence is the set of numbers that starts out like this:
$$0,1,1,2,3,5,8,13,\ldots.$$
It's easy to recognize the pattern here. Each number is the sum of the previous two numbers in the sequence. Except, of course, the first two numbers, 0 and 1, which we put in there to get things started.
This sequence, or pattern of numbers, goes on forever.
These numbers are most commonly known as the **Fibonacci numbers**, after the Italian mathematician **L. Fibonacci** (c. 1200 C.E.). However, these numbers were actually first described hundreds of years before, by Indian mathematicians. The first such mathematician for whom we have written records was **Virahanka** (c. 700 C.E.).
It is difficult to overcome the usage of a name adopted hundreds of years ago, and so in what follows we will refer to the Virahanka-Fibonacci numbers as the "Fibonacci numbers."
**Exercise 1:** Check out the following article about the "so-called Fibonacci" numbers in ancient India: https://www.sciencedirect.com/science/article/pii/0315086085900217
For an excellent exposition about the motivation, poetry, and linguistics of these early mathematicians, and to learn about some fascinating properties of these numbers, check out the following video:
from IPython.display import YouTubeVideo
YouTubeVideo('LP253wHIoO8', start=2633)
It's convenient to label these numbers, so we write $F_0 = 0$, $F_1 = 1$, $F_2 = 1$, and so on. The list of numbers is thus defined **recursively** by the formula
$$ \qquad$$
$$ F_n = F_{n-1} + F_{n-2}.$$
We can check that the first few numbers in the Fibonacci sequence are obtained by that formula, by computing:
$$\begin{eqnarray*}
F_{2} &=&F_{1}+F_{0}=1+0=1 \\
F_{3} &=&F_{2}+F_{1}=1+1=2 \\
F_{4} &=&F_{3}+F_{2}=2+1=3 \\
F_{5} &=&F_{4}+F_{3}=3+2=5 \\
F_{6} &=&F_{5}+F_{4}=5+3=8 \\
F_{7} &=&F_{6}+F_{5}=8+5=13 \\
&&\vdots
\end{eqnarray*}
$$
Here is a list of the first 40 Fibonacci numbers:
$$
\begin{array}{rrrrrrrrrrrrrrr}
0 & & 1 & & 1 & & 2 & & 3 & & 5 & & 8 & & 13 \\
& & & & & & & & & & & & & & \\
21 & & 34 & & 55 & & 89 & & 144 & & 233 & & 377 & & 610 \\
& & & & & & & & & & & & & & \\
987 & & 1597 & & 2584 & & 4181 & & 6765 & & 10946 & & 17711 & & 28657
\\
& & & & & & & & & & & & & & \\
46368 & & 75025 & & 121393 & & 196418 & & 317811 & & 614229 & & 832040
& & 1346269 \\
& & & & & & & & & & & & & & \\
2178309 & & 3524578 & & 5702887 & & 9227465 & & 14930352 & & 24157817 &
& 39088169 & & 63245986%
\end{array}$$
**Exercise 2:** Write a code that computes the first N Fibonacci numbers, saves them into an array, and displays them on the screen.
**WAIT** -- before you read the next cell, try to do Exercise 2!
N = 20 # Set the size of the list we will compute
F=[0,1] # The first two numbers in the list
for i in range(2, N):
F.append(F[i-1]+F[i-2]) # append the next item on the list
print('First',N,'Fibonacci numbers:',F)
**For fun,** we can make a little widget to control how many numbers to print out.
from ipywidgets import interact
def printFib(N=10):
    """Print the first N Fibonacci numbers (N >= 2) as a list."""
    fibs = [0, 1]  # seed values F_0 and F_1
    # Grow the list until it holds N terms, each the sum of the last two.
    while len(fibs) < N:
        fibs.append(fibs[-1] + fibs[-2])
    print(fibs)
interact(printFib, N=(10,100,10));
By moving the slider above, print out the first 100 Fibonacci numbers
As we can see, this sequence grows pretty fast. The Fibonacci numbers seem
to have one more digit after about every five terms in the sequence.
## How fast does it grow?
One of the ways to study the growth of a sequence is to look at ratios between consecutive terms. We look at ratios of pairs of numbers in the Fibonacci sequence.
The first few values are
\begin{eqnarray}
F_2/F_1 &=& 1 \\
F_3/F_2 &=& 2/1 = 2 \\
F_4/F_3 &=& 3/2 = 1.5 \\
F_5/F_4 &=& 5/3 = 1.666... \\
F_6/F_5 &=& 8/5 = 1.6 \\
F_7/F_6 &=& 13/8 = 1.625
\end{eqnarray}
So the ratios are levelling out somewhere around 1.6. We observe that $1.6^5 \approx 10$, which is why after every five terms in the Fibonacci sequence, we get another digit. This tells us we have roughly **exponential growth,** where $F_n$ grows about as quickly as the exponential function $(1.6)^n$.
We can check this computation in Python. We use $ ** $ to take a power, as in the following cell.
(1.6)**5
## The Golden Ratio
We can print out a bunch of these ratios, and plot them, just to see that they do level out. The easiest way to do this is with a bit of Python code. Perhaps you can try this yourself.
**Exercise 3:** Write some code that computes the first N ratios $F_{n+1}/F_n$, saves them into an array, and displays them on the screen.
**WAIT!** Don't read any further until you try the exercises.
%matplotlib inline
from matplotlib.pyplot import *
N = 20
F = [0,1]
R = []
for i in range(2, N):
F.append(F[i-1]+F[i-2]) # append the next item on the list
R.append(F[i]/F[i-1])
figure(figsize=(10,6));
plot(R,'o')
title('The first '+str(N-2)+' Ratios $F_{n+1}/F_n$')
xlabel('$n$')
ylabel('$Ratio$');
print('The first', N-2, 'ratios are:',R)
We see the numbers are levelling out at the value 1.6180339... This number may be familiar to you. It is called the **Golden Ratio.**
We can compute the exact value by observing the ratios satisfy a nice algebraic equation:
$$
\frac{F_{n+2}}{F_{n+1}}=\frac{F_{n+1}+F_{n}}{F_{n+1}}=1+\frac{F_{n}}{F_{n+1}}=1+\frac{1}{\frac{F_{n+1}}{F_{n}}},
$$
or more simply
$$\frac{F_{n+2}}{F_{n+1}}=1+\frac{1}{\frac{F_{n+1}}{F_{n}}}.$$
As $n$ gets larger and larger, the ratios $F_{n+2}/F_{n+1}$ and $F_{n+1}/F_{n}$ tend toward a final value, say $x$. This value must then solve the equation
$$x=1+\frac{1}{x}.$$
We rewrite this as a quadratic equation
$$x^2=x+1$$
which we solve from the quadratic formula
$$ x= \frac{1 \pm \sqrt{1+4}}{2} = \frac{1 \pm \sqrt{5}}{2}.$$
It is the positive solution $x= \frac{1 + \sqrt{5}}{2} = 1.6180339...$ which is called the Golden Ratio.
The **Golden ratio** comes up in art, geometry, and Greek mythology as a perfect ratio that is pleasing to the eye (and to the gods).
For instance, the rectangle shown below is said to have the dimensions of the Golden ratio, because the big rectangle has the same shape as the smaller rectangle inside. Mathematically, we have the ratios of lengths
$$ \frac{a+b}{a} = \frac{a}{b}.$$

Writing $x = \frac{a}{b}$, the above equation simplifies to
$$ 1 + \frac{1}{x} = x,$$
which is the same quadratic equation we saw for the limit of ratios of Fibonacci numbers.
For more information about the Golden ratio see
https://en.wikipedia.org/wiki/Golden_ratio
## A Formula for the Fibonacci Sequence $F_n$
Let's give the Golden ratio a special name. In honour of the ancient Greeks who used it so much, we call it 'phi':
$$ \varphi = \frac{1 + \sqrt{5}}{2}. $$
We'll call the other quadratic root 'psi':
$$ \psi = \frac{1 - \sqrt{5}}{2}. $$
This number $\psi$ is called the **conjugate** of $\varphi$ because it looks the same, except for the negative sign in front of the $\sqrt{5}$.
Here's something **amazing.** It turns out that we have a remarkable formula for the Fibonnaci numbers, in terms of these two Greek numbers. The formula says
$$F_n = \frac{\varphi^n - \psi^n}{\sqrt{5}}.$$
#### Wow!
Seems amazing. And it is handy because now we can compute, say, the thousandth term in the sequence, $F_{1000}$ directly, without having to compute all the other terms that come before.
But, whenever someone gives you a formula, you should check it!
**Exercise 4:** Write a piece of code to show that the formula above, with $\varphi,\psi$ does produce, say, the first 20 Fibonnaci numbers.
**WAIT!** Don't go on until you try writing a program yourself, to compute the Fibonacci numbers using only powers of $\varphi, \psi$.
## SOLUTION (don't peek!)
from numpy import * ## We need this to define square roots
phi = (1 + sqrt(5))/2
psi = (1 - sqrt(5))/2
for n in range(20):
print( (phi**n - psi**n)/sqrt(5) )
Looking at that computer output, it does seem to give Fibonacci numbers, with a bit of numerical error.
## Checking the Math
Doing math, though, we like exact answers and we want to know why. So WHY does this formula $(\phi^n - \psi^n)/\sqrt{5}$ give Fibonacci numbers?
Well, we can check, step by step.
For $n=0$, the formula gives
$$\frac{\varphi^0 - \psi^0}{\sqrt{5}} = \frac{1-1}{\sqrt{5}} = 0,$$ which is $F[0]$, the first Fibonacci number.
For $n=1$, the formula gives
$$\frac{\varphi^1 - \psi^1}{\sqrt{5}} =
\frac{\frac{1 + \sqrt{5}}{2} - \frac{1 -\sqrt{5}}{2} }{\sqrt{5}} = \frac{\sqrt{5}}{\sqrt{5}} = 1,$$ which is $F[1]$, the next Fibonacci number.
For $n=2$, it looks harder because we get the squares $\varphi^2, \psi^2$ in the formula. But then remember that both $\varphi$ and $\psi$ solve the quadratic $x^2 = x+1$, so we know $\varphi^2 = \phi +1$ and $\psi^2 = \psi +1$. So we can write
$$\frac{\phi^2 - \psi^2}{\sqrt{5}} = \frac{\phi + 1 - \psi -1}{\sqrt{5}} = \frac{\phi - \psi }{\sqrt{5}} = 1,$$
since we already calculated this in the $n=1$ step. So this really is $F[2]=1$.
For $n=3,4,5,\ldots$ again it might seem like it will be hard because of the higher powers. But multiplying the formulas $\varphi^2 = \varphi +1$ and $\psi^2 = \psi +1$ by powers of $\phi$ and $\psi$, we get
$$\begin{eqnarray*}
\varphi^2 &=& \varphi +1,\quad \varphi^3 = \varphi^2+\varphi
,\quad \varphi^4=\varphi^3+\varphi^2,\qquad \dots \qquad %
\varphi^{n+2}=\varphi^{n+1}+{\varphi}^n,\quad \text{and} \\
\psi^2 &=&\psi +1,\quad \psi^3=\psi^2+\psi ,\quad \psi^4=\psi^3+\psi^2,\qquad
\dots \qquad \psi^{n+2}=\psi^{n+1}+\psi^n.
\end{eqnarray*}$$
So, assuming we know the generating formula already for $n$ and $n+1$ we can write the next term as
$$\frac{\varphi^{n+2} - \psi^{n+2}}{\sqrt{5}} = \frac{\varphi^{n+1} +\varphi^n - \psi^{n+1} - \psi^n}{\sqrt{5}}
= \frac{\varphi^{n+1} - \psi^{n+1}}{\sqrt{5}} + \frac{\varphi^{n} - \psi^{n}}{\sqrt{5}} = F[n+1] + F[n] = F[n+2].$$
So we do get $\frac{\varphi^{n+2} - \psi^{n+2}}{\sqrt{5}} = F[n+2]$, and the formula holds for all numbers n.
This method of verifying the formula for all n, based on previous values of n, is an example of **mathematical induction.**
## Why did this work?
Well, from the Golden ratio, we have the formula $\varphi^2 = \varphi + 1$, which then gives the formula $\varphi^{n+2} = \varphi^{n+1} + \varphi^n$. This looks a lot like the Fibonacci formula $$F[n+2] = F[n+1] + F[n].$$ The same relation holds for powers of $\psi$.
If we take ANY linear combination of powers of $\varphi, \psi$, such as
$$f(n) = 3\varphi^n + 4\psi^n,$$
we will get a sequence that behaves like the Fibonacci sequence, with $f(n+2) = f(n+1) + f(n).$ To get the 'right' Fibonacci sequence, we just have to replace the 3 and 4 with the right coefficients.
## From sequences to functions
Wouldn't it be fun to extend Fibonacci numbers to a function, defined for all numbers $x$?
The problem is that the function
$$F[x] = \frac{\varphi^x - \psi^x}{\sqrt{5}}$$
is not defined for values of $x$ other than integers.
The issue is the term $\psi^{x}=\left( \frac{1-\sqrt{5}}{2}\right) ^{x}$, which is the power of a negative number.
We don't really know how to define that. For instance, what is the square root of a negative number?
To
overcome this technical difficulty, we write
$$\psi ^{x}=\left( -\left( -\psi \right) \right) ^{x}=\left( -\left( \frac{%
\sqrt{5}-1}{2}\right) \right) ^{x}=\left( -1\right) ^{x}\left( \frac{\sqrt{5}%
-1}{2}\right) ^{x}. $$
Now the factor $\left( \frac{\sqrt{5}-1}{2} \right) ^{x}$ makes sense since
the number inside the brackets is positive. We have localized the problem into the powers of $-1$ for the term $\left(
-1\right) ^{x}$. We would like to replace this term by a
continuous function $m(x)$ such that it takes the values $\pm1$ on the integers. That is,
$$m(n) =1\quad \text{if }n\text{ is even }\quad\text{and}\quad m(n) =-1\quad \text{if }n\text{ is odd.} $$
The cosine function works. That is
$$m\left( x\right) =\cos \left( \pi x\right) \qquad \text{does the job.} $$
That is:
$$\cos \left( n\pi \right) =1\quad \text{if }n\text{ is even}\quad\text{ and}\quad %
\cos \left( n\pi \right) =-1\quad \text{if }n\text{ is odd.}$$
Why this is a **good** choice would lead us to complex numbers and more!
Hence, we obtain the following closed formula for our function $F[x]:$
$$\begin{eqnarray*}
F[x] &=&\frac{{\varphi }^{x}-\left( -1\right) ^{x}\left( -\psi
\right) ^{x}}{{\varphi -\psi }}=\frac{1}{\sqrt{5}}\left( {\varphi }%
^{x}-\left( -1\right) ^{x}\left( -\psi \right) ^{x}\right) \\
&=&\frac{1}{\sqrt{5}}\left( \left( \frac{1+\sqrt{5}}{2}\right) ^{x}-\cos
\left( \pi x\right) \left( \frac{\sqrt{5}-1}{2}\right) ^{x}\right) .
\end{eqnarray*}$$
Let's plot this function, and the Fibonacci sequence.
## A plot of the continuous Fibonacci function
%matplotlib inline
from numpy import *
from matplotlib.pyplot import *
phi=(1+5**(1/2))/2
psi=(5**(1/2)-1)/2
x = arange(0,10)
y = (pow(phi,x) - cos(pi*x)*pow(psi,x))/sqrt(5)
xx = linspace(0,10)
yy = (pow(phi,xx) - cos(pi*xx)*pow(psi,xx))/sqrt(5)
figure(figsize=(10,6));
plot(x,y,'o',xx,yy);
title('The continuous Fibonacci function')
xlabel('$x$')
ylabel('$Fib(x)$');
## A plot with negative values
Well, with this general definition, we can even include negative numbers for $x$ in the function.
Let's plot this too.
%matplotlib inline
from numpy import *
from matplotlib.pyplot import *
phi=(1+5**(1/2))/2
psi=(5**(1/2)-1)/2
x = arange(-10,10)
y = (pow(phi,x) - cos(pi*x)*pow(psi,x))/sqrt(5)
xx = linspace(-10,10,200)
yy = (pow(phi,xx) - cos(pi*xx)*pow(psi,xx))/sqrt(5)
figure(figsize=(10,6));
plot(x,y,'o',xx,yy);
title('The Fibonacci function, extended to negative values')
xlabel('$x$')
ylabel('$Fib(x)$');
So we see we can even get negative Fibonacci numbers!
## The Golden Ratio and Continued Fractions
We have found that the Golden ratio ${\varphi =}\frac{{1+}\sqrt{5}}{2}$
satisfies the identity
$$
{\varphi =1+}\frac{1}{{\varphi }}.
$$
Substituting for ${\varphi }$ on the denominator in the right, we obtain
$$
{\varphi =1+}\frac{1}{{1+}\frac{1}{{\varphi }}}.
$$
Substituting again for ${\varphi }$ on the denominator in the right, we
obtain
$$
{\varphi =1+}\frac{1}{{1+}\dfrac{1}{{1+}\frac{1}{{\varphi }}}}.
$$
Repeating this again,
$$
{\varphi =1+}\frac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\frac{1}{{\varphi }}}}}%
.$$
And again,
$$
{\varphi =1+}\frac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\frac{1}{%
{\varphi }}}}}}.
$$
And again,
$$
{\varphi =1+}\frac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1%
}{{1+}\frac{1}{{\varphi }}}}}}}.
$$
We see that this process can be $\textit{continued indefinitely}$. This results
in an $\textit{infinite expansion of a fraction}$. These type of expressions are known as
$\textbf{continued fractions}$:
$$
{\varphi =1+}\frac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1%
}{{1+}\dfrac{1}{{1+}\dfrac{1}{1+\dfrac{1}{{\vdots }}}}}}}}}.
$$
We can approximate continued fractions with the finite fractions obtained by
stopping the development at some point. In our case, we obtain the
approximates
$$
1,~1+1,~1+\frac{1}{1+1},~1+\frac{1}{1+\dfrac{1}{1+1}},~1+\frac{1}{1+\dfrac{1%
}{1+\dfrac{1}{1+1}}},~1+\frac{1}{1+\dfrac{1}{1+\dfrac{1}{1+\dfrac{1}{1+1}}}}%
,\dots
$$
Explicitly, these approximates are
$$
1,~2,~\frac{3}{2},~\frac{5}{3},~\frac{8}{5},~\frac{13}{8},\dots
$$
This looks like it is just the sequence of ratios $F_{n+1}/F_n$ we saw above! How can we prove this is the case for all $n$?
We know that the sequence $R_{n} = F_{n+1}/F_n$ satisfies the recursive relation.
$$
R_{n}=\frac{F_{n+1}}{F_{n}}=1+\frac{F_{n-1}}{F_{n}}=1+\frac{1}{R_{n-1}}%
,\qquad \text{with}\qquad R_{1}=1.
$$
Then, we can generate all the terms in the sequence $R_{n}$ by starting with $%
R_{1}=1$, and then using the relation $R_{n+1}=1+\frac{1}{R_{n}}:$
$$
\begin{eqnarray*}
R_{1} &=&1 \\
R_{2} &=&1+\frac{1}{R_{1}}=1+\frac{1}{1}=2 \\
R_{3} &=&1+\frac{1}{R_{2}}=1+\frac{1}{1+R_{1}}=1+\frac{1}{1+1} \\
R_{4} &=&1+\frac{1}{R_{3}}=1+\frac{1}{1+\frac{1}{1+1}} \\
R_{5} &=&1+\frac{1}{R_{4}}=1+\frac{1}{1+\frac{1}{1+\frac{1}{1+1}}} \\
&&\vdots
\end{eqnarray*}
$$
This confirms that both the sequence of ratios $R_{n}$ and the sequence of
approximations to the continued fraction of ${\varphi }$ are the same
sequence. $\square $
In general, continued fractions are expressions of the form
$$
a_{0}+\frac{1}{a_{1}+\dfrac{1}{a_{2}+\dfrac{1}{a_{3}+\dots }}}
$$
where $a_{0}$ is an integer and $a_{1},a_{2},a_{3},\dots $ are positive
integers. These type of fractions are abbreviated by the notation
$$
\left[ a_{0};a_{1},a_{2},a_{3},\dots \right] =a_{0}+\frac{1}{a_{1}+\dfrac{1}{%
a_{2}+\dfrac{1}{a_{3}+\dots }}}.
$$
For example
$$
\begin{eqnarray*}
\left[ 1;1,1,2\right] &=&1+\frac{1}{1+\dfrac{1}{1+\dfrac{1}{1+1}}}=\frac{8}{%
5} \\
&& \\
\left[ 1;1,1,1,1,\dots \right] &=&{1+}\frac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{%
{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{{1+}\dfrac{1}{1+\dfrac{1}{{\vdots }%
}}}}}}}}={\varphi }
\end{eqnarray*}
$$
For more information on continued fractions, see
https://en.wikipedia.org/wiki/Continued_fraction
## Conclusion
### What have we learned?
- a **sequence** is an ordered list of numbers, which may go on forever.
- the **Fibonacci sequence** 0,1,1,2,3,5,8,13,... is a famous list of numbers, well-studied since antiquity.
- each number in this sequence is the sum of the two coming before it in the sequence.
- the sequence grows fast, increasing by a **factor** of about **10** for every **five** terms.
- the **ratio** of pairs of Fibonacci numbers converges to the **Golden ratio,** known since the ancient Greeks as the number
$$\varphi = \frac{1 + \sqrt{5}}{2} \approx 1.6180.$$
- the Fibonacci numbers can be computed directly as the difference of powers of $\varphi$ and its **conjugate,** $\psi = \frac{1 - \sqrt{5}}{2}.$ This is sometimes faster than computing the whole list of Fibonnaci numbers.
- this formula with powers of $\varphi, \psi$ is verified using **induction.**
- The Fibonacci numbers can be **extended** to a **continuous function** $Fib(x)$, defined for all real numbers $x$ (including negatives). It **oscillates** (wiggles) on the negative x-axis.
- The **Golden Ratio** can also be expressed a **continued fraction,** which is an infinite expansion of fractions with sub-fraction terms. Many interesting numbers come from interesting continued fraction forms.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.