text stringlengths 957 885k |
|---|
<reponame>wezu/p3d_gpu_anim<filename>main.py<gh_stars>1-10
from panda3d.core import *
loadPrcFileData("", "show-frame-rate-meter 1")
from direct.showbase.ShowBase import ShowBase
from direct.gui.OnscreenText import OnscreenText
import random
from crowd import Crowd
from camera import *
# Helper that prints one line of help text in the top-left screen corner.
def addInstructions(pos, msg):
    """Show ``msg`` as a fixed on-screen instruction line, ``pos`` units below
    the top-left corner, and return the created OnscreenText node."""
    return OnscreenText(
        text=msg,
        style=1,
        fg=(1, 1, 1, 1),
        scale=.05,
        shadow=(0, 0, 0, 1),
        parent=base.a2dTopLeft,
        pos=(0.08, -pos - 0.04),
        align=TextNode.ALeft,
    )
class MyApp(ShowBase):
    """Demo application: renders a GPU-animated crowd of actors and wires
    keyboard/mouse controls for playing animations on rows of them."""

    def __init__(self):
        ShowBase.__init__(self)
        # Custom camera driver, not important for the demo.
        base.disable_mouse()
        self.cc=CameraControler(pos=(0,0,50.0), offset=(0, 250, 250), speed=1.0, zoom_speed=10.0)
        # Add some light — a single spotlight; the crowd shader reads it via the 'spot' input.
        spot = Spotlight("spot")
        spot.set_color((1.0, 1.0, 0.85, 1))
        spot.set_exponent(16)
        spot.get_lens().set_near_far(5, 400)
        spot.get_lens().set_fov(70)
        #spot.show_frustum()
        spot_path = render.attach_new_node(spot)
        spot_path.set_pos(-80, -80, 180)
        spot_path.look_at(Point3(20, 30, 40))
        render.set_shader_input('spot',spot_path)
        # Add instructions in the 'good' old p3d sample way ;)
        addInstructions(0.06, "[W][A][S][D] or [mouse3] to move the camera, [mouse1] to rotate, [mouse_wheel] to zoom ")
        addInstructions(0.12, "[1]-[9] to make a row of characters play 'walk' once in sync")
        addInstructions(0.18, "[0] to make the first row of characters loop 'walk' out of sync")
        addInstructions(0.24, "[SPACE] to make them all move")
        addInstructions(0.30, "[TAB] to turn inter frame blending on/off")
        # Make the crowd.
        self.crowd=Crowd(model="gpu_rocket.bam", num_actors=50)
        # Reparent the crowd to render.
        self.crowd.reparent_to(render)
        # Attaching a prop to a joint of one actor.
        hat=loader.load_model('hat.egg')
        # Move the hat to the right place and flatten it.
        hat.set_pos(-0.5,-2.5, 53)
        hat.set_hpr(0,-25, 0)
        hat.flatten_strong()
        self.crowd.attach_node_to_joint(node=hat, joint_name='head', actor_id=11)
        # Pose all the CrowdActors, else they might explode.
        for actor in self.crowd:
            actor.pose(1)
        # Put the actors in 10 rows of 5
        # ...in a very stupid way.
        i=0
        for x in range(5):
            for y in range(10):
                self.crowd[i].set_pos(x*30,y*40, 0)
                i+=1
        # Key bindings:
        # 1-9 (and 0) for playing the walk anim for each row.
        for i in range(10):
            self.accept(str(i), self.play_for_row, [i])
        # Space for playing the anim on all actors.
        self.accept('space', self.play_all)
        # Tab to flip inter frame blending.
        self.accept('tab', self.flip_frameblend)

    def flip_frameblend(self):
        """Toggle inter-frame blending for the whole crowd and log the new state."""
        self.crowd.set_frame_blend(not self.crowd.frame_blend)
        print("Frame blend:", self.crowd.frame_blend)

    def play_all(self):
        """Give every actor a random heading and loop an animation at a random
        frame rate (note: binds 'kneel', not 'walk', despite the on-screen hint)."""
        for actor in self.crowd:
            actor.set_h(random.randint(0, 360))
            actor.loop('kneel', random.randrange(30, 60), False)

    def play_for_row(self, id):
        """Play 'walk' on every actor in row ``id`` (actor index modulo 10);
        row 0 loops the animation instead of playing it once."""
        for i, actor in enumerate(self.crowd):
            if i%10==id:
                if id == 0:
                    actor.set_h(0)
                    actor.loop('walk', 30.0, False)
                else:
                    actor.set_h(0)
                    actor.play('walk', 30.0, True)
# Guard the entry point so importing this module does not start the demo.
if __name__ == '__main__':
    app = MyApp()
    app.run()
|
import pickle,os,glob
import numpy as np
import pandas as pd
from common import tflogs2pandas
# Load the job descriptions produced by the experiment launcher.
with open("output_data/tmp/all_jobs_1xx.pickle", "rb") as f:
    all_jobs = pickle.load(f)
# Cache file for the aggregated tensorboard results between script runs.
cache_path = "output_data/tmp/1xx_tb_results.pandas"
# Body seeds of the 1xx experiment series — presumably 100..107; TODO confirm
# this matches the run grid (currently unused below).
bodies = np.arange(100,108).tolist()
def load_tb(force=0):
    """Return the aggregated tensorboard eval metrics as one DataFrame.

    Results are cached in ``cache_path``; pass ``force=1`` to ignore the
    cache and re-read every tensorboard log from disk.
    """
    try:
        if force:
            # Deliberately jump to the rebuild branch below.
            raise FileNotFoundError
        df = pd.read_pickle(cache_path)
    except FileNotFoundError:
        visited = {}
        dfs = []
        for job in all_jobs:
            if job['str_md5'] in visited: # because I failed to seed while generating run seed, so I don't know what the run seeds are used.. :( need to use glob to find out.
                continue
            visited[job['str_md5']] = True
            # Run seed is unknown, so the 'sd' component is a glob wildcard.
            tb_path = f"output_data/tensorboard_1xx/1xx_mutate_{job['num_mutate']}/model-100-101-102-103-104-105-106-107-CustomAlignWrapper-md{job['str_md5']}-sd*/PPO_1"
            paths = glob.glob(tb_path)
            for tb_path in paths:
                _tmp = tb_path.split("sd")[-1]
                vacc_run_seed = _tmp.split("/")[0] # need to read the run seed from vacc..
                print(f"Loading {tb_path}")
                if not os.path.exists(tb_path):
                    continue
                df = tflogs2pandas.tflog2pandas(tb_path)
                # Keep only the evaluation metrics and tag each row with its job metadata.
                df = df[df["metric"].str.startswith("eval/")]
                df["num_mutate"] = job["num_mutate"]
                df["body_seed"] = job["body_seed"]
                df["custom_alignment"] = job["custom_alignment"]
                df["str_md5"] = job["str_md5"]
                df["vacc_run_seed"] = vacc_run_seed
                dfs.append(df)
        df = pd.concat(dfs)
        print(df)
        df.to_pickle(cache_path)
    return df
# Build (or load from cache) the results table; pass force=1 to rebuild.
df = load_tb(0)
print(df)
import seaborn as sns
import matplotlib.pyplot as plt
def check_finished():
    """Visual sanity check: one count bar per (body_seed, num_mutate) pair
    should show every expected run is present in ``df``."""
    sns.countplot(data=df, x="body_seed", hue="num_mutate") # check every run is here
    plt.show()
    plt.close()
# check_finished() # There are some Fly-away bug! exclude these bodies.
def get_unfinished():
    """Print per-metric record counts so unfinished runs stand out.

    NOTE: the original version carried a row-by-row validation scan after the
    early ``return``; it was unreachable and referenced an undefined ``_df``,
    so it has been removed (behavior is unchanged).
    """
    _count = df.groupby("metric").count()
    print(_count)
    return
get_unfinished()
# NOTE(review): this early exit stops the script right here — all the
# plotting/ranking code below is currently disabled. Remove it to run them.
exit(0)
def exclude_bodies_with_fly_away_bug(df):
    """Drop rows for bodies whose training aborted early.

    A body is kept only when it has exactly 800 records; the fly-away bug
    aborts training before all records are written.
    """
    records_per_body = df.groupby("body_seed").count()
    complete = records_per_body[records_per_body["value"] == 800]
    valid_body_seed = complete.index.values
    kept = df[df["body_seed"].isin(valid_body_seed)]
    print(f"valid alignment {valid_body_seed} ({len(valid_body_seed)} in total)")
    return kept
df = exclude_bodies_with_fly_away_bug(df)
def plot_all():
    """Save a grid of learning curves: one panel per (num_mutate, metric),
    one line per body_seed."""
    g = sns.FacetGrid(data=df, row="num_mutate", col="metric", hue="body_seed")
    g.map(sns.lineplot, "step", "value")
    plt.savefig("output_data/tmp/all.png")
    plt.close()
# plot_all()
def plot_best_vs_worst(num_mutate = 4, evaluate_at_step = 2007040.0, dry_run=False):
    """Rank bodies of one mutation level by mean eval value, plot the best
    vs. the worst, and print their alignments.

    ``evaluate_at_step`` is currently unused (the step filter is commented
    out — ranking uses the mean over ALL steps). With ``dry_run`` the plot
    is skipped but the alignments are still printed.
    """
    # _df = df[(df["step"]==evaluate_at_step)&(df["num_mutate"]==num_mutate)]
    _df = df[(df["num_mutate"]==num_mutate)]
    # print(_df)
    mean_final_values = _df.groupby(['body_seed'], sort=False)['value'].mean().sort_values()
    # print(mean_final_values)
    # print(mean_final_values.index[0], mean_final_values.index[-1])
    # sort_values() is ascending: worst body first, best body last.
    worst_body_seed = mean_final_values.index[0]
    best_body_seed = mean_final_values.index[-1]
    if not dry_run:
        _df = df[(df["num_mutate"]==num_mutate) & ((df["body_seed"]==worst_body_seed)|(df["body_seed"]==best_body_seed))]
        g = sns.FacetGrid(data=_df, col="metric", hue="body_seed")
        g.map(sns.lineplot, "step", "value")
        plt.savefig(f"output_data/tmp/best_vs_worst_1xx_{num_mutate}.png")
        plt.close()
    print("")
    print(f"best_alignment in mutate {num_mutate}:")
    _df = df[(df["body_seed"]==best_body_seed) & (df["num_mutate"]==num_mutate)]
    print(_df.iloc[0]["custom_alignment"])
    print(_df.iloc[0]["str_md5"])
    print(f"worst_alignment in mutate {num_mutate}:")
    _df = df[(df["body_seed"]==worst_body_seed) & (df["num_mutate"]==num_mutate)]
    print(_df.iloc[0]["custom_alignment"])
    print(_df.iloc[0]["str_md5"])
# Run the best-vs-worst comparison for the selected mutation count(s).
for i in [64]:
    plot_best_vs_worst(i, dry_run=False)
# Exploratory single-body plotting, kept for reference:
# df_one_body = df[df["metric"]=="eval/904_mean_reward"]
# # sns.lineplot(data=df_one_body, x="step", y="value", hue="body_seed")
# # plt.savefig("output_data/tmp/904.png")
# # plt.close()
# for body_seed in range(20):
# _df = df_one_body[df_one_body["body_seed"]==body_seed]
# print(body_seed, _df["value"].max())
# df_one_body_two_alignment = df_one_body[(df_one_body["body_seed"]==0)|(df_one_body["body_seed"]==5)]
# sns.lineplot(data=df_one_body_two_alignment, x="step", y="value", hue="body_seed")
# plt.savefig("output_data/tmp/904_two_alignments.png")
# plt.close()
|
import numpy as np
from .evaluate_12ECG_score import (
compute_accuracy,
compute_auc,
compute_beta_measures,
compute_challenge_metric,
compute_f_measure,
is_number,
load_weights,
organize_labels_outputs,
)
def train_evaluate_score_batch_helper(data_eval, features, data_cache, loaded_model):
    """Run every per-diagnosis classifier in ``loaded_model`` over ``features``
    and score the whole batch with :func:`evaluate_score_batch`."""
    classes = []
    labels = []
    scores = []
    for code, clf in loaded_model.items():
        # Skip entries whose key is not a diagnosis code (e.g. metadata).
        if not is_number(code):
            continue
        classes.append(str(code))
        labels.append(clf.predict(features).tolist())
        scores.append(clf.predict_proba(features)[:, 1].tolist())
    # Transpose from per-class rows to (num_examples, num_classes).
    labels = np.array(labels).T
    scores = np.array(scores).T
    # Ground-truth diagnosis codes, normalized to strings.
    ground_truth = [[str(dv) for dv in dx] for dx in data_eval["dx"].tolist()]
    return evaluate_score_batch(
        predicted_classes=classes,
        predicted_labels=labels,
        predicted_probabilities=scores,
        raw_ground_truth_labels=ground_truth,
    )
def evaluate_score_batch(
    predicted_classes=None,  # list, len(num_classes), str(code)
    predicted_labels=None,  # shape (num_examples, num_classes), T/F for each code
    predicted_probabilities=None,  # shape (num_examples, num_classes), prob. [0-1] for each code
    raw_ground_truth_labels=None,  # list(('dx1', 'dx2'), ('dx1', 'dx3'), ...)
    weights_file="evaluation-2020/weights.csv",
    normal_class="426783006",
    equivalent_classes=None,
):
    """This is a helper function for getting
    auroc, auprc, accuracy, f_measure, f_beta_measure, g_beta_measure, challenge_metric
    without needing the directories of labels and prediction outputs.
    It is useful for directly calculating the scores given the
    classes, predicted labels, and predicted probabilities.
    """
    # Resolve defaults here rather than in the signature to avoid the shared
    # mutable-default-argument pitfall (flake8-bugbear B006). Callers that
    # relied on the old defaults get identical values.
    if predicted_classes is None:
        predicted_classes = []
    if predicted_labels is None:
        predicted_labels = []
    if predicted_probabilities is None:
        predicted_probabilities = []
    if raw_ground_truth_labels is None:
        raw_ground_truth_labels = []
    if equivalent_classes is None:
        # SNOMED code pairs that are scored as the same diagnosis.
        equivalent_classes = [
            ["713427006", "59118001"],
            ["284470004", "63593006"],
            ["427172004", "17338001"],
        ]
    label_classes, labels = _load_labels(
        raw_ground_truth_labels,
        normal_class=normal_class,
        equivalent_classes_collection=equivalent_classes,
    )
    output_classes, binary_outputs, scalar_outputs = _load_outputs(
        predicted_classes,
        predicted_labels,
        predicted_probabilities,
        normal_class=normal_class,
        equivalent_classes_collection=equivalent_classes,
    )
    # Align label and output matrices on one shared class ordering.
    classes, labels, binary_outputs, scalar_outputs = organize_labels_outputs(
        label_classes, output_classes, labels, binary_outputs, scalar_outputs
    )
    weights = load_weights(weights_file, classes)
    # Only consider classes that are scored with the Challenge metric.
    indices = np.any(weights, axis=0)  # Find indices of classes in weight matrix.
    classes = [x for i, x in enumerate(classes) if indices[i]]
    labels = labels[:, indices]
    scalar_outputs = scalar_outputs[:, indices]
    binary_outputs = binary_outputs[:, indices]
    weights = weights[np.ix_(indices, indices)]
    auroc, auprc = compute_auc(labels, scalar_outputs)
    accuracy = compute_accuracy(labels, binary_outputs)
    f_measure = compute_f_measure(labels, binary_outputs)
    f_beta_measure, g_beta_measure = compute_beta_measures(
        labels, binary_outputs, beta=2
    )
    challenge_metric = compute_challenge_metric(
        weights, labels, binary_outputs, classes, normal_class
    )
    return (
        auroc,
        auprc,
        accuracy,
        f_measure,
        f_beta_measure,
        g_beta_measure,
        challenge_metric,
    )
# Load labels from header/label files.
def _load_labels(raw_labels, normal_class, equivalent_classes_collection):
# raw labels: list of str(dx) tuples
num_recordings = len(raw_labels)
# Identify classes.
classes = set.union(*map(set, raw_labels))
if normal_class not in classes:
classes.add(normal_class)
print(
"- The normal class {} is not one of the label classes, so it has been automatically added, but please check that you chose the correct normal class.".format(
normal_class
)
)
classes = sorted([str(c) for c in classes])
num_classes = len(classes)
# Use one-hot encoding for labels.
labels = np.zeros((num_recordings, num_classes), dtype=np.bool)
for i in range(num_recordings):
dxs = raw_labels[i]
for dx in dxs:
j = classes.index(dx)
labels[i, j] = 1
# For each set of equivalent class, use only one class as the representative class for the set and discard the other classes in the set.
# The label for the representative class is positive if any of the labels in the set is positive.
remove_classes = list()
remove_indices = list()
for equivalent_classes in equivalent_classes_collection:
equivalent_classes = [x for x in equivalent_classes if x in classes]
if len(equivalent_classes) > 1:
# representative_class = equivalent_classes[0]
other_classes = equivalent_classes[1:]
equivalent_indices = [classes.index(x) for x in equivalent_classes]
representative_index = equivalent_indices[0]
other_indices = equivalent_indices[1:]
labels[:, representative_index] = np.any(
labels[:, equivalent_indices], axis=1
)
remove_classes += other_classes
remove_indices += other_indices
for x in remove_classes:
classes.remove(x)
labels = np.delete(labels, remove_indices, axis=1)
# If the labels are negative for all classes, then change the label for the normal class to positive.
normal_index = classes.index(normal_class)
for i in range(num_recordings):
num_positive_classes = np.sum(labels[i, :])
if num_positive_classes == 0:
labels[i, normal_index] = 1
return classes, labels
def _load_outputs(
predicted_classes,
predicted_labels,
predicted_probabilities,
normal_class,
equivalent_classes_collection,
):
# The outputs should have the following form:
#
# diagnosis_1, diagnosis_2, diagnosis_3
# 0, 1, 1
# 0.12, 0.34, 0.56
#
num_recordings = len(predicted_labels)
tmp_labels = predicted_classes
tmp_binary_outputs = predicted_labels.tolist()
tmp_scalar_outputs = predicted_probabilities.tolist()
# Identify classes.
classes = set(tmp_labels)
if normal_class not in classes:
classes.add(normal_class)
print(
"- The normal class {} is not one of the output classes, so it has been automatically added, but please check that you identified the correct normal class.".format(
normal_class
)
)
classes = sorted(classes)
num_classes = len(classes)
# Use one-hot encoding for binary outputs and the same order for scalar outputs.
binary_outputs = np.zeros((num_recordings, num_classes), dtype=np.bool)
scalar_outputs = np.zeros((num_recordings, num_classes), dtype=np.float64)
for i in range(num_recordings):
for k, dx in enumerate(tmp_labels):
j = classes.index(dx)
binary_outputs[i, j] = tmp_binary_outputs[i][k]
scalar_outputs[i, j] = tmp_scalar_outputs[i][k]
# For each set of equivalent class, use only one class as the representative class for the set and discard the other classes in the set.
# The binary output for the representative class is positive if any of the classes in the set is positive.
# The scalar output is the mean of the scalar outputs for the classes in the set.
remove_classes = list()
remove_indices = list()
for equivalent_classes in equivalent_classes_collection:
equivalent_classes = [x for x in equivalent_classes if x in classes]
if len(equivalent_classes) > 1:
# representative_class = equivalent_classes[0]
other_classes = equivalent_classes[1:]
equivalent_indices = [classes.index(x) for x in equivalent_classes]
representative_index = equivalent_indices[0]
other_indices = equivalent_indices[1:]
binary_outputs[:, representative_index] = np.any(
binary_outputs[:, equivalent_indices], axis=1
)
scalar_outputs[:, representative_index] = np.nanmean(
scalar_outputs[:, equivalent_indices], axis=1
)
remove_classes += other_classes
remove_indices += other_indices
for x in remove_classes:
classes.remove(x)
binary_outputs = np.delete(binary_outputs, remove_indices, axis=1)
scalar_outputs = np.delete(scalar_outputs, remove_indices, axis=1)
# If any of the outputs is a NaN, then replace it with a zero.
binary_outputs[np.isnan(binary_outputs)] = 0
scalar_outputs[np.isnan(scalar_outputs)] = 0
# If the binary outputs are negative for all classes, then change the binary output for the normal class to positive.
normal_index = classes.index(normal_class)
for i in range(num_recordings):
num_positive_classes = np.sum(binary_outputs[i, :])
if num_positive_classes == 0:
binary_outputs[i, normal_index] = 1
return classes, binary_outputs, scalar_outputs
|
#!/usr/bin/env python
"""
Classes which define the individuals of a population with
its characteristic genes, generation, crossover and
mutation processes.
"""
from __future__ import annotations
import math
import pprint
import random
import multiprocessing as mp
from abc import ABC, abstractmethod
from tempfile import NamedTemporaryFile
from typing import List, Tuple
from .populations import split_list
import numpy as np
try:
from .models.xgboost_models import XgboostModel
except ImportError:
pass
try:
from .models.keras_models import GeneticCnnModel
except ImportError:
pass
def random_log_uniform(minimum, maximum, base, eps=1e-12):
    """Generate a random number which is uniform in a
    logarithmic scale. If base > 0 scale goes from minimum
    to maximum, if base < 0 vice versa, and if base is 0,
    use a uniform scale.
    """
    if base == 0:
        return random.uniform(minimum, maximum)
    minimum += eps  # Avoid math domain error when minimum is zero
    if base > 0:
        lo = math.log(minimum, base)
        hi = math.log(maximum, base)
        return base ** random.uniform(lo, hi)
    # Negative base: mirror the log scale so density concentrates near maximum.
    scale = abs(base)
    exponent = random.uniform(math.log(eps, scale), math.log(maximum - minimum, scale))
    return maximum - scale ** exponent
class Individual(ABC):
    """Basic definition of an individual containing
    reproduction and mutation methods. Do not instantiate,
    use a subclass which extends this object by defining a
    genome and a random individual generator.
    """

    def __init__(self, x_train: np.memmap, y_train, genome, genes, crossover_rate, mutation_rate, additional_parameters=None):
        """Store training data, genome description and current genes.

        :param genome: dict mapping gene name -> (default, min, max,
            log-scale-base) tuple (see ``mutate``).
        :param genes: dict of current gene values; keys must match the genome.
        """
        self.x_train = x_train
        self.y_train = y_train
        self.genome = genome
        self.validate_genome()
        self.genes = genes
        self.validate_genes()
        self.crossover_rate = crossover_rate
        self.mutation_rate = mutation_rate
        self.fitness = None  # Until evaluated an individual fitness is unknown
        # Subclasses consume their extra kwargs before calling super().
        assert additional_parameters is None

    def validate_genome(self):
        """Check genome structure."""
        if type(self.genome) != dict:
            raise TypeError("Genome must be a dictionary.")
        for gene, properties in self.genome.items():
            if type(gene) != str:
                raise TypeError("Gene names must be strings.")

    def validate_genes(self):
        """Check that genes are compatible with genome."""
        if set(self.genome.keys()) != set(self.genes.keys()):
            raise ValueError("Genes passed don't correspond to individual's genome.")

    def get_genes(self):
        """Return individual's genes."""
        return self.genes

    def get_genome(self):
        """Return individual's genome."""
        return self.genome

    def set_genes(self, new_genes):
        """Replace the individual's genes."""
        self.genes = new_genes

    @staticmethod
    @abstractmethod
    def generate_random_genes(genome):
        pass

    @abstractmethod
    def evaluate_fitness(self):
        pass

    @abstractmethod
    def get_additional_parameters(self):
        pass

    def get_fitness(self):
        """Compute individual's fitness if necessary and return it."""
        if self.fitness is None:
            self.evaluate_fitness()
        return self.fitness

    def reproduce(self, partner):
        """Mix genes from self and partner randomly and
        return a new instance of an individual. Do not
        mutate parents.
        """
        assert self.__class__ == partner.__class__  # Can only reproduce if they're the same species
        child_genes = {}
        for name, value in self.get_genes().items():
            if random.random() < self.crossover_rate:
                child_genes[name] = partner.get_genes()[name]
            else:
                child_genes[name] = value
        return self.__class__(
            self.x_train, self.y_train, self.genome, child_genes, self.crossover_rate, self.mutation_rate,
            **self.get_additional_parameters()
        )

    def crossover(self, partner):
        """Mix genes from self and partner randomly.
        Mutates each parent instead of producing a
        new instance (child).
        """
        assert self.__class__ == partner.__class__  # Can only cross if they're the same species
        for name in self.get_genes().keys():
            if random.random() < self.crossover_rate:
                self.get_genes()[name], partner.get_genes()[name] = partner.get_genes()[name], self.get_genes()[name]
        # Both parents changed, so both fitnesses are stale.
        self.set_fitness(None)
        partner.set_fitness(None)

    def mutate(self):
        """Mutate instance's genes with a certain probability."""
        for name, value in self.get_genes().items():
            if random.random() < self.mutation_rate:
                default, minimum, maximum, log_scale = self.get_genome()[name]
                if type(default) == int:
                    self.get_genes()[name] = random.randint(minimum, maximum)
                else:
                    self.get_genes()[name] = round(random_log_uniform(minimum, maximum, log_scale), 4)
        self.set_fitness(None)  # The mutation produces a new individual

    def get_fitness_status(self):
        """Return True if individual's fitness in known."""
        return self.fitness is not None

    def set_fitness(self, value):
        """Assign fitness."""
        self.fitness = value

    def copy(self):
        """Copy instance."""
        individual_copy = self.__class__(
            self.x_train, self.y_train, self.genome, self.genes.copy(), self.crossover_rate,
            self.mutation_rate, **self.get_additional_parameters()
        )
        individual_copy.set_fitness(self.fitness)
        return individual_copy

    def __str__(self):
        """Return genes which identify the individual."""
        return pprint.pformat(self.genes)

    def clear_large_data(self):
        """To be overwritten in children"""

    @staticmethod
    def evaluate_fitness_and_return_results(individuals: List[Individual], queue: mp.Queue, indices: List[int], *args):
        """Evaluate each individual and push its fitness onto the queue.

        BUG FIX: the original iterated ``enumerate(individuals)`` and called
        ``get_fitness`` on the resulting (index, individual) tuple, which
        raised AttributeError; iterate the individuals directly.
        """
        for individual in individuals:
            queue.put(individual.get_fitness())

    def prepare_data_sharing(self, n_workers: int, *args) -> Tuple:
        """Allocate a shared memmap for worker fitness results."""
        with NamedTemporaryFile(prefix='gentun-results-') as memfile:
            # np.float was removed in NumPy 1.24; float64 is the same dtype.
            outputs = np.memmap(filename=memfile.name, shape=(n_workers,), mode='w+', dtype=np.float64)
            return outputs,

    @staticmethod
    def update_individuals_from_remote_data(individuals: List[Individual], outputs: np.memmap, *args):
        """Copy the fitness computed by remote workers back onto individuals."""
        for i, individual in enumerate(individuals):
            individual.set_fitness(outputs[i])
class XgboostIndividual(Individual):
    """Individual whose genes are XGBoost hyperparameters, evaluated by
    cross-validating an externally provided :class:`XgboostModel`."""

    def __init__(self, x_train, y_train, genome=None, genes=None,
                 crossover_rate=0.5, mutation_rate=0.015,
                 fixed_genes=None, model: XgboostModel = None):
        """Build an individual with random genes unless explicit ones are given.

        :param fixed_genes: gene values that always override random/mutated ones.
        :param model: XgboostModel used for cross-validation; required.
        """
        if genome is None:
            genome = {
                # name: (default, min, max, logarithmic-scale-base)
                'eta': (0.3, 0.001, 1.0, 10),
                'min_child_weight': (1, 0, 10, None),
                'max_depth': (6, 3, 10, None),
                'gamma': (0.0, 0.0, 10.0, 10),
                'max_delta_step': (0, 0, 10, None),
                'subsample': (1.0, 0.0, 1.0, -10),
                'colsample_bytree': (1.0, 0.0, 1.0, -10),
                'colsample_bylevel': (1.0, 0.0, 1.0, -10),
                'lambda': (1.0, 0.1, 10.0, 10),
                'alpha': (0.0, 0.0, 10.0, 10),
                'scale_pos_weight': (1.0, 0.0, 10.0, 0)
            }
        if genes is None:
            genes = self.generate_random_genes(genome)
        # overwrite random genes with fixed ones
        if fixed_genes is not None:
            genes = {**genes, **fixed_genes}
        # Set individual's attributes
        super(XgboostIndividual, self).__init__(x_train, y_train, genome, genes, crossover_rate, mutation_rate)
        assert model is not None, 'Model has to be provided'
        self.model = model
        self.best_ntree_limit = None
        self.fixed_genes = fixed_genes
        self.oof_dict = {}  # out-of-fold predictions from the last evaluation

    def mutate(self):
        """Mutate instance's genes with a certain probability."""
        super(XgboostIndividual, self).mutate()
        # overwrite random genes with fixed ones
        if self.fixed_genes is not None:
            genes = self.get_genes()
            new_genes = {**genes, **self.fixed_genes}
            self.set_genes(new_genes=new_genes)

    @staticmethod
    def generate_random_genes(genome):
        """Create and return random genes."""
        genes = {}
        for name, (default, minimum, maximum, log_scale) in genome.items():
            if type(default) == int:
                genes[name] = random.randint(minimum, maximum)
            else:
                genes[name] = round(random_log_uniform(minimum, maximum, log_scale), 4)
        return genes

    def evaluate_fitness(self):
        """Perform cross-validation and record its by-products."""
        self.model.update(self.genes)
        self.fitness = self.model.cross_validate()
        self.best_ntree_limit = self.model.best_ntree_limit
        self.oof_dict = self.model.oof_dict.copy()

    def get_additional_parameters(self):
        return {
            'model': self.model,
        }

    def clear_large_data(self):
        """Drop the training DMatrix so the individual can be pickled cheaply."""
        self.model.d_train = None

    @staticmethod
    def evaluate_fitness_and_return_results(individuals: List[Individual], queue: mp.Queue, indices: List[int],
                                            outputs: np.memmap, ntree_limits: np.memmap, cv_preds: np.memmap,
                                            cv_trues: np.memmap, model: XgboostModel, *args):
        """Evaluate a slice of individuals in a worker, writing fitness, tree
        limits and flattened out-of-fold arrays into the shared memmaps."""
        for i, result_index in enumerate(indices):
            individuals[i].model = model
            fitness = individuals[i].get_fitness()
            outputs[result_index] = fitness
            ntree_limits[result_index] = individuals[i].best_ntree_limit
            # Flatten the per-fold arrays into one row of the shared buffers.
            start = 0
            for x in individuals[i].oof_dict['cv_preds']:
                cv_preds[result_index, start:start + len(x)] = x
                start += len(x)
            start = 0
            for x in individuals[i].oof_dict['cv_trues']:
                cv_trues[result_index, start:start + len(x)] = x
                start += len(x)
            queue.put(fitness)

    def prepare_data_sharing(self, n_workers: int, n_colums: int) -> Tuple:
        """Allocate the shared memmaps the workers write their results into."""
        with NamedTemporaryFile(prefix='gentun-results-') as memfile:
            # np.float / np.int were removed in NumPy 1.24; use explicit dtypes.
            outputs = np.memmap(filename=memfile.name, shape=(n_workers,), mode='w+', dtype=np.float64)
            with NamedTemporaryFile(prefix='gentun-tree-limits-') as memfile2:
                ntree_limits = np.memmap(filename=memfile2.name, shape=(n_workers,), mode='w+', dtype=np.int64)
                with NamedTemporaryFile(prefix='gentun-cv_preds-') as memfile3:
                    cv_preds = np.memmap(filename=memfile3.name, shape=(n_workers, n_colums), mode='w+', dtype=np.float64)
                    with NamedTemporaryFile(prefix='gentun-cv_trues-') as memfile4:
                        cv_trues = np.memmap(filename=memfile4.name, shape=(n_workers, n_colums), mode='w+', dtype=np.int64)
                        return outputs, ntree_limits, cv_preds, cv_trues, self.model

    @staticmethod
    def update_individuals_from_remote_data(individuals: List[XgboostIndividual], outputs: np.memmap,
                                            ntree_limits: np.memmap, cv_preds: np.memmap, cv_trues: np.memmap, *args):
        """Copy fitness plus xgboost-specific results back onto individuals."""
        super(XgboostIndividual, XgboostIndividual).update_individuals_from_remote_data(individuals, outputs)
        for i, individual in enumerate(individuals):
            individual.best_ntree_limit = ntree_limits[i]
            individual.oof_dict['cv_preds'] = split_list(cv_preds[i, :], individual.model.kfold)
            individual.oof_dict['cv_trues'] = split_list(cv_trues[i, :], individual.model.kfold)
class GeneticCnnIndividual(Individual):
    """Individual whose genes are binary connection strings for a Genetic CNN."""

    def __init__(self, x_train, y_train, genome=None, genes=None, crossover_rate=0.3, mutation_rate=0.1, nodes=(3, 5),
                 input_shape=(28, 28, 1), kernels_per_layer=(20, 50), kernel_sizes=((5, 5), (5, 5)), dense_units=500,
                 dropout_probability=0.5, classes=10, kfold=5, epochs=(3,), learning_rate=(1e-3,), batch_size=32):
        if genome is None:
            # One gene per stage; a stage with K_s nodes has K_s*(K_s-1)/2 possible connections.
            genome = {'S_{}'.format(i + 1): int(K_s * (K_s - 1) / 2) for i, K_s in enumerate(nodes)}
        if genes is None:
            genes = self.generate_random_genes(genome)
        # Set individual's attributes
        super(GeneticCnnIndividual, self).__init__(x_train, y_train, genome, genes, crossover_rate, mutation_rate)
        # Set additional parameters which are not tuned
        assert len(nodes) == len(kernels_per_layer) and len(kernels_per_layer) == len(kernel_sizes)
        self.nodes = nodes
        self.input_shape = input_shape
        self.kernels_per_layer = kernels_per_layer
        self.kernel_sizes = kernel_sizes
        self.dense_units = dense_units
        self.dropout_probability = dropout_probability
        self.classes = classes
        self.kfold = kfold
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.batch_size = batch_size

    @staticmethod
    def generate_random_genes(genome):
        """Draw a random '0'/'1' string of the right length for every stage."""
        return {
            name: ''.join(random.choice('01') for _ in range(connections))
            for name, connections in genome.items()
        }

    def evaluate_fitness(self):
        """Create model and perform cross-validation."""
        model = GeneticCnnModel(
            self.x_train, self.y_train, self.genes, self.nodes, self.input_shape, self.kernels_per_layer,
            self.kernel_sizes, self.dense_units, self.dropout_probability, self.classes,
            self.kfold, self.epochs, self.learning_rate, self.batch_size
        )
        self.fitness = model.cross_validate()

    def get_additional_parameters(self):
        return {
            'nodes': self.nodes,
            'input_shape': self.input_shape,
            'kernels_per_layer': self.kernels_per_layer,
            'kernel_sizes': self.kernel_sizes,
            'dense_units': self.dense_units,
            'dropout_probability': self.dropout_probability,
            'classes': self.classes,
            'kfold': self.kfold,
            'epochs': self.epochs,
            'learning_rate': self.learning_rate,
            'batch_size': self.batch_size
        }

    def mutate(self):
        """Flip each connection bit independently with probability mutation_rate."""
        for name, bits in self.get_genes().items():
            flipped = []
            for bit in bits:
                flip = random.random() < self.mutation_rate
                flipped.append('1' if (bit == '1') != flip else '0')
            new_bits = ''.join(flipped)
            if new_bits != bits:
                self.set_fitness(None)  # A mutation means the individual has to be re-evaluated
                self.get_genes()[name] = new_bits
|
# =============================================================================== #
# #
# This file has been generated automatically!! Do not change this manually! #
# #
# =============================================================================== #
from __future__ import annotations
from pydantic import Field
from ..base_object import BaseObject
class PassportElementErrorSource(BaseObject):
    """
    Contains the description of an error in a Telegram Passport element
    """

    # TDLib serialization tag for this type (read from the "@type" field).
    ID: str = Field("passportElementErrorSource", alias="@type")
class PassportElementErrorSourceDataField(PassportElementErrorSource):
    """
    One of the data fields contains an error. The error will be considered resolved when the value of the field changes

    :param field_name: Field name
    :type field_name: :class:`str`
    """

    ID: str = Field("passportElementErrorSourceDataField", alias="@type")
    field_name: str

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceDataField:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceDataField.construct(**q)
class PassportElementErrorSourceFile(PassportElementErrorSource):
    """
    The file contains an error. The error will be considered resolved when the file changes

    :param file_index: Index of a file with the error
    :type file_index: :class:`int`
    """

    ID: str = Field("passportElementErrorSourceFile", alias="@type")
    file_index: int

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceFile:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceFile.construct(**q)
class PassportElementErrorSourceFiles(PassportElementErrorSource):
    """
    The list of attached files contains an error. The error will be considered resolved when the list of files changes
    """

    ID: str = Field("passportElementErrorSourceFiles", alias="@type")

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceFiles:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceFiles.construct(**q)
class PassportElementErrorSourceFrontSide(PassportElementErrorSource):
    """
    The front side of the document contains an error. The error will be considered resolved when the file with the front side changes
    """

    ID: str = Field("passportElementErrorSourceFrontSide", alias="@type")

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceFrontSide:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceFrontSide.construct(**q)
class PassportElementErrorSourceReverseSide(PassportElementErrorSource):
    """
    The reverse side of the document contains an error. The error will be considered resolved when the file with the reverse side changes
    """

    ID: str = Field("passportElementErrorSourceReverseSide", alias="@type")

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceReverseSide:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceReverseSide.construct(**q)
class PassportElementErrorSourceSelfie(PassportElementErrorSource):
    """
    The selfie with the document contains an error. The error will be considered resolved when the file with the selfie changes
    """

    ID: str = Field("passportElementErrorSourceSelfie", alias="@type")

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceSelfie:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceSelfie.construct(**q)
class PassportElementErrorSourceTranslationFile(PassportElementErrorSource):
    """
    One of files with the translation of the document contains an error. The error will be considered resolved when the file changes

    :param file_index: Index of a file with the error
    :type file_index: :class:`int`
    """

    ID: str = Field("passportElementErrorSourceTranslationFile", alias="@type")
    file_index: int

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceTranslationFile:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceTranslationFile.construct(**q)
class PassportElementErrorSourceTranslationFiles(PassportElementErrorSource):
    """
    The translation of the document contains an error. The error will be considered resolved when the list of translation files changes
    """

    ID: str = Field("passportElementErrorSourceTranslationFiles", alias="@type")

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceTranslationFiles:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceTranslationFiles.construct(**q)
class PassportElementErrorSourceUnspecified(PassportElementErrorSource):
    """
    The element contains an error in an unspecified place. The error will be considered resolved when new data is added
    """

    ID: str = Field("passportElementErrorSourceUnspecified", alias="@type")

    @staticmethod
    def read(q: dict) -> PassportElementErrorSourceUnspecified:
        """Build an instance from a raw TDLib dict (pydantic ``construct``, no validation)."""
        return PassportElementErrorSourceUnspecified.construct(**q)
|
import warnings
import functools
import inspect
import time
import sys
import pathlib
from datetime import datetime
# Types accepted as a "string" by the decorators below.  The original
# tuple (type(b''), type(u''), type(f'')) was (bytes, str, str): the
# f-string entry was redundant because type(f'') is just str.
string_types = (bytes, str)
class tcolor:
    """ANSI/VT100 escape sequences for colored and styled terminal output.

    Usage: print(tcolor.Red + "error" + tcolor.ResetAll)
    """
    # --- reset / text style ---
    ResetAll = "\033[0m"
    Bold = "\033[1m"
    Dim = "\033[2m"
    Underlined = "\033[4m"
    Blink = "\033[5m"
    Reverse = "\033[7m"
    Hidden = "\033[8m"
    # --- style resets ---
    ResetBold = "\033[21m"
    ResetDim = "\033[22m"
    ResetUnderlined = "\033[24m"
    ResetBlink = "\033[25m"
    ResetReverse = "\033[27m"
    ResetHidden = "\033[28m"
    # --- foreground colors ---
    Default = "\033[39m"
    Black = "\033[30m"
    Red = "\033[31m"
    Green = "\033[32m"
    Yellow = "\033[33m"
    Blue = "\033[34m"
    Magenta = "\033[35m"
    Cyan = "\033[36m"
    LightGray = "\033[37m"
    DarkGray = "\033[90m"
    LightRed = "\033[91m"
    LightGreen = "\033[92m"
    LightYellow = "\033[93m"
    LightBlue = "\033[94m"
    LightMagenta = "\033[95m"
    LightCyan = "\033[96m"
    White = "\033[97m"
    # --- background colors ---
    BackgroundDefault = "\033[49m"
    BackgroundBlack = "\033[40m"
    BackgroundRed = "\033[41m"
    BackgroundGreen = "\033[42m"
    BackgroundYellow = "\033[43m"
    BackgroundBlue = "\033[44m"
    BackgroundMagenta = "\033[45m"
    BackgroundCyan = "\033[46m"
    BackgroundLightGray = "\033[47m"
    BackgroundDarkGray = "\033[100m"
    BackgroundLightRed = "\033[101m"
    BackgroundLightGreen = "\033[102m"
    BackgroundLightYellow = "\033[103m"
    BackgroundLightBlue = "\033[104m"
    BackgroundLightMagenta = "\033[105m"
    BackgroundLightCyan = "\033[106m"
    BackgroundWhite = "\033[107m"
def change_dir(directory='.'):
    '''
    Change the current working directory and report the new location.

    directory : str, path to switch to (default: current directory).
    '''
    # Bug fix: the module never imports os (only warnings/functools/
    # inspect/time/sys/pathlib/datetime), so the original bare
    # chdir()/getcwd() calls raised NameError.  Import locally.
    import os
    os.chdir(directory)
    workbook = os.getcwd()
    print('Using current directory for loading/saving: ' +
          tcolor.Blue + tcolor.Bold + f'{workbook}' + tcolor.ResetAll)
    print('To change current directory, call change_dir(...)')


# Announce the starting working directory at import time.
change_dir('.')
def deprecated(reason):
    '''
    Decorator which can be used to mark functions as deprecated.
    Calling the decorated function emits a DeprecationWarning.

    Supports both forms:
        @deprecated("please use another function")  # with a reason
        @deprecated                                 # bare decorator
    '''
    # (bytes, str) is equivalent to the module-level string_types tuple
    # for isinstance(), and keeps this function self-contained.
    if isinstance(reason, (bytes, str)):
        # @deprecated("...") -- called with a reason attached.
        def decorator(func1):
            fmt1 = "Call to deprecated function {name} ({reason})."

            @functools.wraps(func1)
            def new_func1(*args, **kwargs):
                # Bug fix: the original called simplefilter('always') then
                # simplefilter('default'), clobbering any warning filters
                # the caller had configured.  catch_warnings() restores
                # the caller's filter state afterwards.
                with warnings.catch_warnings():
                    warnings.simplefilter('always', DeprecationWarning)
                    warnings.warn(fmt1.format(name=repr(func1.__name__), reason=reason),
                                  category=DeprecationWarning,
                                  stacklevel=2)
                return func1(*args, **kwargs)
            return new_func1
        return decorator
    elif inspect.isfunction(reason):
        # @deprecated -- used bare, directly on the function.
        func2 = reason
        fmt2 = "Call to deprecated function {name}."

        @functools.wraps(func2)
        def new_func2(*args, **kwargs):
            with warnings.catch_warnings():
                warnings.simplefilter('always', DeprecationWarning)
                warnings.warn(fmt2.format(name=repr(func2.__name__)),
                              category=DeprecationWarning,
                              stacklevel=2)
            return func2(*args, **kwargs)
        return new_func2
    else:
        # Anything else (int, class, ...) is a usage error.
        raise TypeError(repr(type(reason)))
def timer(func):
    '''
    Decorator which times a function call from start to end and prints
    the elapsed wall-clock time.  The wrapped function's return value is
    passed through unchanged.
    '''
    @functools.wraps(func)
    def decorator_timer(*args, **kwargs):
        # Bug fix: "funciton" typo in the user-facing message.
        print(f"Call to function {repr(func.__name__)} with timer set on")
        # perf_counter() is monotonic and has the highest available resolution.
        start_time = time.perf_counter()
        ret_val = func(*args, **kwargs)
        end_time = time.perf_counter()
        run_time = end_time - start_time
        print(f"Finished {repr(func.__name__)} in {run_time:.4f} secs")
        return ret_val
    return decorator_timer
def print_log(func, outputfile='./out.txt', access='w'):
    '''
    Decorator which saves all print output of *func* to a log file.

    Note that this temporarily swaps sys.stdout, which may not be
    desired (it is process-global and not thread-safe).

    func       : callable to wrap
    outputfile : path of the log file
    access     : file mode ('w' overwrite, 'a' append)
    '''
    @functools.wraps(func)
    def decorator_log(*args, **kwargs):
        print(f"Call to function {repr(func.__name__)} " +
              f"with print output saved to {outputfile}.")
        orig_stdout = sys.stdout
        try:
            # Bug fix: the original left the file open and sys.stdout
            # broken if func raised; the with-block and finally clause
            # guarantee cleanup on every path.
            with open(outputfile, access) as f:
                sys.stdout = f
                header = (f'RUNNING FUNCTION {repr(func.__name__)} | ' +
                          f'MONTH-DAY-YEAR ' +
                          datetime.today().strftime("%b-%d-%Y | ") +
                          f'HOUR:MINUTE:SECOND ' +
                          datetime.today().strftime("%H:%M:%S"))
                print('-' * 90)
                print("{:<90}".format(header))
                print()
                ret_val = func(*args, **kwargs)
        finally:
            # Always restore the original stdout.
            sys.stdout = orig_stdout
        return ret_val
    return decorator_log
import socket
import ssl
import ipaddress
from urlparse import urlparse
import datetime
from datetime import timedelta
import json
import time
def validIP(ip):
    '''
    Return True if *ip* is a valid IPv4 or IPv6 address string.

    Accepts str or bytes.  Bug fix: the original called the Python 2
    builtin unicode(), whose NameError under Python 3 was swallowed by
    the bare except -- making the function always return False.
    '''
    # Normalize to text; non-string input is stringified as before.
    ip = ip.decode('ascii', 'replace') if isinstance(ip, bytes) else str(ip)
    try:
        ipaddress.ip_address(ip)
        return True
    except ValueError:
        # Not parseable as an IP address.
        return False
def validPort(port):
    '''
    Return True if *port* can be interpreted as a TCP/UDP port number
    (an integer in the range 0-65535).
    '''
    try:
        port = int(port)
    except (TypeError, ValueError):
        # Non-numeric input (None, 'abc', ...) is simply not a valid port;
        # the bare except is narrowed to the two errors int() can raise.
        return False
    return 0 <= port <= 65535
def getSocket(ip, port, use_ssl=False, socket_timeout=4, connection_retries=2):
    '''
    Connect to ip:port and return the connected socket, or False on failure.

    ip
        String IP address for connection
    port
        Int port to use for connection
    use_ssl
        Wrap the socket in TLS (required for https).  Certificates are
        NOT verified, matching the old ssl.wrap_socket() behaviour.
    socket_timeout
        Seconds to wait per connection attempt
    connection_retries
        How many connection attempts to make
    '''
    if not validPort(port):
        print("Port {} not valid".format(port))
        return False
    if not validIP(ip):
        print("IP not valid: {}".format(ip))
        return False
    ssl_context = None
    if use_ssl:
        # Bug fix: ssl.wrap_socket() was deprecated and removed in
        # Python 3.12.  Build an unverified client context with the same
        # (insecure, no-verification) semantics.
        ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
    # Bug fix: the original retry loop was dead code -- every except
    # clause either returned or re-raised, so connection_retries was
    # never decremented, and exhausting it returned None instead of
    # False.  A fresh socket is created per attempt because a socket
    # whose connect() failed cannot reliably be reused.
    for _ in range(max(1, connection_retries)):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(socket_timeout)
        if ssl_context is not None:
            s = ssl_context.wrap_socket(s)
        try:
            s.connect((ip, int(port)))
            return s
        except socket.timeout:
            print('Socket timed out')
            s.close()
    return False
def getIP(d):
    '''
    Return the first IP address string that the given domain name
    resolves to, or False when resolution fails.

    d
        domain name you want to look up
    '''
    try:
        return socket.gethostbyname(d)
    except socket.error:
        # Narrowed from a bare except: socket.error (an alias of
        # OSError) covers gaierror/herror; programming errors such as
        # TypeError now surface instead of being silently hidden.
        return False
def sendData(s, data):
    '''
    Try to send data over a socket; return True on success, False otherwise.

    s
        Connected socket
    data
        bytes or str payload.  Bug fix: under Python 3, sendall()
        rejects str, so every textual request silently "failed" through
        the bare except -- str is now encoded to bytes first.
    '''
    if isinstance(data, str):
        data = data.encode()
    try:
        s.sendall(data)
        return True
    except OSError:
        # Narrowed from a bare except to actual socket/OS failures.
        print('Failed to send data')
        return False
# TODO: make the bucket width configurable
def parseData(samples):
    '''
    Group timestamped samples into fixed-width time buckets and compute
    the throughput of each bucket.

    samples : list of (datetime, bits) tuples ordered by time
    returns : dict mapping bucket-end timestamp (str) to
              {'bits': total bits in bucket, 'Mbps': throughput}
    '''
    buckets = {}
    bucketms = 15
    # Ported from Python 2: print statements -> print(), unicode -> str.
    startTime = samples[0][0]
    print('StartTime:', startTime)
    endTime = samples[-1][0]
    print('EndTime:', endTime)
    bucketEnd = startTime + timedelta(milliseconds=bucketms)
    buckets[str(bucketEnd)] = 0
    # Check each sample to see what bucket it belongs in
    for sample in samples:
        # Bug fix: the original advanced bucketEnd by at most one bucket
        # per sample, so a gap larger than bucketms attributed the sample
        # to the wrong bucket.  Advance until the sample fits.
        while sample[0] > bucketEnd:
            bucketEnd = bucketEnd + timedelta(milliseconds=bucketms)
            buckets[str(bucketEnd)] = 0
        # Add the bits to the bucket
        buckets[str(bucketEnd)] += int(sample[1])
    # Scale a bucket's bits (per bucketms window) up to bits per second.
    # Note: true division here; the original Python 2 integer division
    # truncated 1000/15 to 66, slightly underestimating throughput.
    time_multiplier = 1000 / bucketms
    bucketsMbps = {}
    for timeStamp, bits in buckets.items():
        mbps = (bits * time_multiplier) / 1024.0 / 1024.0
        bucketsMbps[timeStamp] = {
            'bits': bits,
            'Mbps': round(mbps, 2)
        }
    return bucketsMbps
# TODO
def timedRecv(s):
    '''
    Read from socket *s* until the peer closes, timestamping each chunk.

    Returns the per-bucket throughput summary produced by parseData().
    '''
    samples = []
    while True:
        chunk = s.recv(100000)
        if not chunk:
            # Empty read: the remote side closed the connection.
            break
        # Record arrival time and payload size in bits.
        samples.append((datetime.datetime.now(), len(chunk) * 8))
        # Sleep 1 ms (1/1000 of a second) between reads.
        time.sleep(0.001)
    return parseData(samples)
def httpGet(url):
    '''
    Fetch *url* with a minimal hand-rolled HTTP GET and print the
    measured throughput (Mbps) of each receive-time bucket.

    Returns False when a connection could not be established.
    '''
    # Ported from Python 2: urlparse moved to urllib.parse.  Imported
    # locally with a fallback so the module-level py2 import is unused.
    try:
        from urllib.parse import urlparse
    except ImportError:
        from urlparse import urlparse
    # Parse URL into parts
    pUrl = urlparse(url)
    ip = getIP(pUrl.netloc)
    # Get socket (TLS on the standard port for https)
    if pUrl.scheme == 'https':
        s = getSocket(ip, 443, use_ssl=True)
    else:
        s = getSocket(ip, 80)
    if s is False:
        print('Failed to get socket for {}'.format(pUrl.netloc))
        return False
    # Remove protocol and domain, leaving the request path
    stripString = '{}://{}'.format(pUrl.scheme, pUrl.netloc)
    getPath = pUrl.geturl().replace(stripString, '', 1)
    # Request line for the GET request
    getString = 'GET {} HTTP/1.1\r\n'.format(getPath)
    # Headers for the GET request
    headers = [
        'Host: {}'.format(pUrl.netloc),
        'User-Agent: PyPerf/0.1'
    ]
    # Build the full GET request
    getReqString = getString + "\r\n".join(headers) + '\r\n\r\n'
    # Send request
    sendData(s, getReqString)
    # Get the timed data response
    d = timedRecv(s)
    s.close()
    for time_stamp in d:
        print(time_stamp, ",", d[time_stamp]['Mbps'])


# Bug fix: the original URL 'http://https://www.lipsum.com/' had a
# doubled scheme, so urlparse produced netloc 'https:' and getIP failed.
httpGet('https://www.lipsum.com/')
|
import json
import pytest
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from ...wallet.basic import BasicWallet
from ..pds import *
from ...aathcf.credentials import verify_proof
from aries_cloudagent.connections.models.connection_record import ConnectionRecord
# Reference shape of an issued verifiable credential.  Tests only check
# that generated credentials contain non-empty values for these keys,
# not these exact placeholder values.  time_now() and OrderedDict come
# in via the star import from ..pds.
credential_test_schema = OrderedDict(
    {
        "@context": [
            "https://www.w3.org/2018/credentials/v1",
            "https://www.schema.org",
        ],
        "type": [
            "VerifiableCredential",
            "@TODO should this be oca schema or what dri points to",
        ],
        "issuer": "1234",
        "issuanceDate": time_now(),
        "credentialSubject": {
            "id": "TODO: Did of subject",
            "ocaSchema": {
                "dri": "1234",
                "dataDri": "1234",
            },
        },
        "proof": {
            "type": "Ed25519Signature2018",
            "created": time_now(),
            "proofPurpose": "assertionMethod",
            "verificationMethod": "1234",
            "jws": "1234",
        },
    }
)
def assert_that_contains(base: dict, to_verify: dict):
    """Assert that *to_verify* has a non-empty value for every key of *base*.

    Only key presence and non-emptiness (not None / {} / []) are
    checked, never the values themselves.  "@context" in *base* maps to
    the "context" key in *to_verify* (pydantic-style field aliasing).
    """
    for key in base:
        if key == "@context":
            key = "context"
        # Single lookup; a KeyError here also signals a missing field.
        value = to_verify[key]
        # `is not None` replaces the `!= None` anti-idiom.
        assert value is not None
        assert value != {}
        assert value != []
async def create_test_credential(issuer):
    """Issue a minimal test credential through *issuer* and return it.

    Sanity-checks that the issued credential carries a proof and echoes
    the requested credentialSubject before returning it as an
    OrderedDict (to preserve key order for proof verification).
    """
    subject = {
        "id": "TODO: Did of subject",
        "ocaSchema": {
            "dri": "1234",
            "dataDri": "1234",
        },
        "first_name": "Karol",
    }
    credential, _ = await issuer.create_credential(
        schema={"credential_type": "TestType"},
        credential_values=subject,
        credential_offer={},
        credential_request={},
    )
    result = json.loads(credential, object_pairs_hook=OrderedDict)
    assert result.get("proof") is not None
    assert result["credentialSubject"] == subject
    return result
class TestPDSIssuer(AsyncTestCase):
    """Tests for PDSIssuer credential creation backed by a BasicWallet."""

    async def setUp(self):
        # Fresh wallet and issuer per test; a public DID is created so
        # the issuer has a signing key available.
        self.wallet = BasicWallet()
        self.issuer: PDSIssuer = PDSIssuer(self.wallet)
        assert self.issuer.wallet is self.wallet
        await self.wallet.create_public_did()

    async def test_create_credential(self):
        # An issued credential must contain every schema field (non-empty)
        # and carry a proof that verifies against the wallet's key.
        credential_dict = await create_test_credential(self.issuer)
        assert_that_contains(credential_test_schema, credential_dict)
        assert_that_contains(credential_test_schema["proof"], credential_dict["proof"])
        assert await verify_proof(self.wallet, credential_dict) == True

    async def test_create_credential_null(self):
        # Empty credential_values (and empty schema) must be rejected.
        connection = ConnectionRecord(my_did="1234-my", their_did="1234-their")
        with self.assertRaises(IssuerError):
            credential, _ = await self.issuer.create_credential(
                schema={"credential_type": "TestType"},
                credential_values={},
                credential_offer={},
                credential_request={"connection_record": connection},
            )
        with self.assertRaises(IssuerError):
            credential, _ = await self.issuer.create_credential(
                schema={},
                credential_values={},
                credential_offer={},
                credential_request={"connection_record": connection},
            )

    async def test_credentials_are_equal(self):
        # create_credential_ex is a convenience wrapper around
        # create_credential; both paths should produce structurally
        # identical credentials (same keys, same subject).
        test_cred = {
            "credentialSubject": {
                "id": "TODO: Did of subject",
                "ocaSchema": {
                    "dri": "1234",
                    "dataDri": "1234",
                },
                "first_name": "Karol",
            },
        }
        credential_ex = await self.issuer.create_credential_ex(
            test_cred["credentialSubject"], "TestType"
        )
        credential_ex = json.loads(credential_ex)
        credential = await create_test_credential(self.issuer)
        assert credential["credentialSubject"] == credential_ex["credentialSubject"]
        assert credential.keys() == credential_ex.keys()
class TestPDSIssuerNoActiveDid(AsyncTestCase):
    """Issuer behaviour when the wallet has no public DID to sign with."""

    async def test_create_credential_active_did_is_none(self):
        # Unlike TestPDSIssuer.setUp, create_public_did() is deliberately
        # NOT called here, so both creation paths must raise IssuerError.
        self.wallet = BasicWallet()
        self.issuer: PDSIssuer = PDSIssuer(self.wallet)
        assert self.issuer.wallet is self.wallet
        with self.assertRaises(IssuerError):
            await self.issuer.create_credential_ex({})
        with self.assertRaises(IssuerError):
            credential, _ = await self.issuer.create_credential(
                schema={"credential_type": "TestType"},
                credential_values={},
                credential_offer={},
                credential_request={},
            )
# coding: UTF-8
### GPU selection
import os
#os.environ["CUDA_VISIBLE_DEVICES"] = "4"
### imports
import torch
# The whole script runs on the GPU; a CUDA device is required.
device=torch.device('cuda')
import sys
import cv2
import numpy as np
import time
import torch.nn.functional as F
import matplotlib.pyplot as plt
import statistics
from sift_flow_torch import SiftFlowTorch
from third_party.flowiz import flowiz
import glob
from PIL import Image
def read_imgs(load_filepath, load_filename, index, image_resize_factor=1):
    """Load a batch of images at once.

    load_filepath       : str, directory containing the image files
    load_filename       : str, file-name format template, e.g. "img_{:05}.tif"
    index               : list of ints substituted into load_filename
    image_resize_factor : int, divide both image dimensions by this factor
    returns             : list of images as loaded by cv2 (numpy arrays)
    """
    print(index)
    imgs = [cv2.imread(load_filepath + "/" + load_filename.format(i)) for i in index]
    # Robustness fix: cv2.imread() returns None for unreadable paths,
    # which previously crashed later inside cv2.resize with a cryptic
    # error.  Fail early with the offending indices instead.
    missing = [i for i, im in zip(index, imgs) if im is None]
    if missing:
        raise FileNotFoundError(
            "could not read image(s) for index {}".format(missing))
    imgs = [cv2.resize(im, (im.shape[1] // image_resize_factor,
                            im.shape[0] // image_resize_factor)) for im in imgs]
    print("img size: ", imgs[0].shape)
    return imgs
def choise_gt(noise_batch):  # a burst of noise maps (one per frame)
    """Pick the cleanest frame of a burst from its noise maps.

    noise_batch : sequence of 2-D noise maps, one per burst frame
    returns     : (gt, noisy) where gt is the index of the map with the
                  smallest total noise and noisy is the list of
                  [total_noise, index] pairs of the remaining frames,
                  sorted by ascending noise.
    """
    # Total noise per frame; sum(sum(m)) kept so 2-D torch tensors keep
    # working exactly as before.
    totals = [sum(sum(m)) for m in noise_batch]
    # min() with a key replaces the manual running-minimum loop; like the
    # original `s < small` comparison, ties keep the earliest index.
    gt = min(range(len(totals)), key=lambda i: totals[i])
    print(gt, end=", ")
    noisy = sorted([s, i] for i, s in enumerate(totals) if i != gt)
    return gt, noisy  # cleanest frame index, remaining [noise, index] pairs
def make_dataset(load_noise_filepath="result_noise",load_img_filepath="TIFF files",
                 load_img_filename="5um CNT.0_{:05}_1.spm.tif", save_filepath="dataset",
                 n_batch=4, n_burst=10, n_set=9, gamma_corr=False):
    """Cut bursts of images and noise maps into n_batch x n_batch tiles
    and save each tile as one training-sample directory.

    For every tile the frame with the lowest total noise becomes the
    ground truth (gt<idx>.png); the remaining frames are saved as
    input<idx>.png and listed in sep_trainlist.txt.

    load_noise_filepath : directory holding per-frame noise maps (*.pt)
    load_img_filepath   : directory holding the source images
    load_img_filename   : format template for the image file names
    save_filepath       : output dataset root
    n_batch             : tiles per image side (n_batch**2 tiles total)
    n_burst             : frames per burst
    n_set               : ignored -- recomputed from the *.pt file count
    gamma_corr          : apply a gamma-correction LUT before saving
    """
    os.makedirs(save_filepath, exist_ok=True)
    files = glob.glob(load_noise_filepath + "/*.pt")
    n_set = int(len(files)/n_burst)
    if gamma_corr:
        # Gamma lookup table; assumes 8-bit grayscale output -- TODO confirm exponent 1.3/2.2.
        gamma045LUT = [pow(x/255.0, 1.3/2.2)*255 for x in range(256)]
    print(n_set, "set")
    for i in range(n_set):
        print("=="*10)
        index = list(range(i*n_burst, (i+1)*n_burst))
        imgs = read_imgs(load_img_filepath,load_img_filename, index)
        noise_map_list = [torch.load(load_noise_filepath
                          + "/" + "{:05}".format(i*n_burst + j)+".pt") for j in range(n_burst)]
        # Move the noise maps to the GPU
        noise_map = torch.zeros(n_burst, *noise_map_list[0].shape).to(device)
        for j in range(n_burst):
            noise_map[j] = noise_map_list[j].to(device)
        # Abort if the image / noise map sizes are not divisible by n_batch
        if (len(imgs[0]) % n_batch) or (noise_map.shape[1] % n_batch) :
            print('Error: batch division faild', file=sys.stderr)
            sys.exit(1)
        print("GT=", end="")
        noise_batch_size = int(len(noise_map[0]) / n_batch)
        img_batch_size = int(len(imgs[0]) / n_batch)
        os.makedirs(save_filepath+"/set{:04}".format(i), exist_ok=True)
        noise_batch = torch.zeros(n_burst, noise_batch_size, noise_batch_size).to(device)
        # Image tiles keep their 3 color channels, so store them in a plain list
        img_batch = [0]*n_burst
        for n1 in range(n_batch):  # tile split along y
            nsy,ngy = n1 *noise_batch_size, (n1+1) * noise_batch_size
            isy,igy = n1 *img_batch_size, (n1+1) * img_batch_size
            for n2 in range(n_batch):  # tile split along x
                save_filepath_batch = save_filepath + "/set{:04}".format(i) + "/batch{:02}".format(n1*n_batch + n2)
                os.makedirs(save_filepath_batch, exist_ok=True)
                # Crop out this tile
                nsx,ngx = n2 * noise_batch_size, (n2+1) * noise_batch_size
                isx,igx = n2 *img_batch_size, (n2+1) * img_batch_size
                for n3 in range(n_burst):  # every frame of the burst has an img / noise map
                    noise_batch[n3] = noise_map[n3, nsy:ngy, nsx:ngx]
                    img_batch[n3] = imgs[n3][isy:igy, isx:igx]
                # Save this tile: the cleanest frame becomes the ground truth
                gt_index, noisy_index = choise_gt(noise_batch)
                pil_img = Image.fromarray(img_batch[gt_index]).convert("L")
                if gamma_corr:
                    pil_img = pil_img.point(gamma045LUT)
                pil_img.save(save_filepath_batch+f"/gt{gt_index}.png")
                with open(save_filepath+"/sep_trainlist.txt", mode='a') as f:
                    for _, n in (noisy_index):
                        pil_img = Image.fromarray(img_batch[n]).convert("L")
                        if gamma_corr:
                            pil_img = pil_img.point(gamma045LUT)
                        filename = "input{:03}.png".format(n)
                        pil_img.save(save_filepath_batch + "/" + filename)
                        # Append to the training file list; the condition
                        # skips the leading newline only for the very first
                        # entry.  NOTE(review): when the first tile's GT is
                        # frame 0, the first noisy index n is nonzero, so a
                        # blank first line is written -- verify downstream
                        # readers tolerate this.
                        if i or n1 or n2 or n:
                            f.write("\n")
                        f.write("set{:04}".format(i) + "/batch{:02}".format(n1*n_batch + n2) + "/" + filename)
        # Terminate the "GT=" progress line for this set.
        print()
#make_dataset()
|
<gh_stars>10-100
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.layers import Dense, Conv2D, GlobalAveragePooling2D, Concatenate
from tf2rl.algos.policy_base import OffPolicyAgent
from tf2rl.misc.target_update_ops import update_target_variables
from tf2rl.policies.tfp_gaussian_actor import GaussianActor
class CriticV(tf.keras.Model):
    """State-value network V(s) for image observations.

    A small conv stack followed by global average pooling and dense
    layers, producing one scalar value per state.
    """

    def __init__(self, state_shape, name='vf'):
        super().__init__(name=name)
        # Conv stack progressively downsamples the observation; global
        # average pooling flattens it to a fixed-size feature vector.
        self.conv_layers = [Conv2D(16, 3, strides=3, activation='relu'), Conv2D(64, 3, strides=2, activation='relu'),
                            Conv2D(128, 3, strides=2, activation='relu'), Conv2D(256, 3, strides=2, activation='relu'),
                            GlobalAveragePooling2D()]
        self.connect_layers = [Dense(128, activation='relu'), Dense(32, activation='relu')]
        self.out_layer = Dense(1, name="V", activation='linear')
        # Run a dummy batch through the model once so the weights are
        # built immediately (required before e.g. copying weights).
        dummy_state = tf.constant(np.zeros(shape=(1,) + state_shape, dtype=np.float32))
        self(dummy_state)
        #self.summary()

    def call(self, states):
        """Return V(s) with shape (batch,)."""
        features = states
        for conv_layer in self.conv_layers:
            features = conv_layer(features)
        for connect_layer in self.connect_layers:
            features = connect_layer(features)
        values = self.out_layer(features)
        # Drop the trailing singleton dimension: (batch, 1) -> (batch,)
        return tf.squeeze(values, axis=1)
class CriticQ(tf.keras.Model):
    """Action-value network Q(s, a) for image observations.

    Conv features of the state are concatenated with a dense embedding
    of the action before the final value head.
    """

    def __init__(self, state_shape, action_dim, name='qf'):
        super().__init__(name=name)
        # Same conv feature extractor layout as CriticV.
        self.conv_layers = [Conv2D(16, 3, strides=3, activation='relu'), Conv2D(64, 3, strides=2, activation='relu'),
                            Conv2D(128, 3, strides=2, activation='relu'), Conv2D(256, 3, strides=2, activation='relu'),
                            GlobalAveragePooling2D()]
        # Dense embedding applied to the raw action vector.
        self.act_layers = [Dense(64, activation='relu')]
        self.connect_layers = [Dense(128, activation='relu'), Dense(32, activation='relu')]
        self.out_layer = Dense(1, name="Q", activation='linear')
        # Build weights immediately by running a dummy (state, action) pair.
        dummy_state = tf.constant(np.zeros(shape=(1,) + state_shape, dtype=np.float32))
        dummy_action = tf.constant(np.zeros(shape=[1, action_dim], dtype=np.float32))
        self(dummy_state, dummy_action)
        #self.summary()

    def call(self, states, actions):
        """Return Q(s, a) with shape (batch,)."""
        features = states
        for conv_layer in self.conv_layers:
            features = conv_layer(features)
        action = self.act_layers[0](actions)
        # Fuse state features with the action embedding.
        features_action = tf.concat([features, action], axis=1)
        for connect_layer in self.connect_layers:
            features_action = connect_layer(features_action)
        values = self.out_layer(features_action)
        return tf.squeeze(values, axis=1)
class SAC(OffPolicyAgent):
    """Soft Actor-Critic for image observations.

    Uses a squashed Gaussian policy, twin Q critics, and a separate V
    critic with a Polyak-averaged target network; the entropy
    temperature alpha is either fixed or learned (auto_alpha).
    """

    def __init__(
            self,
            state_shape,
            action_dim,
            name="SAC",
            max_action=1.0,
            lr=3e-4,
            tau=5e-3,
            alpha=0.2,
            auto_alpha=False,
            n_warmup=int(1e4),
            memory_capacity=int(1e6),
            **kwargs):
        super().__init__(
            name=name, memory_capacity=memory_capacity, n_warmup=n_warmup, **kwargs)
        self._setup_actor(state_shape, action_dim, lr, max_action)
        self._setup_critic_v(state_shape, lr)
        self._setup_critic_q(state_shape, action_dim, lr)
        # Set hyper-parameters
        self.tau = tau  # Polyak averaging rate for the target V network
        self.auto_alpha = auto_alpha
        if auto_alpha:
            # Train log(alpha) so alpha stays positive; DeferredTensor
            # keeps self.alpha transparently in sync with log_alpha.
            self.log_alpha = tf.Variable(tf.math.log(alpha), dtype=tf.float32)
            self.alpha = tfp.util.DeferredTensor(pretransformed_input=self.log_alpha, transform_fn=tf.exp, dtype=tf.float32)
            # Target entropy heuristic: -|action_dim|.
            self.target_alpha = -action_dim
            self.alpha_optimizer = tf.keras.optimizers.Adam(learning_rate=lr, beta_1=0.5)
        else:
            self.alpha = alpha
        self.state_ndim = len(state_shape)

    def _setup_actor(self, state_shape, action_dim, lr, max_action=1.):
        # Squashed (tanh) Gaussian policy bounded to [-max_action, max_action].
        self.actor = GaussianActor(state_shape, action_dim, max_action, squash=True)
        self.actor_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

    def _setup_critic_q(self, state_shape, action_dim, lr):
        # Twin Q networks (clipped double-Q trick), each with its own optimizer.
        self.qf1 = CriticQ(state_shape, action_dim, name="qf1")
        self.qf2 = CriticQ(state_shape, action_dim, name="qf2")
        self.qf1_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
        self.qf2_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

    def _setup_critic_v(self, state_shape, lr):
        self.vf = CriticV(state_shape)
        self.vf_target = CriticV(state_shape)
        # tau=1.0 performs a hard copy so both networks start identical.
        update_target_variables(self.vf_target.weights, self.vf.weights, tau=1.0)
        self.vf_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)

    def get_action(self, state, test=False):
        """Return an action for *state*; accepts a single state or a batch."""
        assert isinstance(state, np.ndarray)
        is_single_state = len(state.shape) == self.state_ndim
        # Promote a single state to a batch of one for the network call.
        state = np.expand_dims(state, axis=0).astype(np.float32) if is_single_state else state
        action = self._get_action_body(tf.constant(state), test)
        return action.numpy()[0] if is_single_state else action

    @tf.function
    def _get_action_body(self, state, test):
        # Only the sampled action is needed here; log-probs/entropy are discarded.
        actions, log_pis, entropy = self.actor(state, test)
        return actions

    def train(self, states, actions, next_states, rewards, dones, weights=None):
        """Run one SAC update step and log diagnostics; returns TD errors."""
        if weights is None:
            weights = np.ones_like(rewards)
        td_errors, actor_loss, vf_loss, qf_loss, q_value, logp_min, logp_max, logp_mean, entropy_mean = self._train_body(
            states, actions, next_states, rewards, dones, weights)
        tf.summary.scalar(name=self.policy_name + "/actor_loss", data=actor_loss)
        tf.summary.scalar(name=self.policy_name + "/critic_V_loss", data=vf_loss)
        tf.summary.scalar(name=self.policy_name + "/critic_Q_loss", data=qf_loss)
        tf.summary.scalar(name=self.policy_name + "/Q_value", data=q_value)
        tf.summary.scalar(name=self.policy_name + "/logp_min", data=logp_min)
        tf.summary.scalar(name=self.policy_name + "/logp_max", data=logp_max)
        tf.summary.scalar(name=self.policy_name + "/logp_mean", data=logp_mean)
        tf.summary.scalar(name=self.policy_name + "/entropy_mean", data=entropy_mean)
        if self.auto_alpha:
            tf.summary.scalar(name=self.policy_name + "/logp_mean+target", data=logp_mean + self.target_alpha)
            tf.summary.scalar(name=self.policy_name + "/alpha", data=self.alpha)
        return td_errors

    @tf.function
    def _train_body(self, states, actions, next_states, rewards, dones, weights):
        with tf.device(self.device):
            assert len(dones.shape) == 2
            assert len(rewards.shape) == 2
            rewards = tf.squeeze(rewards, axis=1)
            dones = tf.squeeze(dones, axis=1)
            not_dones = 1. - tf.cast(dones, dtype=tf.float32)
            # persistent=True: the same tape provides gradients for all
            # four networks below.
            with tf.GradientTape(persistent=True) as tape:
                # Compute loss of critic Q: TD target bootstraps from the
                # target V network on the next state.
                current_q1 = self.qf1(states, actions)
                current_q2 = self.qf2(states, actions)
                next_v_target = self.vf_target(next_states)
                target_q = tf.stop_gradient(rewards + not_dones * self.discount * next_v_target)
                td_loss_q1 = tf.reduce_mean((target_q - current_q1) ** 2)
                td_loss_q2 = tf.reduce_mean((target_q - current_q2) ** 2)
                # Compute loss of critic V
                current_v = self.vf(states)
                # Resample actions to update V: V target is the clipped
                # double-Q value minus the entropy term alpha * log pi.
                sample_actions, logp, entropy = self.actor(states)
                current_q1 = self.qf1(states, sample_actions)
                current_q2 = self.qf2(states, sample_actions)
                current_min_q = tf.minimum(current_q1, current_q2)
                target_v = tf.stop_gradient(current_min_q - self.alpha * logp)
                td_errors = target_v - current_v
                td_loss_v = tf.reduce_mean(td_errors ** 2)
                # Compute loss of policy: maximize Q while keeping entropy high.
                policy_loss = tf.reduce_mean(self.alpha * logp - current_min_q)
                # Compute loss of temperature parameter for entropy
                if self.auto_alpha:
                    alpha_loss = -tf.reduce_mean((self.alpha * tf.stop_gradient(logp + self.target_alpha)))
            # Critic Q1 loss
            q1_grad = tape.gradient(td_loss_q1, self.qf1.trainable_variables)
            self.qf1_optimizer.apply_gradients(zip(q1_grad, self.qf1.trainable_variables))
            # Critic Q2 loss
            q2_grad = tape.gradient(td_loss_q2, self.qf2.trainable_variables)
            self.qf2_optimizer.apply_gradients(zip(q2_grad, self.qf2.trainable_variables))
            # Critic V loss
            vf_grad = tape.gradient(td_loss_v, self.vf.trainable_variables)
            self.vf_optimizer.apply_gradients(zip(vf_grad, self.vf.trainable_variables))
            # Update Target V via Polyak averaging
            update_target_variables(self.vf_target.weights, self.vf.weights, self.tau)
            # Actor loss
            actor_grad = tape.gradient(policy_loss, self.actor.trainable_variables)
            self.actor_optimizer.apply_gradients(zip(actor_grad, self.actor.trainable_variables))
            # Alpha loss
            if self.auto_alpha:
                alpha_grad = tape.gradient(alpha_loss, [self.log_alpha])
                self.alpha_optimizer.apply_gradients(zip(alpha_grad, [self.log_alpha]))
            # Persistent tapes must be released explicitly.
            del tape
        return td_errors, policy_loss, td_loss_v, td_loss_q1, tf.reduce_mean(current_min_q), tf.reduce_min(logp), \
            tf.reduce_max(logp), tf.reduce_mean(logp), tf.reduce_mean(entropy)

    def compute_td_error(self, states, actions, next_states, rewards, dones):
        """Return per-sample TD errors (used for prioritized replay weights)."""
        if isinstance(actions, tf.Tensor):
            rewards = tf.expand_dims(rewards, axis=1)
            dones = tf.expand_dims(dones, 1)
        td_errors = self._compute_td_error_body(states, actions, next_states, rewards, dones)
        return td_errors.numpy()

    @tf.function
    def _compute_td_error_body(self, states, actions, next_states, rewards, dones):
        with tf.device(self.device):
            not_dones = 1. - tf.cast(dones, dtype=tf.float32)
            # Compute TD errors for Q-value func (only qf1 is used here).
            current_q1 = self.qf1(states, actions)
            vf_next_target = self.vf_target(next_states)
            target_q = tf.stop_gradient(rewards + not_dones * self.discount * vf_next_target)
            td_errors_q1 = target_q - current_q1
        return td_errors_q1

    @staticmethod
    def get_argument(parser=None):
        """Extend the OffPolicyAgent CLI parser with SAC-specific flags."""
        parser = OffPolicyAgent.get_argument(parser)
        parser.add_argument('--alpha', type=float, default=0.2)
        parser.add_argument('--auto-alpha', action="store_true")
        return parser
|
import torch
import torchvision
from tool import imutils
import argparse
import importlib
import numpy as np
import cv2
import voc12.data
from torch.utils.data import DataLoader
import scipy.misc
import torch.nn.functional as F
import os.path
from tool import imutils, pyutils
import time
from PIL import Image
import os
#voc12_root=os.environ['voc_root']
# Root of the PASCAL VOC dataset (must contain JPEGImages/); originally
# read from the voc_root environment variable, now hard-coded.
voc12_root="../voc_root"
def get_palette(num_cls):
    """Build the standard PASCAL VOC color palette.

    Each class index j is mapped to an RGB triple by spreading the bits
    of j across the high bits of the three channels.

    num_cls : number of classes (palette entries)
    returns : flat list of num_cls * 3 ints, [R0, G0, B0, R1, G1, B1, ...]
    """
    palette = [0] * (num_cls * 3)
    for j in range(num_cls):
        label = j
        shift = 7
        # Consume the label three bits at a time, one bit per channel,
        # writing from the most significant bit of each byte downwards.
        while label:
            palette[j * 3 + 0] |= ((label >> 0) & 1) << shift
            palette[j * 3 + 1] |= ((label >> 1) & 1) << shift
            palette[j * 3 + 2] |= ((label >> 2) & 1) << shift
            shift -= 1
            label >>= 3
    return palette
def mask2png(mask, saven):
    """Save an integer label mask as an indexed-color PNG.

    mask  : 2-D array of class indices
    saven : output file path
    """
    indexed = Image.fromarray(mask.astype(np.uint8))
    # Color each class index with the standard VOC palette.
    indexed.putpalette(get_palette(256))
    indexed.save(saven)
if __name__ == '__main__':
    # Refine precomputed CAMs with a trained affinity network (random
    # walk over pixel affinities), save the refined maps and their
    # CRF-post-processed versions as .npy and indexed PNG masks.
    # NOTE(review): this follows the AffinityNet CAM random-walk
    # inference pipeline -- verify against the original implementation.
    parser = argparse.ArgumentParser()
    #parser.add_argument("--weights", required=True, type=str)
    parser.add_argument("--infer_list", default="voc12/train_aug.txt", type=str)
    parser.add_argument("--num_workers", default=8, type=int)
    parser.add_argument("--alpha", default=16, type=int)
    parser.add_argument("--beta", default=8, type=int)
    parser.add_argument("--logt", default=8, type=int)
    args = parser.parse_args()
    # Load the affinity network with pretrained weights, eval mode, on GPU.
    model = getattr(importlib.import_module("network.resnet38_aff"), 'Net')()
    model.load_state_dict(torch.load("../pretrained_models/res38_aff.pth"))
    model.eval()
    model.cuda()
    infer_dataset = voc12.data.VOC12ClsDataset(args.infer_list, voc12_root=voc12_root,
                                               transform=torchvision.transforms.Compose(
                                                   [np.asarray,
                                                    model.normalize,
                                                    imutils.HWC_to_CHW]))
    infer_data_loader = DataLoader(infer_dataset, shuffle=False, num_workers=args.num_workers, pin_memory=True)
    # Input CAMs and the two output directories.
    save_dir_cam = "results/out_cam"
    save_dir_aff = "results/out_aff"
    os.makedirs(save_dir_aff, exist_ok=True)
    save_dir_aff_crf = "results/out_aff_crf"
    os.makedirs(save_dir_aff_crf, exist_ok=True)
    # NOTE(review): the loop variable `iter` shadows the builtin.
    for iter, (name, img, label) in enumerate(infer_data_loader):
        name = name[0]; label = label[0]
        img_path=voc12_root+'/JPEGImages/'+name+'.jpg'
        orig_img = np.asarray(Image.open(img_path))
        orig_img_size = orig_img.shape[:2]
        print(iter)
        orig_shape = img.shape
        # Pad H and W up to multiples of 8 (the affinity net's stride).
        padded_size = (int(np.ceil(img.shape[2]/8)*8), int(np.ceil(img.shape[3]/8)*8))
        p2d = (0, padded_size[1] - img.shape[3], 0, padded_size[0] - img.shape[2])
        img = F.pad(img, p2d)
        dheight = int(np.ceil(img.shape[2]/8))
        dwidth = int(np.ceil(img.shape[3]/8))
        # NOTE(review): np.load(...).item() on an object array needs
        # allow_pickle=True on NumPy >= 1.16.5 -- verify the pinned version.
        cam = np.load(os.path.join(save_dir_cam, name + '.npy')).item()
        # Assemble a 21-channel CAM (class 0 = background, computed from
        # the foreground maxima raised to the alpha power).
        cam_full_arr = np.zeros((21, orig_shape[2], orig_shape[3]), np.float32)
        for k, v in cam.items():
            cam_full_arr[k+1] = v
        cam_full_arr[0] = (1 - np.max(cam_full_arr[1:], (0), keepdims=False))**args.alpha
        cam_full_arr = np.pad(cam_full_arr, ((0, 0), (0, p2d[3]), (0, p2d[1])), mode='constant')
        with torch.no_grad():
            # Affinity matrix sharpened by beta, then row-normalized into
            # a transition matrix; squaring it logt times performs 2**logt
            # random-walk steps.
            aff_mat = torch.pow(model.forward(img.cuda(), True), args.beta)
            trans_mat = aff_mat / torch.sum(aff_mat, dim=0, keepdim=True)#D sum(W)
            for _ in range(args.logt):
                trans_mat = torch.matmul(trans_mat, trans_mat)
            # Downsample the CAM to the affinity grid and propagate it.
            cam_full_arr = torch.from_numpy(cam_full_arr)
            cam_full_arr = F.avg_pool2d(cam_full_arr, 8, 8)
            cam_vec = cam_full_arr.view(21, -1)
            cam_rw = torch.matmul(cam_vec.cuda(), trans_mat)
            cam_rw = cam_rw.view(1, 21, dheight, dwidth)
            # Upsample back to the padded image size, then crop the padding.
            cam_rw = torch.nn.Upsample((img.shape[2], img.shape[3]), mode='bilinear')(cam_rw)
            cam_rw = cam_rw[:,:,:orig_shape[2], :orig_shape[3]]
            cam_rw = cam_rw.squeeze().data.cpu().numpy()
            # Keep background plus only the classes present in the image label.
            aff_dict = {}
            aff_dict[0] = cam_rw[0]
            for i in range(20):
                if label[i] > 1e-5:
                    aff_dict[i+1] = cam_rw[i+1]
            np.save(os.path.join(save_dir_aff, name + '.npy'), aff_dict)
            mask=np.argmax(cam_rw,axis=0)
            mask2png(mask, os.path.join(save_dir_aff, name + '.png'))
            # CRF post-processing over the kept channels only.
            v = np.array(list(aff_dict.values()))
            aff_crf = imutils.crf_inference(orig_img, v, labels=v.shape[0])
            # Scatter the CRF output back into the full 21-channel layout.
            aff_crf_full_arr = np.zeros((21, orig_shape[2], orig_shape[3]), np.float32)
            cnt=0
            for k, v in aff_dict.items():
                aff_crf_full_arr[k] = aff_crf[cnt]
                cnt+=1
            np.save(os.path.join(save_dir_aff_crf, name + '.npy'), aff_crf)
            mask=np.argmax(aff_crf_full_arr,axis=0)
            mask2png(mask, os.path.join(save_dir_aff_crf, name + '.png'))
|
# coding=utf-8
from utils import Parser, criterions
import setproctitle # pip install setproctitle
from predict import AverageMeter
from data.data_utils import init_fn
from data.sampler import CycleSampler
from data import datasets
import models
import numpy as np
import argparse
import os
import time
import logging
import random
import glob
from loguru import logger
from tensorboardX import writer
import torch
import torch.backends.cudnn as cudnn
import torch.optim
from torch.utils.data import DataLoader
import wandb
from tensorboardX import SummaryWriter
import time
# Let cuDNN autotune convolution algorithms (inputs are fixed-size).
cudnn.benchmark = True

parser = argparse.ArgumentParser()
parser.add_argument('-cfg', '--cfg', default='/home/xwj/Xcode/BraTS-DMFNet/experiments/compare.yaml', type=str,
                    help='Your detailed configuration of the network')
parser.add_argument('-gpu', '--gpu', default='0,1,2', type=str,
                    help='Supprot one GPU & multiple GPUs.')
# NOTE(review): no type=int here, so a command-line batch_size arrives
# as a string while the default is an int -- verify downstream usage.
parser.add_argument('-batch_size', '--batch_size', default=6,
                    help='Batch size')
parser.add_argument('-restore', '--restore',
                    default='model_last.pth', type=str)  # model_last.pth
path = os.path.dirname(__file__)
# parse arguments, then merge in everything from the YAML config file
args = parser.parse_args()
args = Parser(args.cfg, log='train').add_args(args)
# args.net_params.device_ids= [int(x) for x in (args.gpu).split(',')]
ckpts = args.savedir
# Checkpoint to resume from (specify the epoch via --restore).
args.resume = os.path.join(ckpts, args.restore)  # specify the epoch
def main():
    """Train the configured segmentation network on the BraTS data.

    Builds model/optimizer/criterion from the YAML config, optionally
    resumes from a checkpoint, then runs an iteration-based training
    loop with periodic checkpointing and TensorBoard loss logging.
    """
    os.makedirs(ckpts, exist_ok=True)
    timestamp = time.strftime("%m-%d_%H-%M", time.localtime())
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    assert torch.cuda.is_available(), "Currently, we only support CUDA version"
    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)
    logger.add(ckpts+'/output.log')
    logger.info(args)
    # Model / optimizer / loss are all resolved by name from the config.
    Network = getattr(models, args.net)
    model = Network(**args.net_params)
    model = torch.nn.DataParallel(model).cuda()
    optimizer = getattr(torch.optim, args.opt)(
        model.parameters(), **args.opt_params)
    criterion = getattr(criterions, args.criterion)
    msg = ''
    if args.resume:
        # Resume training state (iteration counter, weights, optimizer).
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_iter = checkpoint['iter']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optim_dict'])
            msg = ("=> loaded checkpoint '{}' (iter {})".format(
                args.resume, checkpoint['iter']))
        else:
            msg = "=> no checkpoint found at '{}'".format(args.resume)
    else:
        msg = '-------------- New training session ----------------'
    msg += '\n'
    logger.info(msg)
    logger.info(model)
    # Data loading code
    Dataset = getattr(datasets, args.dataset)
    # NOTE(review): the [:60] slice caps training to 60 cases -- looks
    # like a leftover debugging limit; confirm it is intentional.
    train_list = glob.glob(args.train_list_dir)[:60]
    train_set = Dataset(train_list, root=args.train_data_dir,
                        for_train=True, transforms=args.train_transforms)
    # Total iterations: explicit num_iters, or derived from num_epochs.
    num_iters = args.num_iters or (
        len(train_set) * args.num_epochs) // args.batch_size
    num_iters -= args.start_iter
    # CycleSampler re-cycles the dataset to yield exactly num_iters batches.
    train_sampler = CycleSampler(len(train_set), num_iters*args.batch_size)
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=args.batch_size,
        # collate_fn=train_set.collate,
        sampler=train_sampler,
        num_workers=args.workers,
        pin_memory=True,
        worker_init_fn=init_fn)
    start = time.time()
    # Number of batches per epoch (used to convert iterations to epochs).
    enum_batches = len(train_set) / float(args.batch_size)
    losses = AverageMeter()
    torch.set_grad_enabled(True)
    # wandb.init(config=args, project='Brats2018', name=timestamp + '@' + 'compare.yaml',
    #            sync_tensorboard=True)
    writer = SummaryWriter()
    for i, data in enumerate(train_loader, args.start_iter):
        elapsed_bsize = int(i / enum_batches)+1
        epoch = int((i + 1) / enum_batches)
        # Show training progress in the process title (ps/top).
        setproctitle.setproctitle(
            "Epoch:{}/{}".format(elapsed_bsize, args.num_epochs))
        # actual training: polynomial LR decay per epoch
        adjust_learning_rate(
            optimizer, epoch, args.num_epochs, args.opt_params.lr)
        #data = [t.cuda(non_blocking=True) for t in data]
        x, target = data[:2]
        x = x.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        output = model(x)
        if not args.weight_type:  # compatible for the old version
            args.weight_type = 'square'
        if args.criterion_kwargs is not None:
            loss = criterion(output, target, logger, **args.criterion_kwargs)
        else:
            loss = criterion(output, target, logger)
        # measure accuracy and record loss
        losses.update(loss.item(), target.numel())
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Checkpoint on the save_freq schedule and during the last epochs.
        # NOTE(review): if num_epochs <= 4 one of these int(...) factors
        # can be <= 0 and the modulo will raise -- verify config bounds.
        if (i+1) % int(enum_batches * args.save_freq) == 0 \
                or (i+1) % int(enum_batches * (args.num_epochs - 1)) == 0\
                or (i+1) % int(enum_batches * (args.num_epochs - 2)) == 0\
                or (i+1) % int(enum_batches * (args.num_epochs - 3)) == 0\
                or (i+1) % int(enum_batches * (args.num_epochs - 4)) == 0:
            file_name = os.path.join(ckpts, 'model_epoch_{}.pth'.format(epoch))
            torch.save({
                'iter': i,
                'state_dict': model.state_dict(),
                'optim_dict': optimizer.state_dict(),
            },
                file_name)
        # Log the running average loss, then reset it for the next window.
        msg = '>>> Iter {0:}, Epoch {1:.4f}, Loss {2:.7f}'.format(
            i+1, (i+1)/enum_batches, losses.avg)
        logger.info(msg)
        writer.add_scalar('loss', losses.avg, i)
        losses.reset()
    writer.close()
    # Save the final model state as model_last.pth.
    i = num_iters + args.start_iter
    file_name = os.path.join(ckpts, 'model_last.pth')
    torch.save({
        'iter': i,
        'state_dict': model.state_dict(),
        'optim_dict': optimizer.state_dict(),
    },
        file_name)
    msg = 'total time: {:.4f} minutes'.format((time.time() - start)/60)
    logger.info(msg)
def adjust_learning_rate(optimizer, epoch, MAX_EPOCHES, INIT_LR, power=0.9):
    """Apply polynomial ("poly") learning-rate decay in place.

    Sets every parameter group's learning rate to
    ``INIT_LR * (1 - epoch / MAX_EPOCHES) ** power``, rounded to 8 decimal
    places, so the rate falls from ``INIT_LR`` at epoch 0 down to 0 at
    ``MAX_EPOCHES``.

    Args:
        optimizer: torch.optim optimizer whose ``param_groups`` are updated.
        epoch: Current epoch number (zero-based).
        MAX_EPOCHES: Total number of training epochs.
        INIT_LR: Initial learning rate at epoch 0.
        power: Decay exponent of the polynomial schedule (default 0.9).
    """
    # The new rate does not depend on the group, so compute it once outside
    # the loop; round to keep logged/checkpointed values tidy.
    new_lr = round(INIT_LR * np.power(1 - (epoch) / MAX_EPOCHES, power), 8)
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
# Script entry point: run the full training session defined by main().
if __name__ == '__main__':
    main()
|
<gh_stars>1-10
"""
This module defines the Item class, which is the base of Item instance objects
in this project, and the Field class, which is used to declare an item field.
Item objects yielded by a spider parser method or by the callback associated
with a Request object should derive from the Item class.
from UserDict import DictMixin
from pprint import pformat
class Field(dict):
    """Marker class used to declare an item field.

    Subclasses dict so that per-field metadata can be attached as key/value
    pairs; ItemMeta collects all Field-valued class attributes into the
    class-level ``fields`` registry.
    """
    pass
class ItemMeta(type):
    """Metaclass that collects Field declarations from the class body.

    Attributes whose value is a Field instance are removed from the class
    namespace and registered in the class-level ``fields`` dict; all other
    attributes are kept as ordinary class attributes.
    """

    def __new__(metacls, cls_name, bases, attrs):
        fields = {}        # Field declarations found in the class body
        extra_attrs = {}   # everything else stays a plain class attribute
        for k, v in attrs.iteritems():  # Python 2 dict iteration
            if isinstance(v, Field):
                fields[k] = v
            else:
                extra_attrs[k] = v
        cls = type.__new__(metacls, cls_name, bases, extra_attrs)
        # TODO: it's not necessary really?
        # cls.fields = cls.fields.copy()
        # NOTE(review): updating the inherited ``fields`` dict in place means
        # all Item subclasses share the same BaseItem.fields object; the
        # commented-out copy() above would isolate them per class -- confirm
        # whether sharing is intended.
        cls.fields.update(fields)
        return cls
class BaseItem(DictMixin, object):
    """
    All Item object used in this project should derive from this class.

    Behaves like a dict whose allowed keys are exactly the Field attributes
    declared on the class (collected by ItemMeta into ``fields``); actual
    values live in the per-instance ``_values`` dict.  Attribute access to
    declared fields is deliberately blocked -- use item[key] instead.
    """
    __metaclass__ = ItemMeta  # Python 2 metaclass hook
    fields = {} # All keys declared are stored in it

    def __init__(self, iter_pairs_or_map_obj={}, **kws):
        # NOTE(review): the mutable default is only safe because it is never
        # mutated -- values are copied into self._values via __setitem__.
        self._values = {}
        if iter_pairs_or_map_obj or kws:
            for k, v in dict(iter_pairs_or_map_obj, **kws).iteritems():
                self[k] = v  # route through __setitem__ to validate the key

    # Four User-defined operations on the value referenced by key.
    # See DictMixin __doc__ string for more info.
    def __getitem__(self, key):
        # All keys manipulated are stored in _value attribute.
        return self._values[key]

    def __setitem__(self, key, value):
        # Only keys declared as Field on the class are accepted.
        if key in self.fields:
            self._values[key] = value
        else:
            raise KeyError('%s not supports this field: %s' %
                           (self.__class__.__name__, key))

    def __delitem__(self, key):
        del self._values[key]

    def keys(self):
        # DictMixin derives iteration/len/contains from this method.
        return self._values.keys()

    def __getattr__(self, name):
        # When attribute not found in __dict__, should search by this method.
        if name in self.fields:
            raise AttributeError('Use item[%r] to get field value' %
                                 name)
        raise AttributeError(name)

    def __setattr__(self, name, value):
        # Protect the class-level machinery and force field values through
        # item[key] = value; names starting with '_' remain ordinary
        # instance attributes (e.g. self._values).
        if name == 'fields' or name =='__metaclass__':
            raise ValueError('Should not modify this attribute: %s' % name)
        if not name.startswith('_'):
            if name in self.fields:
                raise AttributeError('Use item[%r] = %r to set field value' %
                                     (name, value))
        super(BaseItem, self).__setattr__(name, value)

    def __repr__(self):
        # Only print the _values attribute.
        return pformat(dict(self))

    def copy(self):
        # Construct another Item object having the same class attributes
        # and the _values attribute as this Item, but not the other
        # non-class attributes.
        return self.__class__(self)
class Item(BaseItem):
    """Example concrete item: two declared fields plus one plain attribute."""
    f1 = Field()
    f2 = Field()
    extra_class_attribute = '<class attribute of non-Field type> '
if __name__ == '__main__':
    # Smoke-test / demo of the Item API (Python 2 print statements).
    item = Item()
    print 'Construct an Item object:', item
    print 'item.fields:', item.fields
    print 'extra non-field attribute:', item.extra_class_attribute
    print 'f1 in item:', 'f1' in item  # False: `f1` has not been set yet
    print 'f1 in item.fields:', 'f1' in item.fields  # True: `f1` is declared in Item
    item['f1'] = 'Set f1'
    print 'After setting f1:', item['f1']
    print 'f1 in item:', 'f1' in item
    # item.f2 = 'Set f2' will raise
    item._test = '_test'  # underscore-prefixed attrs bypass the field check
    print 'item.fields:', item.fields
    print 'item._values:', item._values
    print 'item._test:', item._test
    item2 = item.copy()
    print 'Make a copy of this item:', item2
    print 'the non-field attribute of class-level in the copy:', item2.extra_class_attribute
    # print item2._test will raise
from dynamic_graph import plug
from dynamic_graph.sot.core import Selec_of_vector
from dynamic_graph.sot.torque_control.talos.create_entities_utils_talos import NJ, create_rospublish, create_topic, get_sim_conf
from dynamic_graph.sot.torque_control.talos.create_entities_utils_talos import create_waist_traj_gen, create_trajectory_generator, create_com_traj_gen, create_encoders
from dynamic_graph.sot.torque_control.talos.create_entities_utils_talos import create_simple_inverse_dyn_controller, create_ctrl_manager, connect_ctrl_manager
from dynamic_graph.sot.torque_control.talos.create_entities_utils_talos import addTrace, dump_tracer
from dynamic_graph.sot.torque_control.talos.sot_utils_talos import go_to_position
from dynamic_graph.tracer_real_time import TracerRealTime
from dynamic_graph.sot.core import Substract_of_vector
# --- EXPERIMENTAL SET UP ------------------------------------------------------
# NOTE(review): this script assumes a `robot` object is injected into its
# namespace by the dynamic-graph/SoT launch environment before it runs --
# confirm with the launcher.
conf = get_sim_conf()
dt = robot.timeStep
robot.device.setControlInputType('noInteg') # No integration for torque control
# --- SET INITIAL CONFIGURATION ------------------------------------------------
# TMP: overwrite halfSitting configuration to use SoT joint order
q = [0., 0., 1.018213, 0., 0., 0.] # Free flyer
q += [0.0, 0.0, -0.411354, 0.859395, -0.448041, -0.001708] # legs
q += [0.0, 0.0, -0.411354, 0.859395, -0.448041, -0.001708] # legs
q += [0.0, 0.006761] # Chest
q += [0.25847, 0.173046, -0.0002, -0.525366, 0.0, -0.0, 0.1, -0.005] # arms
q += [-0.25847, -0.173046, 0.0002, -0.525366, 0.0, 0.0, 0.1, -0.005] # arms
q += [0., 0.] # Head
robot.halfSitting = q
# --- CREATE ENTITIES ----------------------------------------------------------
robot.ctrl_manager = create_ctrl_manager(conf.control_manager, conf.motor_params, dt)
robot.encoders = create_encoders(robot)
# --- Posture trajectory
robot.traj_gen = create_trajectory_generator(robot, dt)
robot.traj_gen.q.recompute(0)  # force an initial evaluation of the signal
# --- CoM trajectory
robot.com_traj_gen = create_com_traj_gen(robot, dt)
robot.com_traj_gen.x.recompute(0)
# --- Base orientation (SE3 on the waist) trajectory
robot.waist_traj_gen = create_waist_traj_gen("tg_waist_ref", robot, dt)
robot.waist_traj_gen.x.recompute(0)
# --- Simple inverse dynamic controller
robot.inv_dyn = create_simple_inverse_dyn_controller(robot, conf.balance_ctrl, dt)
robot.inv_dyn.setControlOutputType("torque")
# --- Connect control manager
connect_ctrl_manager(robot)
# --- Error on the CoM task (reference minus measured CoM)
robot.errorComTSID = Substract_of_vector('error_com')
plug(robot.inv_dyn.com_ref_pos, robot.errorComTSID.sin2)
plug(robot.dynamic.com, robot.errorComTSID.sin1)
# --- Error on the Posture task (reference minus encoder readings)
robot.errorPoseTSID = Substract_of_vector('error_pose')
plug(robot.inv_dyn.posture_ref_pos, robot.errorPoseTSID.sin2)
plug(robot.encoders.sout, robot.errorPoseTSID.sin1)
# # # --- ROS PUBLISHER ----------------------------------------------------------
robot.publisher = create_rospublish(robot, 'robot_publisher')
create_topic(robot.publisher, robot.errorPoseTSID, 'sout', 'errorPoseTSID', robot=robot, data_type='vector')
create_topic(robot.publisher, robot.errorComTSID, 'sout', 'errorComTSID', robot=robot, data_type='vector')
create_topic(robot.publisher, robot.dynamic, 'com', 'dynCom', robot=robot, data_type='vector')
create_topic(robot.publisher, robot.inv_dyn, 'q_des', 'q_des', robot=robot, data_type='vector')
create_topic(robot.publisher, robot.device, 'motorcontrol', 'motorcontrol', robot=robot, data_type='vector')
create_topic(robot.publisher, robot.device, 'robotState', 'robotState', robot=robot, data_type='vector')
# # --- TRACER
robot.tracer = TracerRealTime("tau_tracer")
robot.tracer.setBufferSize(80*(2**20))  # 80 MiB trace buffer
robot.tracer.open('/tmp','dg_','.dat')
# Trigger the tracer at every device step.
robot.device.after.addSignal('{0}.triger'.format(robot.tracer.name))
addTrace(robot.tracer, robot.inv_dyn, 'tau_des')
addTrace(robot.tracer, robot.inv_dyn, 'q_des')
addTrace(robot.tracer, robot.inv_dyn, 'v_des')
addTrace(robot.tracer, robot.inv_dyn, 'dv_des')
addTrace(robot.tracer, robot.errorPoseTSID, 'sout')
addTrace(robot.tracer, robot.errorComTSID, 'sout')
addTrace(robot.tracer, robot.device, 'robotState')
addTrace(robot.tracer, robot.device, 'motorcontrol')
robot.tracer.start()
#!/usr/bin/env python
# coding: utf-8
# # Measure orientation of seashore streets in relation to SW wind
#
# Computational notebook 06 for Climate adaptation plans in the context of coastal settlements: the case of Portugal.
#
# Date: 27/06/2020
#
# ---
#
# This notebook computes deviation of seashore street orientation from SW wind direction (45 degrees).
#
# Requires attribute `case` in `name_str` capturing which LineStrings form the seashore street itself (1 = True); already used in `03_Calculate_contextual_characters.ipynb`.
#
# Structure of GeoPackages:
#
# ```
# ./data/
# atlantic.gpkg
# name_blg - Polygon layers
# name_str - LineString layers
# name_case - Polygon layers
# name_tess - Polygon layers
# name_blocks - Polygon layers
# ...
# preatl.gpkg
# name_blg
# name_str
# name_case
# ...
# premed.gpkg
# name_blg
# name_str
# name_case
# ...
# med.gpkg
# name_blg
# name_str
# name_case
# ...
# ```
#
# CRS of the original data is EPSG:3763.
#
# ```
# <Projected CRS: EPSG:3763>
# Name: ETRS89 / Portugal TM06
# Axis Info [cartesian]:
# - X[east]: Easting (metre)
# - Y[north]: Northing (metre)
# Area of Use:
# - name: Portugal - mainland - onshore
# - bounds: (-9.56, 36.95, -6.19, 42.16)
# Coordinate Operation:
# - name: Portugual TM06
# - method: Transverse Mercator
# Datum: European Terrestrial Reference System 1989
# - Ellipsoid: GRS 1980
# - Prime Meridian: Greenwich
# ```
# In[1]:
import fiona
import geopandas as gpd
import shapely
import numpy as np
import pandas as pd
# In[2]:
fiona.__version__, gpd.__version__, shapely.__version__, np.__version__, pd.__version__
# In[ ]:
from shapely.ops import linemerge
def wind_issue(line, wind_angle=45):
    """Normalised deviation of a street's orientation from the wind direction.

    The azimuth of the straight segment joining the line's endpoints is
    compared with ``wind_angle`` (degrees clockwise from north; the default 45
    represents the SW-NE wind axis).  The absolute angular deviation is folded
    into [0, 90] and scaled to [0, 1]: 0 means the street is parallel to the
    wind, 1 means perpendicular.
    """
    (x0, y0), (x1, y1) = line.coords[0], line.coords[-1]
    # Azimuth of the endpoint-to-endpoint chord, clockwise from north.
    azimuth = np.degrees(np.arctan2(x1 - x0, y1 - y0))
    # Lines are undirected: map azimuths below the wind angle onto the
    # opposite heading before taking the difference.
    if azimuth < wind_angle:
        azimuth += 180
    deviation = azimuth - wind_angle
    if deviation < 0:
        deviation = -deviation
    # Fold reflex deviations back into the [0, 90] range.
    if 90 < deviation <= 180:
        deviation -= 2 * (deviation - 90)
    return deviation / 90
# Compute the wind deviation of the main seashore street of every place.
wind = pd.DataFrame(columns=['place', 'winddev'])
ix = 0
parts = ['atlantic', 'preatl', 'premed', 'med']
for part in parts:
    # NOTE(review): `folder` must be defined in an earlier notebook cell
    # (path to ./data/) -- confirm before running this cell standalone.
    path = folder + part + '.gpkg'
    layers = [x[:-4] for x in fiona.listlayers(path) if 'blg' in x]
    for l in layers:
        streets = gpd.read_file(path, layer=l + '_str')
        seashore = streets[streets.case == 1].geometry.to_list()
        merged = linemerge(seashore)
        # Use `geom_type` and `.geoms` (valid on shapely 1.x and 2.x) instead
        # of the deprecated `.type` attribute and direct iteration/indexing of
        # multi-part geometries, which were removed in shapely 2.x.
        if merged.geom_type != 'LineString':
            # linemerge produced a MultiLineString: measure only the longest
            # component, which is taken to be the seashore street itself.
            lengths = {i: seg.length for i, seg in enumerate(merged.geoms)}
            longest = max(lengths, key=lengths.get)
            wind.loc[ix] = [l, wind_issue(merged.geoms[longest])]
        else:
            wind.loc[ix] = [l, wind_issue(merged)]
        ix += 1

# In[ ]:
wind.to_csv(folder + 'wind_relation.csv')
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for converting between row_splits and segment_ids."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.util.tf_export import tf_export
# For background on "segments" and "segment ids", see:
# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
@tf_export("ragged.row_splits_to_segment_ids")
def row_splits_to_segment_ids(splits, name=None, out_type=None):
  """Generates the segmentation corresponding to a RaggedTensor `row_splits`.

  Returns an integer vector `segment_ids`, where `segment_ids[i] == j` if
  `splits[j] <= i < splits[j+1]`.  Example:

  >>> print(tf.ragged.row_splits_to_segment_ids([0, 3, 3, 5, 6, 9]))
  tf.Tensor([0 0 0 2 2 3 4 4 4], shape=(9,), dtype=int64)

  Args:
    splits: A sorted 1-D integer Tensor.  `splits[0]` must be zero.
    name: A name prefix for the returned tensor (optional).
    out_type: The dtype for the return value.  Defaults to `splits.dtype`,
      or `tf.int64` if `splits` does not have a dtype.

  Returns:
    A sorted 1-D integer Tensor, with `shape=[splits[-1]]`

  Raises:
    ValueError: If `splits` is invalid.
  """
  with ops.name_scope(name, "RaggedSplitsToSegmentIds", [splits]) as name:
    splits = ops.convert_to_tensor(
        splits, name="splits",
        preferred_dtype=dtypes.int64)
    if splits.dtype not in (dtypes.int32, dtypes.int64):
      raise ValueError("splits must have dtype int32 or int64")
    splits.shape.assert_has_rank(1)
    # An empty splits vector is invalid: even a RaggedTensor with zero rows
    # has splits == [0].
    if tensor_shape.dimension_value(splits.shape[0]) == 0:
      raise ValueError("Invalid row_splits: []")
    if out_type is None:
      out_type = splits.dtype
    else:
      out_type = dtypes.as_dtype(out_type)
    # Row i spans splits[i]..splits[i+1], so its length is the difference of
    # adjacent split points.
    row_lengths = splits[1:] - splits[:-1]
    nrows = array_ops.shape(splits, out_type=out_type)[-1] - 1
    indices = math_ops.range(nrows)
    # Repeat each row index i row_lengths[i] times: this is the segment-id
    # vector.
    return ragged_util.repeat(indices, repeats=row_lengths, axis=0)
# For background on "segments" and "segment ids", see:
# https://www.tensorflow.org/api_docs/python/tf/math#Segmentation
@tf_export("ragged.segment_ids_to_row_splits")
def segment_ids_to_row_splits(segment_ids, num_segments=None,
                              out_type=None, name=None):
  """Generates the RaggedTensor `row_splits` corresponding to a segmentation.

  Returns an integer vector `splits`, where `splits[0] = 0` and
  `splits[i] = splits[i-1] + count(segment_ids==i)`.  Example:

  >>> print(tf.ragged.segment_ids_to_row_splits([0, 0, 0, 2, 2, 3, 4, 4, 4]))
  tf.Tensor([0 3 3 5 6 9], shape=(6,), dtype=int64)

  Args:
    segment_ids: A 1-D integer Tensor.
    num_segments: A scalar integer indicating the number of segments.  Defaults
      to `max(segment_ids) + 1` (or zero if `segment_ids` is empty).
    out_type: The dtype for the return value.  Defaults to `segment_ids.dtype`,
      or `tf.int64` if `segment_ids` does not have a dtype.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A sorted 1-D integer Tensor, with `shape=[num_segments + 1]`.
  """
  # Resolve the output dtype first so the default can be taken from whichever
  # argument is already a Tensor.
  if out_type is None:
    if isinstance(segment_ids, ops.Tensor):
      out_type = segment_ids.dtype
    elif isinstance(num_segments, ops.Tensor):
      out_type = num_segments.dtype
    else:
      out_type = dtypes.int64
  else:
    out_type = dtypes.as_dtype(out_type)
  with ops.name_scope(name, "SegmentIdsToRaggedSplits", [segment_ids]) as name:
    # Note: we cast int64 tensors to int32, since bincount currently only
    # supports int32 inputs.
    segment_ids = ragged_util.convert_to_int_tensor(segment_ids, "segment_ids",
                                                    dtype=dtypes.int32)
    segment_ids.shape.assert_has_rank(1)
    if num_segments is not None:
      num_segments = ragged_util.convert_to_int_tensor(num_segments,
                                                       "num_segments",
                                                       dtype=dtypes.int32)
      num_segments.shape.assert_has_rank(0)

    # Count occurrences of each segment id; min/maxlength pin the output
    # length to num_segments when it is provided.
    row_lengths = math_ops.bincount(
        segment_ids,
        minlength=num_segments,
        maxlength=num_segments,
        dtype=out_type)
    # Row splits are the cumulative counts with a leading zero.
    splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)

    # Update shape information, if possible.
    if num_segments is not None:
      const_num_segments = tensor_util.constant_value(num_segments)
      if const_num_segments is not None:
        splits.set_shape(tensor_shape.TensorShape([const_num_segments + 1]))

    return splits
|
<gh_stars>1-10
"""
SPEECh: Scalable Probabilistic Estimates of EV Charging
Code first published in October 2021.
Developed by <NAME> (<EMAIL>).
This script demonstrates how to extract individual sessions data from running the model.
"""
from speech import DataSetConfigurations
from speech import SPEECh
from speech import SPEEChGeneralConfiguration
from speech import Plotting
from speech import Scenarios
from speech import LoadProfile
import copy
import os
import pandas as pd
# Ensure the output directory exists before any CSVs are written.
if not os.path.isdir('IndividualSessionsOutputData'):
    os.mkdir('IndividualSessionsOutputData')

# Set up the scenario
total_evs = 500
weekday_option = 'weekday'
data = DataSetConfigurations('Original16', ng=16)
model = SPEECh(data)
config = SPEEChGeneralConfiguration(model)
scenario = Scenarios('BaseCase')
config.change_pg(new_weights = scenario.new_weights)
config.change_pg(new_weights = {7:0.2, 10:0.2}, dend=True) # If you are making your own weights, use "dend=True"

# Run a version of "config.run_all":
config.num_evs(total_evs) # Input number of EVs in simulation
config.groups()

# edited contents of config.run_all():
# One accumulator per charging-segment category; filled lazily below.
individual_session_parameters_all = {key:None for key in data.categories}

# Run through driver groups:
for g in range(data.ng):
    print('Group '+str(g))
    # NOTE(review): `model` is rebound here from the SPEECh object to a
    # LoadProfile; code after this loop uses the LAST group's LoadProfile --
    # confirm that is intended.
    model = LoadProfile(config, config.group_configs[g], weekday=weekday_option) # part of config.run_all()
    individual_session_parameters = model.calculate_load(return_individual_session_parameters=True) # extra flag returns the data we want
    for key in individual_session_parameters.keys():
        # Concatenate this group's sessions onto the running per-category
        # DataFrame, or start it with a deep copy on first sight.
        if individual_session_parameters_all[key] is not None:
            individual_session_parameters_all[key] = pd.concat((individual_session_parameters_all[key], individual_session_parameters[key]), axis=0)
        else:
            individual_session_parameters_all[key] = copy.deepcopy(individual_session_parameters[key])

# Save the results:
for key in individual_session_parameters_all.keys():
    if individual_session_parameters_all[key] is not None:
        individual_session_parameters_all[key].to_csv('IndividualSessionsOutputData/'+key+'.csv')
# Extract the individual load profiles, not just the session parameters
categories = ['Home', 'MUD', 'Work', 'Other Slow', 'Other Fast']
total_load_profiles = {}
for segment in categories:
    if individual_session_parameters_all[segment] is not None:
        individual_session_parameters_all[segment] = individual_session_parameters_all[segment].reset_index(drop=True)
        sessions = individual_session_parameters_all[segment]
        n_segment = len(sessions)
        individual_profiles = {}  # collect the individual load profiles
        # Bug fix: the original iterated `np.arange(0, n_segment)`, but numpy
        # is never imported in this script (NameError at runtime); the builtin
        # range() is equivalent here.
        for index_value in range(n_segment):  # calculate for each session
            # model is the LoadProfile of the last driver group; its
            # end_times_and_load() expects 1-element arrays per session.
            tmp1, tmp2 = model.end_times_and_load(
                sessions.loc[index_value, 'Start'].reshape(1,),
                sessions.loc[index_value, 'Energy'].reshape(1,),
                sessions.loc[index_value, 'Rate'].reshape(1,))
            individual_profiles[index_value] = tmp2
        individual_profiles = pd.DataFrame(individual_profiles)
        individual_profiles.to_csv('IndividualSessionsOutputData/'+segment+'_individual_load_profiles.csv', index=None) # save the individual load profiles in the same folder
        total_load_profiles[segment] = individual_profiles.sum(axis=1)
total_load_profiles = pd.DataFrame(total_load_profiles)
# NOTE(review): this indexing assumes every category produced sessions;
# a missing column would raise KeyError -- same as the original behaviour.
total_load_profiles['Total'] = total_load_profiles.loc[:, categories].sum(axis=1) # calculate the total load profile as the sum of all sessions in all segments
total_load_profiles.to_csv('IndividualSessionsOutputData/total_load_profiles.csv', index=None) # save the total load profile
|
<filename>cinder/brick/iscsi/iscsi.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper code for the iSCSI volume driver.
"""
import contextlib
import os
import re
from oslo.config import cfg
from cinder import exception
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
iscsi_helper_opt = [cfg.StrOpt('iscsi_helper',
default='tgtadm',
help='iscsi target user-land tool to use'),
cfg.StrOpt('volumes_dir',
default='$state_path/volumes',
help='Volume configuration file storage '
'directory'),
cfg.StrOpt('iet_conf',
default='/etc/iet/ietd.conf',
help='IET configuration file'),
cfg.StrOpt('lio_initiator_iqns',
default='',
help=('Comma-separatd list of initiator IQNs '
'allowed to connect to the '
'iSCSI target. (From Nova compute nodes.)'
)
),
cfg.StrOpt('iscsi_iotype',
default='fileio',
help=('Sets the behavior of the iSCSI target '
'to either perform blockio or fileio '
'optionally, auto can be set and Cinder '
'will autodetect type of backing device')
)
]
CONF = cfg.CONF
CONF.register_opts(iscsi_helper_opt)
CONF.import_opt('volume_name_template', 'cinder.db')
class TargetAdmin(object):
    """iSCSI target administration.

    Base class for iSCSI target admin helpers.  Subclasses wrap a specific
    user-land tool (tgtadm, ietadm, rtstool) behind a common interface.
    """

    def __init__(self, cmd, execute):
        self._cmd = cmd  # name of the command-line tool, e.g. 'tgtadm'
        self.set_execute(execute)

    def set_execute(self, execute):
        """Set the function to be used to execute commands."""
        self._execute = execute

    def _run(self, *args, **kwargs):
        # All target-admin commands require root privileges.
        self._execute(self._cmd, *args, run_as_root=True, **kwargs)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create an iSCSI target and logical unit."""
        raise NotImplementedError()

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Remove an iSCSI target and logical unit."""
        raise NotImplementedError()

    def _new_target(self, name, tid, **kwargs):
        """Create a new iSCSI target."""
        raise NotImplementedError()

    def _delete_target(self, tid, **kwargs):
        """Delete a target."""
        raise NotImplementedError()

    def show_target(self, tid, iqn=None, **kwargs):
        """Query the given target ID."""
        raise NotImplementedError()

    def _new_logicalunit(self, tid, lun, path, **kwargs):
        """Create a new LUN on a target using the supplied path."""
        raise NotImplementedError()

    def _delete_logicalunit(self, tid, lun, **kwargs):
        """Delete a logical unit from a target."""
        raise NotImplementedError()
class TgtAdm(TargetAdmin):
    """iSCSI target administration using tgtadm."""

    def __init__(self, execute=putils.execute):
        super(TgtAdm, self).__init__('tgtadm', execute)

    def _get_target(self, iqn):
        # Scan `tgt-admin --show` output for the line naming this IQN and
        # return the numeric target id (second token, trailing ':' stripped),
        # or None if no such target exists.
        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                parsed = line.split()
                tid = parsed[1]
                return tid[:-1]
        return None

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create the target by writing a per-volume config file and running
        `tgt-admin --update`; returns the resulting target id."""
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility
        fileutils.ensure_tree(CONF.volumes_dir)

        vol_id = name.split(':')[1]
        # Build the tgtd config stanza; the CHAP line is inserted verbatim
        # when credentials are supplied.
        if chap_auth is None:
            volume_conf = """
                <target %s>
                    backing-store %s
                </target>
            """ % (name, path)
        else:
            volume_conf = """
                <target %s>
                    backing-store %s
                    %s
                </target>
            """ % (name, path, chap_auth)

        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = CONF.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': str(e)})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Verify tgtd actually picked the target up; a None tid usually means
        # the volumes_dir include line is missing from the tgtd config.
        iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") % {
                            'vol_id': vol_id,
                            'volumes_dir': volumes_dir,
                        })
            raise exception.NotFound()

        # Clean up the stale config file left behind by a rename/migration.
        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Delete the target via `tgt-admin --delete` and remove its
        persistent config file."""
        LOG.info(_('Removing iscsi_target for: %s') % vol_id)
        vol_uuid_file = CONF.volume_name_template % vol_id
        volume_path = os.path.join(CONF.volumes_dir, vol_uuid_file)
        if os.path.isfile(volume_path):
            iqn = '%s%s' % (CONF.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        try:
            # NOTE(vish): --force is a workaround for bug:
            #             https://bugs.launchpad.net/cinder/+bug/1159948
            self._execute('tgt-admin',
                          '--force',
                          '--delete',
                          iqn,
                          run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to remove iscsi target for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': str(e)})
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        os.unlink(volume_path)

    def show_target(self, tid, iqn=None, **kwargs):
        """Check that a target with the given IQN exists (tid is re-derived
        from the IQN; the passed-in tid is ignored)."""
        if iqn is None:
            raise exception.InvalidParameterValue(
                err=_('valid iqn needed for show_target'))

        tid = self._get_target(iqn)
        if tid is None:
            raise exception.NotFound()
class IetAdm(TargetAdmin):
    """iSCSI target administration using ietadm."""

    def __init__(self, execute=putils.execute):
        super(IetAdm, self).__init__('ietadm', execute)

    def _iotype(self, path):
        # Resolve the IO mode: honour the configured value, or, for 'auto',
        # pick blockio for block devices and fileio otherwise.
        if CONF.iscsi_iotype == 'auto':
            return 'blockio' if volume_utils.is_block(path) else 'fileio'
        else:
            return CONF.iscsi_iotype

    @contextlib.contextmanager
    def temporary_chown(self, path, owner_uid=None):
        """Temporarily chown a path.

        :params path: The path to chown
        :params owner_uid: UID of temporary owner (defaults to current user)
        """
        if owner_uid is None:
            owner_uid = os.getuid()

        orig_uid = os.stat(path).st_uid

        if orig_uid != owner_uid:
            putils.execute('chown', owner_uid, path, run_as_root=True)
        try:
            yield
        finally:
            # Always restore the original owner, even if the body raised.
            if orig_uid != owner_uid:
                putils.execute('chown', orig_uid, path, run_as_root=True)

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create the target and LUN via ietadm and append a matching stanza
        to the IET config file so it persists across restarts."""
        # NOTE (jdg): Address bug: 1175207
        kwargs.pop('old_name', None)

        self._new_target(name, tid, **kwargs)
        self._new_logicalunit(tid, lun, path, **kwargs)
        if chap_auth is not None:
            (type, username, password) = chap_auth.split()
            self._new_auth(tid, type, username, password, **kwargs)

        conf_file = CONF.iet_conf
        if os.path.exists(conf_file):
            try:
                # NOTE(review): when chap_auth is None the literal string
                # 'None' is written into the stanza -- same as the original
                # behaviour; confirm IET tolerates it.
                volume_conf = """
                        Target %s
                            %s
                            Lun 0 Path=%s,Type=%s
                """ % (name, chap_auth, path, self._iotype(path))

                with self.temporary_chown(conf_file):
                    f = open(conf_file, 'a+')
                    f.write(volume_conf)
                    f.close()
            except exception.ProcessExecutionError as e:
                vol_id = name.split(':')[1]
                LOG.error(_("Failed to create iscsi target for volume "
                            "id:%(vol_id)s: %(e)s")
                          % {'vol_id': vol_id, 'e': str(e)})
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
        """Tear down the LUN and target, then drop the matching 3-line
        stanza (Target + 2 following lines) from the IET config file."""
        LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
        self._delete_logicalunit(tid, lun, **kwargs)
        self._delete_target(tid, **kwargs)
        vol_uuid_file = CONF.volume_name_template % vol_id
        conf_file = CONF.iet_conf
        if os.path.exists(conf_file):
            with self.temporary_chown(conf_file):
                try:
                    iet_conf_text = open(conf_file, 'r+')
                    full_txt = iet_conf_text.readlines()
                    new_iet_conf_txt = []
                    count = 0
                    for line in full_txt:
                        if count > 0:
                            # Still skipping the 2 lines that follow the
                            # matched Target line (its auth/Lun entries).
                            count -= 1
                            continue
                        elif re.search(vol_uuid_file, line):
                            count = 2
                            continue
                        else:
                            new_iet_conf_txt.append(line)

                    iet_conf_text.seek(0)
                    iet_conf_text.truncate(0)
                    iet_conf_text.writelines(new_iet_conf_txt)
                finally:
                    iet_conf_text.close()

    def _new_target(self, name, tid, **kwargs):
        """Create a new iSCSI target via `ietadm --op new`."""
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--params', 'Name=%s' % name,
                  **kwargs)

    def _delete_target(self, tid, **kwargs):
        """Delete a target via `ietadm --op delete`."""
        self._run('--op', 'delete',
                  '--tid=%s' % tid,
                  **kwargs)

    def show_target(self, tid, iqn=None, **kwargs):
        """Query the given target ID via `ietadm --op show`."""
        self._run('--op', 'show',
                  '--tid=%s' % tid,
                  **kwargs)

    def _new_logicalunit(self, tid, lun, path, **kwargs):
        """Attach a LUN backed by `path` to the target."""
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--lun=%d' % lun,
                  '--params', 'Path=%s,Type=%s' % (path, self._iotype(path)),
                  **kwargs)

    def _delete_logicalunit(self, tid, lun, **kwargs):
        """Detach a LUN from the target."""
        self._run('--op', 'delete',
                  '--tid=%s' % tid,
                  '--lun=%d' % lun,
                  **kwargs)

    def _new_auth(self, tid, type, username, password, **kwargs):
        """Add incoming CHAP credentials to the target."""
        self._run('--op', 'new',
                  '--tid=%s' % tid,
                  '--user',
                  '--params=%s=%s,Password=%s' % (type, username, password),
                  **kwargs)
class FakeIscsiHelper(object):
    """Stub target admin for tests: hands out incrementing target IDs and
    never runs any commands."""

    def __init__(self):
        # Counter is pre-incremented, so the first target gets id 2.
        self.tid = 1

    def set_execute(self, execute):
        """Record the command executor (never actually invoked)."""
        self._execute = execute

    def create_iscsi_target(self, *args, **kwargs):
        """Pretend to create a target; return a fresh target id."""
        self.tid = self.tid + 1
        return self.tid
class LioAdm(TargetAdmin):
"""iSCSI target administration for LIO using python-rtslib."""
def __init__(self, execute=putils.execute):
super(LioAdm, self).__init__('rtstool', execute)
try:
self._execute('rtstool', 'verify')
except (OSError, exception.ProcessExecutionError):
LOG.error(_('rtstool is not installed correctly'))
raise
def _get_target(self, iqn):
(out, err) = self._execute('rtstool',
'get-targets',
run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
return line
return None
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
# tid and lun are not used
vol_id = name.split(':')[1]
LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
# rtstool requires chap_auth, but unit tests don't provide it
chap_auth_userid = 'test_id'
chap_auth_password = '<PASSWORD>'
if chap_auth is not None:
(chap_auth_userid, chap_auth_password) = chap_auth.split(' ')[1:]
extra_args = []
if CONF.lio_initiator_iqns:
extra_args.append(CONF.lio_initiator_iqns)
try:
command_args = ['rtstool',
'create',
path,
name,
chap_auth_userid,
chap_auth_password]
if extra_args != []:
command_args += extra_args
self._execute(*command_args, run_as_root=True)
except exception.ProcessExecutionError as e:
LOG.error(_("Failed to create iscsi target for volume "
"id:%s.") % vol_id)
LOG.error("%s" % str(e))
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.error(_("Failed to create iscsi target for volume "
"id:%s.") % vol_id)
raise exception.NotFound()
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
LOG.info(_('Removing iscsi_target: %s') % vol_id)
vol_uuid_name = 'volume-%s' % vol_id
iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_uuid_name)
try:
self._execute('rtstool',
'delete',
iqn,
run_as_root=True)
except exception.ProcessExecutionError as e:
LOG.error(_("Failed to remove iscsi target for volume "
"id:%s.") % vol_id)
LOG.error("%s" % str(e))
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
def show_target(self, tid, iqn=None, **kwargs):
    """Verify that a target with the given iqn exists.

    :raises: InvalidParameterValue when iqn is None, NotFound when no
        target matches
    """
    if iqn is None:
        raise exception.InvalidParameterValue(
            err=_('valid iqn needed for show_target'))
    if self._get_target(iqn) is None:
        raise exception.NotFound()
def initialize_connection(self, volume, connector):
    """Add the connector's initiator IQN to the target's ACL.

    :param volume: volume dict with provider_location / provider_auth set
    :param connector: connector dict carrying the initiator IQN
    :raises: ISCSITargetAttachFailed if rtstool add-initiator fails
    """
    volume_iqn = volume['provider_location'].split(' ')[1]

    (auth_method, auth_user, auth_pass) = \
        volume['provider_auth'].split(' ', 3)

    # Add initiator iqns to target ACL
    try:
        self._execute('rtstool', 'add-initiator',
                      volume_iqn,
                      auth_user,
                      auth_pass,
                      connector['initiator'],
                      run_as_root=True)
    except exception.ProcessExecutionError as e:
        LOG.error(_("Failed to add initiator iqn %s to target") %
                  connector['initiator'])
        # Fix: log the underlying error detail, matching the other
        # methods in this class; previously `e` was silently dropped.
        LOG.error("%s" % str(e))
        raise exception.ISCSITargetAttachFailed(volume_id=volume['id'])
def get_target_admin():
    """Return the iSCSI target admin helper selected by CONF.iscsi_helper.

    Unknown helper names fall back to IetAdm, matching the historical
    behaviour of the if/elif chain this replaces.
    """
    helpers = {
        'tgtadm': TgtAdm,
        'fake': FakeIscsiHelper,
        'lioadm': LioAdm,
    }
    return helpers.get(CONF.iscsi_helper, IetAdm)()
|
<gh_stars>1-10
from pathlib import Path
from re import findall
from modules.CardType import CardType
from modules.Debug import log
class TextlessTitleCard(CardType):
    """
    A CardType that adds no text whatsoever: the source image is resized
    to the title-card dimensions (and optionally blurred) and written out
    unmodified otherwise.
    """

    """Characteristics for title splitting by this class"""
    TITLE_CHARACTERISTICS = {
        'max_line_width': 999,   # Character count to begin splitting titles
        'max_line_count': 1,     # Maximum number of lines a title can take up
        'top_heavy': False,      # This class uses bottom heavy titling
    }

    """Font case for this card is entirely blank"""
    DEFAULT_FONT_CASE = 'blank'

    """Default episode text format string, can be overwritten by each class"""
    EPISODE_TEXT_FORMAT = ''

    """Default font and text color for episode title text"""
    TITLE_FONT = None
    TITLE_COLOR = None

    """Default characters to replace in the generic font"""
    FONT_REPLACEMENTS = {}

    """Whether this CardType uses season titles for archival purposes"""
    USES_SEASON_TITLE = False

    """Label to archive cards under"""
    ARCHIVE_NAME = 'Textless Version'

    __slots__ = ('source_file', 'output_file', 'blur')

    def __init__(self, source: Path, output_file: Path, blur: bool=False,
                 *args, **kwargs) -> None:
        """
        Store the attributes needed later by `create()`.

        :param source: Source image.
        :param output_file: Output file.
        :param blur: Whether to blur the source image.
        :param args and kwargs: Unused arguments to permit generalized calls
                                for any CardType.
        """
        # Parent class sets up an ImageMagickInterface
        super().__init__()

        self.source_file = source
        self.output_file = output_file
        self.blur = blur

    def _resize_and_blur(self) -> Path:
        """
        Resize the source image to title-card size, optionally blurring it,
        and write the result to the output filepath.

        :returns: Path to the created image (the output file).
        """
        # An empty element is joined in when blur is off, mirroring the
        # original command string exactly.
        blur_arg = f'-blur {self.BLUR_PROFILE}' if self.blur else ''
        command = ' '.join([
            f'convert "{self.source_file.resolve()}"',
            f'+profile "*"',
            f'-gravity center',
            f'-resize "{self.TITLE_CARD_SIZE}^"',
            f'-extent "{self.TITLE_CARD_SIZE}"',
            blur_arg,
            f'"{self.output_file.resolve()}"',
        ])

        self.image_magick.run(command)

        return self.output_file

    @staticmethod
    def is_custom_font(font: 'Font') -> bool:
        """
        Whether the given font counts as custom for this card type.

        :param font: The Font being evaluated.
        :returns: False, as fonts are not customizable with this card.
        """
        return False

    @staticmethod
    def is_custom_season_titles(custom_episode_map: bool,
                                episode_text_format: str) -> bool:
        """
        Whether the given attributes constitute custom season titles.

        :param custom_episode_map: Whether the EpisodeMap was customized.
        :param episode_text_format: The episode text format in use.
        :returns: False, as season titles are not customizable with this card.
        """
        return False

    def create(self) -> None:
        """
        Create this object's defined title card. The only ImageMagick work
        is the resize plus the optional blur.
        """
        self._resize_and_blur()
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn import model_selection
from sklearn import preprocessing
from sklearn import svm
from mlxtend.plotting import plot_decision_regions
from sklearn import linear_model
from sklearn import tree
from sklearn import ensemble
from sklearn import metrics
from sklearn.model_selection import GridSearchCV
def plot_iris(X: np.ndarray) -> None:
    """Scatter-plot the first two (sepal) features of an iris feature matrix.

    Only two features are visualised so the data can be drawn in 2D.
    Creates a new figure; the caller is responsible for plt.show().
    """
    plt.figure()
    plt.scatter(X[:, 0], X[:, 1])
    # Axis lines through the origin make the effect of scaling visible.
    plt.axvline(x=0)
    plt.axhline(y=0)
    plt.title('Iris sepal features')
    plt.xlabel('sepal length (cm)')
    plt.ylabel('sepal width (cm)')
def ex_1():
    """Load iris, make a stratified split, and compare two feature scalers."""
    # First load exercises the as_frame API; the plain load is what is used.
    iris = datasets.load_iris(as_frame=True)
    iris = datasets.load_iris()
    X, y = iris.data, iris.target

    print(f'count y: {np.bincount(y)}')
    # stratify=y keeps the class proportions equal across both splits.
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        X, y, test_size=0.25, stratify=y)
    print(f'count y_train: {np.bincount(y_train)}')
    print(f'count y_test: {np.bincount(y_test)}')

    # Raw data first, then min-max scaled, then standard scaled.
    plot_iris(X)

    scaler_mm = preprocessing.MinMaxScaler()
    scaler_mm.fit(X_train)
    plot_iris(scaler_mm.transform(X_test))

    scaler_std = preprocessing.StandardScaler()
    scaler_std.fit(X_train)
    plot_iris(scaler_std.transform(X_test))

    plt.show()
def ex_2():
    """Train and compare four classifiers on two iris features.

    Fits an SVM, logistic regression, a decision tree and a random forest
    on a min-max-scaled two-feature subset, prints test accuracies, runs a
    grid search over SVM hyper-parameters, and plots decision regions.
    """
    iris = datasets.load_iris()
    X, y = iris.data, iris.target
    # Keep only two features — decision regions can only be plotted in 2D.
    X = X[:, [1, 3]]
    X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.25, stratify=y, random_state=42)
    # Fit the scaler on the training split only, then transform everything.
    min_max_scaler = preprocessing.MinMaxScaler()
    min_max_scaler.fit(X_train)
    X_min_max = min_max_scaler.transform(X)
    X_train = min_max_scaler.transform(X_train)
    X_test = min_max_scaler.transform(X_test)

    clf_svm = svm.SVC(random_state=42, kernel='rbf', probability=True)
    clf_svm.fit(X_train, y_train)
    acc_svm = metrics.accuracy_score(y_test, clf_svm.predict(X_test))
    print(f'acc_svm:{acc_svm}')

    clf_linear = linear_model.LogisticRegression(random_state=42)
    clf_linear.fit(X_train, y_train)
    acc_lin = metrics.accuracy_score(y_test, clf_linear.predict(X_test))
    print(f'acc_lin:{acc_lin}')

    clf_tree = tree.DecisionTreeClassifier(random_state=42, max_depth=5)
    clf_tree.fit(X_train, y_train)
    acc_tree = metrics.accuracy_score(y_test, clf_tree.predict(X_test))
    print(f'acc_tree:{acc_tree}')

    clf_rf = ensemble.RandomForestClassifier(random_state=42, n_estimators=1000)
    clf_rf.fit(X_train, y_train)
    acc_rf = metrics.accuracy_score(y_test, clf_rf.predict(X_test))
    print(f'acc_rf:{acc_rf}')

    # Exhaustive search over SVM kernels and regularisation strengths.
    param_grid = [
        {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
        {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
    ]
    clf_gs = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, n_jobs=20, verbose=20)
    clf_gs.fit(X_train, y_train)
    print(clf_gs.cv_results_)

    # Class predictions for two hand-picked points (scaled like the data).
    print(clf_svm.predict(min_max_scaler.transform([[8.0, 4.0]])))
    print(clf_svm.predict(min_max_scaler.transform([[8.0, 50.0]])))
    # Class probability estimates for the same points.
    print(clf_svm.predict_proba(min_max_scaler.transform([[8.0, 4.0]])))
    print(clf_svm.predict_proba(min_max_scaler.transform([[8.0, 50.0]])))

    # Plotting decision regions, one figure per classifier.
    plt.figure()
    plot_decision_regions(X_test, y_test, clf=clf_svm, legend=2)
    plt.figure()
    plot_decision_regions(X_test, y_test, clf=clf_linear, legend=2)
    plt.figure()
    plot_decision_regions(X_test, y_test, clf=clf_tree, legend=2)
    plt.figure()
    plot_decision_regions(X_test, y_test, clf=clf_rf, legend=2)
    plt.show()
if __name__ == '__main__':
    # Exercise 1 (scaler comparison) is kept available but disabled.
    # ex_1()
    ex_2()
<gh_stars>1-10
import time
import json
import queue
import random
import hashlib
import threading
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from django.http import HttpResponse
from django.shortcuts import render
from django.template.context_processors import csrf
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from . import ini
# Queue of indices into Serverslist; an index is present when that judge
# server is free to take a job.
ServersQueue = queue.Queue()
# Registered judge servers: dicts with ServerUrl/LastTime/Id/Key/Name.
Serverslist = []
# Finished judging results keyed by submission Id.
Answer = {}
def RandStr(length=32):
    """Return a random lowercase-alphanumeric string.

    :param length: number of characters to generate. Defaults to 32, the
        historical fixed size, so all existing callers are unchanged.
    """
    seed = "abcdefghijklmnopqrstuvwxyz0123456789"
    # random.choices draws `length` characters uniformly in one call,
    # replacing the manual append loop.
    return "".join(random.choices(seed, k=length))
def CheckServers():
    """Prune judge servers that have not pinged for 10+ seconds.

    Runs periodically from the BackgroundScheduler below. Mutates the
    module-level Serverslist in place.
    """
    global Serverslist
    if len(Serverslist) == 0:
        return
    i = 0
    lens = len(Serverslist)
    while i < lens:
        # A server is considered dead after 10s without a Ping update.
        if time.time() - Serverslist[i]['LastTime'] >= 10:
            Serverslist.pop(i)
            print("删除!")
            # Compensate index and length after the in-place pop so the
            # element that shifted into slot i is still examined.
            i -= 1
            lens -= 1
        i += 1
# Background job: prune dead servers every 10 seconds.
Check = BackgroundScheduler()
Check.add_job(CheckServers, 'interval', seconds = 10)
Check.start()
def Runner(Req, x):
    """Dispatch Req to a free judge server and store the result in Answer.

    Blocks until a server index becomes available, POSTs the job to that
    server's /accept endpoint, records the (possibly error) result under
    Req['Id'], and returns the server index to the queue.

    :param Req: job dict; Req['Id'] keys the result in Answer
    :param x: unused; kept for call compatibility with threading.Timer
    """
    while ServersQueue.empty():
        time.sleep(0.1)
    NowServer = ServersQueue.get()
    # The server may have been pruned since its index was queued; retry
    # the whole dispatch on a fresh timer thread.
    if NowServer >= len(Serverslist):
        Run = threading.Timer(0, Runner, (Req, 0))
        Run.start()
        return
    Req["Key"] = hashlib.sha256(Serverslist[NowServer]["Key"].encode('utf-8')).hexdigest()
    Data = {"headers" : {"Content-type" : "application/json"}}
    Data["json"] = Req
    try:
        Ans = json.loads((requests.post(Serverslist[NowServer]['ServerUrl'] + '/accept', **Data)).text)
    # Fix: ConnectTimeout and ProxyError are subclasses of
    # requests.exceptions.ConnectionError, so ConnectionError must be
    # listed LAST — previously it was first and the two specific
    # handlers below could never run.
    except requests.exceptions.ConnectTimeout:
        Ans = {"error" : 2, "Output": "CodeRunner连接超时", "Id": Req['Id'], "Judger": NowServer}
    except requests.exceptions.ProxyError:
        Ans = {"error" : 2, "Output": "请联系管理员检查网络环境", "Id": Req['Id'], "Judger": NowServer}
    except requests.exceptions.ConnectionError:
        Ans = {"error" : 2, "Output": "无法连接CodeRunner", "Id": Req['Id'], "Judger": NowServer}
    Ans['Runner'] = NowServer
    Answer[Req['Id']] = Ans
    # Hand the server index back for the next job.
    ServersQueue.put(NowServer)
@require_http_methods(["POST"])
def GetAns(request):
    """Return the human-readable judging result for a submission id.

    Responds with "no" while the result is not yet available.
    """
    submission_id = request.body.decode("utf-8")
    if submission_id not in Answer:
        return HttpResponse("no")

    res = Answer[submission_id]
    # Server-side failures carry no region/output detail.
    if res['error'] == 2:
        return HttpResponse("服务器错误! \n ")

    region = Serverslist[res['Runner']]["Name"]
    prefixes = {
        1: "Compile Error! region: ",
        3: "运行错误! region: ",
        4: "TLE! region: ",
        5: "MLE! region: ",
    }
    if res['error'] in prefixes:
        ans = prefixes[res['error']] + region + "\n" + res['Output']
    else:
        # Success: report resource usage alongside the program output.
        ans = ("Run Time: " + str(res['cpu_time']) + "ms Memory: "
               + str(round(res['memory'] / 1024 / 1024 , 3))
               + "MB region: " + region + "\n" + res['Output'])
    return HttpResponse(ans)
@require_http_methods(["POST"])
def RunCode(request):
    """Accept a submission, queue it for judging, and return its id."""
    req = json.loads(request.body)
    lang_cfg = ini.Language[req['Language']]
    job = {
        'Id': RandStr(),
        'Language': req['Language'],
        "Name": lang_cfg["Name"],
        "CompileName" : lang_cfg["CompileName"],
        "Memory": 256 * 1024 * 1024,
        "Time": 3000,
        "Input": req['Input'],
        "Code": req['Code'],
        "RunCmd" : lang_cfg["RunCmd"],
        "CompileCmd" : lang_cfg["CompileCmd"],
    }
    # Judge asynchronously; the client polls GetAns with the returned id.
    threading.Timer(0, Runner, (job, 0)).start()
    return HttpResponse(job['Id'])
@require_http_methods(["GET"])
def Index(request):
    """Render the main page with a CSRF token for the submission form."""
    return render(request, "Index.html", {"csrf_token" : csrf(request)['csrf_token']})
# Stress-test endpoint: enable it by un-commenting its entry in urls.
# Disabled by default to prevent abuse by others.
@csrf_exempt
@require_http_methods(["GET"])
def Wrk(request):
    """Synchronously judge a fixed CPU-heavy C++ job (for load testing).

    Unlike RunCode, this blocks until judging finishes and returns the
    raw JSON result. The Code field is a URL-encoded C++ busy loop.
    """
    Run = {
        'Id': RandStr(),
        'Language': "cpp14",
        "Name": ini.Language["cpp14"]["Name"],
        "CompileName" : ini.Language["cpp14"]["CompileName"],
        "Memory": 256 * 1024 * 1024,
        "Time": 3000,
        "Input": "1 2",
        "Code": "using%20namespace%20std%3B%0D%0A%0D%0Aint%20main%28%29%7B%0D%0A%20%20%20%20double%20a%20%3D%201.555%2C%20b%20%3D%201.666%3B%0D%0A%20%20%20%20int%20times1%20%3D%201e5%2C%20times2%20%3D%204e4%3B%0D%0A%20%20%20%20for%28int%20i%20%3D%200%3B%20i%20%3C%20times1%3B%20i++%29%7B%0D%0A%20%20%20%20%20%20%20%20for%28int%20j%20%3D%200%3B%20j%20%3C%20times2%3B%20j++%29%7B%0D%0A%20%20%20%20%20%20%20%20%20%20%20%20a%20*%3D%20b%3B%0D%0A%20%20%20%20%20%20%20%20%7D%0D%0A%20%20%20%20%7D%0D%0A%20%20%20%20return%200%3B%0D%0A%7D%0D%0A%0D%0A",
        "RunCmd" : ini.Language["cpp14"]["RunCmd"],
        "CompileCmd" : ini.Language["cpp14"]["CompileCmd"],
    }
    # Synchronous call (no Timer) so the result exists before we respond.
    Runner(Run, 0)
    return HttpResponse(json.dumps(Answer[Run["Id"]]))
@csrf_exempt
@require_http_methods(["POST"])
def Connect(request):
    """Register a judge server and hand back its id and session key."""
    global Serverslist
    req = json.loads(request.body)
    # Shared-secret check: the client must present the hashed token.
    if req['Token'] != hashlib.sha256(ini.Token.encode('utf-8')).hexdigest():
        return HttpResponse("No!")

    new_server = {
        "ServerUrl": "http://" + str(request.META.get('REMOTE_ADDR')) + ':' + str(req['Port']),
        "LastTime": time.time(),
        "Id": RandStr(),
        "Key": RandStr(),
        "Name": req['Name'],
    }
    # Publish the new server's slot index, then append the record itself
    # (Runner tolerates the brief index/length mismatch).
    ServersQueue.put(len(Serverslist))
    Serverslist.append(new_server)
    return HttpResponse(json.dumps(
        {"Id": new_server["Id"], "State": "Connect", "Key": new_server['Key']}))
@csrf_exempt
@require_http_methods(["POST"])
def Ping(request):
    """Heartbeat from a judge server; refreshes its LastTime stamp."""
    req = json.loads(request.body)
    if req['Token'] != hashlib.sha256(ini.Token.encode('utf-8')).hexdigest():
        return HttpResponse("No!")
    # Locate the server by id and refresh it; unknown ids are rejected.
    for server in Serverslist:
        if server['Id'] == str(req['Id']):
            server['LastTime'] = time.time()
            return HttpResponse("pong!")
    return HttpResponse("No!")
@require_http_methods(["GET"])
def RunnerInfo(request):
    """Render a plain HTML table of the currently registered servers."""
    rows = [
        '<tr><th>' + s['ServerUrl'] + '</th><th>'
        + time.asctime(time.localtime(s['LastTime']))
        + '</th><th>' + s["Name"] + '</th></tr>'
        for s in Serverslist
    ]
    return HttpResponse("<table>" + "".join(rows) + "</table>")
def NotFound(request, template_name = '404.html'):
    """Render the 404 page.

    Fix: honour the template_name argument instead of the hard-coded
    "404.html" literal; the default keeps existing behaviour for callers
    that pass nothing.
    """
    return render(request, template_name)
|
<filename>tflow_model.py
#!/usr/bin/python
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
import cv2
# Download/load MNIST with one-hot labels; open an interactive TF session
# shared by the graph evaluations below.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
sess = tf.InteractiveSession()
def draw_batches(batches):
    """Display each flattened 28x28 MNIST row of `batches` in a cv2 window.

    Fixes: the inner pixel loops reused the loop variable `i`, clobbering
    the window index so every `imshow` targeted the wrong window name;
    also replaces Python 2's `xrange` (this file otherwise uses Python 3
    print syntax) and the removed `np.float` alias.
    """
    for idx, batch in enumerate(batches):
        cv2.namedWindow("result_" + str(idx), cv2.WINDOW_NORMAL)
        img = np.zeros((28, 28), dtype=float)
        for row in range(28):
            for col in range(28):
                img[row][col] = batch[row*28+col]
        cv2.imshow("result_" + str(idx), img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def weight_variable(shape):
    """Weight tensor of `shape`, initialised from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Bias tensor of `shape`, initialised to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def conv2d(x, W):
    """2-D convolution with stride 1 and SAME (zero) padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
    """2x2 max pooling with stride 2, halving each spatial dimension."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')
# --- Network definition: LeNet-style conv net for 28x28 MNIST digits ---
# Inputs: flattened 784-pixel images and one-hot 10-class labels.
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])
# First conv layer: 5x5 kernels, 1 input channel -> 32 feature maps.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
x_image = tf.reshape(x, [-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer: 32 -> 64 feature maps, pooled down to 7x7.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# Fully-connected layer (7*7*64 -> 1024) with dropout.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# Readout layer -> softmax over the 10 digit classes.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Training: cross-entropy loss, Adam optimiser, accuracy metric, saver.
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
saver = tf.train.Saver()
if __name__ == '__main__':
    sess.run(tf.initialize_all_variables())
    # Train for 10k mini-batches of 50, reporting train accuracy every 100.
    for i in range(10000):
        batch = mnist.train.next_batch(50)
        if i%100 == 0:
            # Evaluate without dropout (keep_prob 1.0).
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y_: batch[1], keep_prob: 1.0}, session=sess)
            print("step %d, training accuracy %g"%(i, train_accuracy))
        # keep_prob 0.5: dropout is active only during training steps.
        sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    save_path = saver.save(sess, "./model.ckpt")
    print("Model saved in file: %s" % save_path)
    print("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}, session=sess))
|
# -*- coding: utf8 -*-
import sys
import os
from PIL import Image, ImageTk
import tkinter
import argparse
# Attribute vocabularies for the labelling GUI (labels are Japanese).
# Full colour set: gold, brown, red, orange, pink, black, blue, purple,
# green, white, silver, yellow.
color_list = ["金", "茶", "赤", "橙", "桃", "黒", "青", "紫", "緑", "白", "銀", "黄"]
# Hair colours: same set minus yellow.
hair_color_list = ["金", "茶", "赤", "橙", "桃", "黒", "青", "紫", "緑", "白", "銀"]
# Hair styles: long, short, twin-tails, drills, ponytail.
hair_type_list = ["ロング", "ショート", "ツインテ", "ドリル", "ポニテ"]
eye_color_list = ["青", "紫", "赤", "茶", "緑", "黄", "桃", "黒"]
# Boolean-like options rendered as true/false radio buttons.
opend_mouse = ["true", "false"]
flags_hat = ["true", "false"]
flags_glasses = ["true", "false"]
def JapaneseColor2EnglshColor(string):
    """Map a Japanese colour name to the Tk colour name used for display.

    Returns None for unknown names. Replaces a 12-branch if/elif chain
    with a single lookup table; the mapping itself is unchanged, and the
    function no longer depends on the module-level color_list.
    """
    mapping = {
        "金": "light goldenrod",
        "茶": "sienna1",
        "赤": "red",
        "橙": "dark orange",
        "桃": "Light Pink3",
        "黒": "black",
        "青": "blue",
        "紫": "purple",
        "緑": "green",
        # NOTE(review): the original comment said "note this is NOT white"
        # yet returned "white" — confirm the intended Tk colour name.
        "白": "white",
        "銀": "silver",
        "黄": "yellow",
    }
    return mapping.get(string)
def click_registration_button():
    """Print the attribute choices currently selected in the GUI."""
    # (label, option list, IntVar) per attribute; output is identical to
    # the six individual print statements this replaces.
    selections = [
        ("選択された髪の色:", hair_color_list, v100),
        ("選択された髪型:", hair_type_list, v101),
        ("選択された目の色:", eye_color_list, v102),
        ("選択された開口:", opend_mouse, v103),
        ("選択された帽子:", flags_hat, v104),
        ("選択されたメガネ:", flags_glasses, v105),
    ]
    for label, options, var in selections:
        print(label, options[var.get()])
    print("=="*20)

    # JSON export (not implemented yet).
    output_dict = {}
class ImageFrame(tkinter.Frame):
    """A tkinter Frame that loads one image, scaled to 200x200."""

    def __init__(self, master, path_img):
        # Fix: initialise the underlying Frame widget — the original
        # skipped this, yet instances are packed as Frames by the caller.
        super().__init__(master)
        # Load the image and keep a reference so Tk does not GC it.
        print("path_img:",path_img)
        self.obj_img = Image.open(path_img).resize((200, 200))
        self.image = ImageTk.PhotoImage(self.obj_img)

    def set_label(self, ImageFrameObject):
        """Attach a Label showing the image to the given parent widget."""
        label_img_fig = tkinter.Label(ImageFrameObject, image=self.image)
        label_img_fig.pack(fill=tkinter.BOTH)
if __name__ == "__main__":
    # - * - Command-line arguments - * -
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_dir', type=str, default=None)
    parser.add_argument('--dst_dir', type=str, default=None)
    args = parser.parse_args()
    print(args.src_dir)
    print(os.listdir(args.src_dir))
    if args.src_dir==None and args.dst_dir==None:
        print("please specify --src_dir and --dst_dir")
        exit()

    # Collect the PNG image paths from src_dir.
    path_src_dir = args.src_dir
    path_src_png_imgs = [os.path.join(path_src_dir, name_file) for name_file in os.listdir(path_src_dir)]

    # - * - GUI construction - * -
    root = tkinter.Tk()
    # First-level frames: f0 (image side), f1 (attribute side).
    f0 = tkinter.Frame(root)
    f1 = tkinter.Frame(root)
    # Second-level frames.
    f00 = tkinter.Frame(f0)
    #f01 = tkinter.Frame(f0)
    f01 = ImageFrame(f0, path_src_png_imgs[0])
    f01.set_label(f01)
    f02 = tkinter.Frame(f0)
    f10 = tkinter.LabelFrame(f1, text="属性選択")
    f11 = tkinter.Frame(f1)
    # Frame placement: first level ...
    f0.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f1.pack(side=tkinter.RIGHT, fill=tkinter.BOTH)
    # ... then second level.
    f00.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    f01.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    f02.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    f10.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    f11.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # f00: label naming the source image.
    label_img_name = tkinter.Label(f00, bg="green", text="src = 2007/hoge.png")
    label_img_name.pack(fill=tkinter.BOTH)
    # f01 shows the image (handled by ImageFrame above).
    # obj_img = Image.open("./sample/91913_0.png")
    # obj_img = obj_img.resize((200, 220))
    # image = ImageTk.PhotoImage(obj_img)
    # label_img_fig = tkinter.Label(f01, image=image)
    # label_img_fig.pack(fill=tkinter.BOTH)
    # f02: placeholder background.
    label_kari_name = tkinter.Label(f02, bg="green", text="仮")
    label_kari_name.pack(fill=tkinter.BOTH)
    # - - - f10: one LabelFrame of radio buttons per attribute - - -
    # Hair colour.
    f100 = tkinter.LabelFrame(f10, text="髪の色", relief="sunken", bg="white")
    v100 = tkinter.IntVar()
    v100.set(0)
    for i, hair_color in enumerate(hair_color_list):
        tkinter.Radiobutton(f100, text=hair_color, fg=JapaneseColor2EnglshColor(hair_color), bg="white", activebackground="black", value=i, variable=v100).pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f100.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # Hair style.
    f101 = tkinter.LabelFrame(f10, text="髪型", relief="sunken")
    v101 = tkinter.IntVar()
    v101.set(0)
    for i, hair_type in enumerate(hair_type_list):
        tkinter.Radiobutton(f101, text=hair_type, activebackground="black", value=i, variable=v101).pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f101.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # Eye colour.
    v102 = tkinter.IntVar()
    v102.set(0)
    f102 = tkinter.LabelFrame(f10, text="目の色", relief="sunken")
    for i, eye_color in enumerate(eye_color_list):
        tkinter.Radiobutton(f102, text=eye_color, activebackground="black", fg=JapaneseColor2EnglshColor(eye_color), value=i , variable=v102).pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f102.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # Open-mouth flag.
    v103 = tkinter.IntVar()
    v103.set(0)
    f103 = tkinter.LabelFrame(f10, text="開口", relief="sunken")
    for i, flag_opend_mouse in enumerate(opend_mouse):
        tkinter.Radiobutton(f103, text=flag_opend_mouse, value=i , activebackground="black", variable=v103).pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f103.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # Hat flag.
    v104 = tkinter.IntVar()
    v104.set(0)
    f104 = tkinter.LabelFrame(f10, text="帽子", relief="sunken")
    for i, flag_hat in enumerate(flags_hat):
        tkinter.Radiobutton(f104, text=flag_hat, value=i , activebackground="black", variable=v104).pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f104.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # Glasses flag.
    v105 = tkinter.IntVar()
    v105.set(0)
    f105 = tkinter.LabelFrame(f10, text="メガネ", relief="sunken")
    for i, flag_glasses in enumerate(flags_glasses):
        tkinter.Radiobutton(f105, text=flag_glasses, value=i, activebackground="black", variable=v105).pack(side=tkinter.LEFT, fill=tkinter.BOTH)
    f105.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # - - - f11: action buttons - - -
    button110 = tkinter.Button(f11, text="この内容で登録", command=click_registration_button)
    button110.pack(side=tkinter.TOP, fill=tkinter.BOTH)
    # NOTE(review): the delete button has no command bound yet.
    button111 = tkinter.Button(f11, text="削除する")
    button111.pack(side=tkinter.TOP, fill=tkinter.BOTH)

    root.mainloop()
|
import jax.numpy as jnp
import haiku as hk
import chex
from ._base import PolicyObjective
from ..utils import is_qfunction
class SoftPG(PolicyObjective):
    """Soft policy-gradient objective.

    Maximizes E[ w * min_i q_i(s, a) ] where the action a is freshly
    sampled from the current policy and the minimum over the target
    q-functions mitigates over-estimation (clipped double-Q style).
    """

    def __init__(self, pi, q_targ_list, optimizer=None, regularizer=None):
        # pi: the updateable policy; q_targ_list: list of target q-functions.
        super().__init__(pi, optimizer=optimizer, regularizer=regularizer)
        self._check_input_lists(q_targ_list)
        self.q_targ_list = q_targ_list

    @property
    def hyperparams(self):
        # Snapshot the params/function_state of every target q-function
        # (plus any regularizer hyperparams) as an immutable structure so
        # they can be threaded through the jitted objective.
        return hk.data_structures.to_immutable_dict({
            'regularizer': getattr(self.regularizer, 'hyperparams', {}),
            'q': {'params': [q_targ.params for q_targ in self.q_targ_list],
                  'function_state': [q_targ.function_state for q_targ in self.q_targ_list]}})

    def objective_func(self, params, state, hyperparams, rng, transition_batch, Adv):
        """Compute the scalar objective for one transition batch.

        Adv is ignored; it is accepted for interface compatibility.
        Returns (objective, (dist_params, log_pi, state_new)).
        """
        rngs = hk.PRNGSequence(rng)

        # get distribution params from function approximator
        S = self.pi.observation_preprocessor(next(rngs), transition_batch.S)
        dist_params, state_new = self.pi.function(params, state, next(rngs), S, True)
        A = self.pi.proba_dist.sample(dist_params, next(rngs))
        log_pi = self.pi.proba_dist.log_proba(dist_params, A)

        # Evaluate every target q-function at (s, a).
        Q_sa_list = []
        qs = list(zip(self.q_targ_list, hyperparams['q']
                      ['params'], hyperparams['q']['function_state']))
        for q_targ, params_q, state_q in qs:
            # compute objective: q(s, a)
            S = q_targ.observation_preprocessor(next(rngs), transition_batch.S)
            Q, _ = q_targ.function_type1(params_q, state_q, next(rngs), S, A, True)
            Q_sa_list.append(Q)
        # take the min to mitigate over-estimation
        Q_sa_next_list = jnp.stack(Q_sa_list, axis=-1)
        assert Q_sa_next_list.ndim == 2, f"bad shape: {Q_sa_next_list.shape}"

        Q = jnp.min(Q_sa_next_list, axis=-1)
        assert Q.ndim == 1, f"bad shape: {Q.shape}"

        # clip importance weights to reduce variance
        W = jnp.clip(transition_batch.W, 0.1, 10.)

        # the objective
        chex.assert_equal_shape([W, Q])
        chex.assert_rank([W, Q], 1)
        objective = W * Q
        return jnp.mean(objective), (dist_params, log_pi, state_new)

    def _check_input_lists(self, q_targ_list):
        # Validate q_targ_list: non-empty list/tuple of q-functions.
        if not isinstance(q_targ_list, (tuple, list)):
            raise TypeError(f"q_targ_list must be a list or a tuple, got: {type(q_targ_list)}")
        if not q_targ_list:
            raise ValueError("q_targ_list cannot be empty")
        for q_targ in q_targ_list:
            if not is_qfunction(q_targ):
                raise TypeError(f"all q_targ in q_targ_list must be a coax.Q, got: {type(q_targ)}")

    def update(self, transition_batch, Adv=None):
        r"""
        Update the model parameters (weights) of the underlying function approximator.

        Parameters
        ----------
        transition_batch : TransitionBatch

            A batch of transitions.

        Adv : ndarray, ignored

            This input is ignored; it is included for consistency with other policy objectives.

        Returns
        -------
        metrics : dict of scalar ndarrays

            The structure of the metrics dict is ``{name: score}``.

        """
        return super().update(transition_batch, None)

    def grads_and_metrics(self, transition_batch, Adv=None):
        r"""
        Compute the gradients associated with a batch of transitions with
        corresponding advantages.

        Parameters
        ----------
        transition_batch : TransitionBatch

            A batch of transitions.

        Adv : ndarray, ignored

            This input is ignored; it is included for consistency with other policy objectives.

        Returns
        -------
        grads : pytree with ndarray leaves

            A batch of gradients.

        function_state : pytree

            The internal state of the forward-pass function. See :attr:`Policy.function_state
            <coax.Policy.function_state>` and :func:`haiku.transform_with_state` for more details.

        metrics : dict of scalar ndarrays

            The structure of the metrics dict is ``{name: score}``.

        """
        return super().grads_and_metrics(transition_batch, None)
|
# Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import functools
from nose.tools import eq_
from varlens.commands import reads
from . import data_path, run_and_parse_csv, cols_concat, temp_file
# `run` invokes the varlens reads command and parses its CSV output.
run = functools.partial(run_and_parse_csv, reads.run)

# Columns expected in every result frame.
expected_cols = (
    "source,query_name,reference_start,reference_end,cigarstring").split(',')
def test_basic():
    """Row counts for bam_0, unfiltered and per read-flag filter."""
    cases = [
        ([], 953),
        (["--is-duplicate"], 173),
        (["--is-read1"], 481),
        (["--is-read2"], 472),
    ]
    for extra_args, expected_rows in cases:
        result = run([data_path("CELSR1/bams/bam_0.bam")] + extra_args)
        eq_(result.shape, (expected_rows, len(expected_cols)))
def test_loci_filtering():
    """Row counts for bam_5 with various --locus specifications.

    Covers both the 1-based colon and 0-based slash locus syntaxes.
    """
    cases = [
        (None, 37053),
        ("chr22:46930257-46930259", 1795),
        ("chr22/46930256-46930259", 1795),
        ("chr22:46930257-46930257", 1753),
        ("chr22:46930257", 1753),
        ("chr22/46930256", 1753),
    ]
    for locus, expected_rows in cases:
        args = [data_path("CELSR1/bams/bam_5.bam")]
        if locus is not None:
            args += ["--locus", locus]
        result = run(args)
        eq_(result.shape, (expected_rows, len(expected_cols)))
def test_read_filtering():
    """Filtering by reference start, then additionally by query name."""
    base_args = [
        data_path("CELSR1/bams/bam_5.bam"),
        "--reference-start", '46932059',
    ]
    eq_(run(base_args).shape, (26, len(expected_cols)))
    eq_(run(base_args + ["--query-name-contains", '57841']).shape,
        (1, len(expected_cols)))
def test_round_trip():
    """Filtered reads written to BAM and re-read match a direct query."""
    source = data_path("CELSR1/bams/bam_5.bam")
    loci = ["--locus", "chr22/46930276", "--locus", "chr22/46930256"]
    with temp_file(".bam") as out:
        reads.run([source] + loci + ["--out", out])
        written = run([out])
        direct = run([source] + loci)
        # Compare everything except the source column, order-insensitively.
        eq_(sorted(cols_concat(written, expected_cols[1:])),
            sorted(cols_concat(direct, expected_cols[1:])))
def test_round_trip_sam():
    """Same round trip as test_round_trip, but through a SAM file."""
    source = data_path("CELSR1/bams/bam_5.bam")
    loci = ["--locus", "chr22/46930276", "--locus", "chr22/46930256"]
    with temp_file(".sam") as out:
        print(out)
        reads.run([source] + loci + ["--out", out])
        written = run([out])
        direct = run([source] + loci)
        # Compare everything except the source column, order-insensitively.
        eq_(sorted(cols_concat(written, expected_cols[1:])),
            sorted(cols_concat(direct, expected_cols[1:])))
|
<reponame>ncrubin/chemftr
""" Determine costs for DF decomposition in QC """
from typing import Tuple
from chemftr.utils import QR, QI, power_two
import numpy as np
from numpy.lib.scimath import arccos, arcsin # want version that has analytic continuation to cplx
def compute_cost(n: int, lam: float, dE: float, L: int, Lxi: int, chi: int, beta: int, stps: int,
verbose: bool = False) -> Tuple[int, int, int]:
""" Determine fault-tolerant costs using DF decomposition in quantum chemistry
Args:
n (int) - the number of spin-orbitals
lam (float) - the lambda-value for the Hamiltonian
dE (float) - allowable error in phase estimation
L (int) - the rank of the first decomposition
Lxi (int) - the total number of eigenvectors
chi (int) - equivalent to aleph_1 and aleph_2 in the document, the number of bits for
the representation of the coefficients
beta (int) - equivalent to beth in the document, the number of bits for the rotations
stps (int) - an approximate number of steps to choose the precision of single qubit
rotations in preparation of the equal superposition state
verbose (bool) - do additional printing of intermediates?
Returns:
step_cost (int) - Toffolis per step
total_cost (int) - Total number of Toffolis
ancilla_cost (int) - Total ancilla cost
"""
# The number of bits used for the second register.
nxi = np.ceil(np.log2(n//2))
# The number of bits for the contiguous register.
nLxi = np.ceil(np.log2(Lxi + n //2))
# The number of bits used for the first register.
nL = np.ceil(np.log2(L + 1))
# The power of 2 that is a factor of L + 1
eta = power_two(L + 1)
oh = [0] * 20
for p in range(20):
# JJG note: arccos arg may be > 1
v = np.round(np.power(2,p+1) / (2 * np.pi) * arccos(np.power(2,nL) /\
np.sqrt((L + 1)/2**eta)/2))
oh[p] = np.real(stps * (1 / (np.sin(3 * arcsin(np.cos(v * 2 * np.pi / np.power(2,p+1)) * \
np.sqrt((L + 1)/2**eta) / np.power(2,nL)))**2) - 1) + 4 * (p + 1))
# Bits of precision for rotation
br = int(np.argmin(oh) + 1)
# The following costs are from the list starting on page 50.
# The cost for preparing an equal superposition for preparing the first
# register in step 1 (a). We double this cost to account for the inverse.
cost1a = 2 * ( 3 * nL + 2 * br - 3 * eta - 9)
# The output size for the QROM for the first state preparation in Eq. (C27)
bp1 = nL + chi
# The cost of the QROM for the first state preparation in step 1 (b) and its inverse.
cost1b = QR(L+1, bp1)[1] + QI(L + 1)[1]
# The cost for the inequality test, controlled swap and their inverse in steps 1 (c) and (d)
cost1cd = 2 * (chi + nL)
# The total cost for preparing the first register in step 1.
cost1 = cost1a + cost1b + cost1cd
# The output size for the QROM for the data to prepare the equal superposition on the second
# register, as given in Eq. (C29).
bo = nxi + nLxi + br + 1
# This is step 2. This is the cost of outputting the data to prepare the equal superposition on
# the second register. We will assume it is not uncomputed, because we want to keep the offset
# for applying the QROM for outputting the rotations.
cost2 = QR(L + 1, bo)[1] + QI(L + 1)[1]
# The number of bits for rotating the ancilla for the second preparation. We are just entering
#this manually because it is a typical value.
br = 7
# The cost of preparing an equal superposition over the second register in a controlled way.
# We pay this cost 4 times.
cost3a = 4 * (7 * nxi + 2 * br - 6)
# The cost of the offset to apply the QROM for state preparation on the second register.
cost3b = 4 * (nLxi - 1)
bp2 = nxi + chi + 2
# The cost of the QROMs and inverse QROMs for the state preparation, where in the first one we
# need + n/2 to account for the one-electron terms.
cost3c = QR(Lxi + n//2, bp2)[1] + QI(Lxi + n//2)[1] + QR(Lxi, bp2)[1] + QI(Lxi)[1]
# The inequality test and state preparations.
cost3d = 4 * ( nxi + chi)
# The total costs for state preparations on register 2.
cost3 = cost3a + cost3b + cost3c + cost3d
# The cost of adding offsets in steps 4 (a) and (h).
cost4ah = 4 * (nLxi - 1)
# The costs of the QROMs and their inverses in steps 4 (b) and (g).
cost4bg = QR(Lxi + n//2, n*beta//2)[1] + QI(Lxi + n//2)[1] + QR(Lxi, n*beta//2)[1] + QI(Lxi)[1]
# The cost of the controlled swaps based on the spin qubit in steps 4 (c) and (f).
cost4cf = 2 * n
# The controlled rotations in steps 4 (d) and (f).
cost4df = 4 * n * (beta - 2)
# The controlled Z operations in the middle for step 4 (e).
cost4e = 3
# This is the cost of the controlled rotations for step 4.
cost4 = cost4ah + cost4bg + cost4cf + cost4df + cost4e
# This is the cost of the reflection on the second register from step 6.
cost6 = nxi + chi + 2
# The cost of the final reflection req'd to construct the step of the quantum walk from step 9.
cost9 = nL + nxi + chi + 1
# The extra two qubits for unary iteration and making the reflection controlled.
cost10 = 2
# The Toffoli cost for a single step
cost = cost1 + cost2 + cost3 + cost4 + cost6 + cost9 + cost10
# The number of steps needed
iters = np.ceil(np.pi * lam / (2 * dE))
# Now the number of qubits from the list on page 54.
k1 = np.power(2,QR(Lxi + n//2, n*beta//2)[0])
# The control register for phase estimation and iteration on it.
ac1 = np.ceil(np.log2(iters + 1))*2 - 1
# The system qubits
ac2 = n
# The first register prepared, a rotated qubit and a flag qubit.
ac3 = nL + 2
# The output of the QROM, the equal superposition state and a flag qubit.
ac4 = nL + chi*2 + 1
# The data used for preparing the equal superposition state on the second register.
ac5 = bo
# The second register, a rotated qubit and a flag qubit.
ac6 = nxi + 2
# The second preparation QROM output.
ac8 = bp2
# The equal superposition state and the result of the inequality test.
ac9 = chi + 1
# The angles for rotations.
ac10 = k1*n*beta//2
# The phase gradient state.
ac11 = beta
# A control qubit for the spin.
ac12 = 1
# A T state.
ac13 = 1
if verbose:
print("[*] Top of routine")
print(" [+] nxi = ", nxi)
print(" [+] nLxi = ", nLxi)
print(" [+] nL = ", nL)
print(" [+] eta = ", eta)
print(" [+] cost3 = ", cost3)
print(" [+] cost4 = ", cost4)
print(" [+] cost = ", cost)
print(" [+] iters = ", iters)
ancilla_cost = ac1 + ac2 + ac3 + ac4 + ac5 + ac6 + ac8 + ac9 + ac10 + ac11 + +ac12 + ac13
# Sanity checks before returning as int
assert cost.is_integer()
assert iters.is_integer()
assert ancilla_cost.is_integer()
step_cost = int(cost)
total_cost = int(cost * iters)
ancilla_cost = int(ancilla_cost)
return step_cost, total_cost, ancilla_cost
if __name__ == '__main__':
    # GLOBAL DEFAULTS
    DE = 0.001   # target precision dE for phase estimation (iters scales as lam/dE)
    CHI = 10     # bits of precision chi used in the QROM output sizes
    # Reiher et al orbitals
    N = 108      # number of orbitals n for this system
    LAM = 294.8  # lambda (1-norm) of the Hamiltonian
    L = 360      # first-register dimension L
    LXI = 13031  # second-register dimension Lxi
    BETA = 16    # bits beta for the rotation angles
    # Here we're using an initial calculation with a very rough estimate of the number of steps
    # to give a more accurate number of steps. Then we input that into the function again.
    output = compute_cost(N,LAM,DE,L,LXI,CHI,BETA,stps=20000)
    stps1 = output[0]
    output = compute_cost(N,LAM,DE,L,LXI,CHI,BETA,stps1)
    # Regression check against the expected (step, total, ancilla) Toffoli counts.
    assert output == (21753, 10073183463, 3725)
    print("OUTPUT (Reiher): ", output)
    # Li et al orbitals
    N = 152
    LAM = 1171.2
    L = 394
    LXI = 20115
    BETA = 20
    # Here we're using an initial calculation with a very rough estimate of the number of steps
    # to give a more accurate number of steps. Then we input that into the function again.
    output = compute_cost(N,LAM,DE,L,LXI,CHI,BETA,stps=20000)
    stps2 = output[0]
    output = compute_cost(N,LAM,DE,L,LXI,CHI,BETA,stps2)
    # Regression check for the Li et al system.
    assert output == (35008, 64404812736, 6404)
    print("OUTPUT (Li): ", output)
|
<gh_stars>0
# ----------------------------------------------------------------------------
# Copyright (c) 2020, <NAME>.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
def get_possible_stratas(metadatas: dict, categorical: dict,
                         stratify: tuple, logs: list) -> dict:
    """
    Collect, per metadata table, the variables requested via
    "-p"/"--p-stratify" that are both present in the table and categorical.

    Parameters
    ----------
    metadatas : dict
        Key = Metadata file path. Value = Metadata table.
    categorical : dict
        Key = Metadata file path. Value = categorical variable names.
    stratify : tuple
        Metadata variables on which to split the visualizations.
    logs : list
        List of [variable, metadata file path, warning message, number]
        entries; rejected variables are appended here.

    Returns
    -------
    possible_stratas : dict
        Key = Metadata file path. Value = variables usable for stratification.
    """
    possible_stratas = {}
    for md_fp, cat_vars in categorical.items():
        columns = metadatas[md_fp].columns
        for variable in stratify:
            if variable not in columns:
                logs.append([variable, md_fp, 'not in', np.nan])
            elif variable not in cat_vars:
                logs.append([variable, md_fp, 'not categorical', np.nan])
            else:
                possible_stratas.setdefault(md_fp, []).append(variable)
    return possible_stratas
def make_merged_columns(md: pd.DataFrame, strata_: list) -> str:
    """
    Join variables contents for multiple, merged combination stratification.

    Parameters
    ----------
    md : pd.DataFrame
        Metadata table (modified in place: the merged column is added).
    strata_ : list
        Variables to stratify on.

    Returns
    -------
    new_column : str
        Name of the variable created from joined variables.
    """
    new_column = '__'.join(strata_)
    # Cast to str after filling NaNs so non-string (e.g. numeric) variables
    # can be merged too; missing values still become the literal 'nan'.
    md[new_column] = md[strata_].fillna('nan').astype(str).agg('__'.join, axis=1)
    return new_column
def get_stratas(metadatas: dict, possible_stratas: dict,
                max_strata: int, merge: bool, logs: list) -> dict:
    """
    Per metadata table, keep the categorical variables (merged or not)
    whose number of factors does not exceed the allowed maximum.

    Parameters
    ----------
    metadatas : dict
        Key = Metadata file path. Value = Metadata table.
    possible_stratas : dict
        Key = Metadata file path. Value = candidate stratification variables.
    max_strata : int
        Maximum number of stratifications to create.
    merge : bool
        Whether to merge multiple stratification variables or not.
    logs : list
        List of [variable, metadata file path, warning message, number]
        entries; rejected variables are appended here.

    Returns
    -------
    stratas : dict
        Key = Metadata file path. Value = variables to stratify on.
    """
    stratas = {}
    for md_fp, candidates in possible_stratas.items():
        md = metadatas[md_fp]
        # When merging, a single combined column replaces the candidates.
        selected = [make_merged_columns(md, candidates)] if merge else candidates
        for variable in selected:
            n_factors = md[variable].value_counts().size
            if n_factors <= max_strata:
                stratas.setdefault(md_fp, []).append(variable)
            else:
                logs.append([variable, md_fp, 'too many factors', n_factors])
    return stratas
def get_dummy_stratas(metadatas: dict) -> dict:
    """
    Add a one-factor 'no_stratification' categorical variable to every
    metadata table and return it as the sole stratification variable.

    Parameters
    ----------
    metadatas : dict
        Key = Metadata file path. Value = Metadata table (modified in place).

    Returns
    -------
    stratas : dict
        Key = Metadata file path.
        Value = the single dummy 'no_stratification' variable.
    """
    dummy = 'no_stratification'
    stratas = {}
    for md_fp in metadatas:
        metadatas[md_fp][dummy] = dummy
        stratas[md_fp] = [dummy]
    return stratas
def get_stratification(metadatas: dict, categorical: dict,
                       stratify: tuple, max_strata: int,
                       merge: bool, logs: list) -> dict:
    """
    Collect the categorical metadata variables to stratify on for each
    metadata table; fall back to a dummy single stratum when no
    stratification variables were requested.

    Parameters
    ----------
    metadatas : dict
        Key = Metadata file path. Value = Metadata table.
    categorical : dict
        Key = Metadata file path. Value = categorical variable names.
    stratify : tuple
        Metadata variables on which to split the visualizations.
    max_strata : int
        Maximum number of stratifications to create.
    merge : bool
        Whether to merge multiple stratification variables or not.
    logs : list
        List of [variable, metadata file path, warning message, number]
        entries.

    Returns
    -------
    stratas : dict
        Key = Metadata file path. Value = variables to stratify on.
    """
    if not stratify:
        return get_dummy_stratas(metadatas)
    possible_stratas = get_possible_stratas(metadatas, categorical, stratify, logs)
    return get_stratas(metadatas, possible_stratas, max_strata, merge, logs)
|
from audioop import tomono
from platform import release
from typing import Sized
from pywhatkit.main import search
from pywhatkit.sc import cancelShutdown
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
import wikipedia
import pyjokes
import googlesearch
import os
import time
import datetime
import re
import webbrowser
import requests
import json
import subprocess
import pyautogui
import random
import pyjokes
import math
from tkinter import*
from tkinter import messagebox
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pyautogui
import wolframalpha
import PyDictionary
# Speech recognizer shared by take_command() to capture microphone input.
listener = sr.Recognizer()
# Text-to-speech engine shared by talk()/t2s().
engine = pyttsx3.init()
rate = engine.getProperty('rate')      # NOTE(review): fetched but never used
volume = engine.getProperty('volume')  # NOTE(review): fetched but never used
newvoicerate = 190                     # speaking rate applied to the engine below
voices = engine.getProperty("voices")  # NOTE(review): fetched but never used
engine.setProperty('rate', newvoicerate)
def t2s(text):
    """Speak *text* aloud (text-to-speech).

    The original body called ``object.text2speech(text)``, which always
    raises AttributeError because ``object`` is the Python builtin with no
    such attribute. Route through the module-level pyttsx3 engine instead,
    mirroring talk().
    """
    engine.say(text)
    engine.runAndWait()
def talk(text):
    """Speak *text* aloud through the shared pyttsx3 engine (blocks until done)."""
    engine.say(text)
    engine.runAndWait()
def take_command():
    """Listen on the microphone and return the recognized command.

    Returns the lower-cased transcript with the wake word 'vf' replaced by
    a space, or an empty string when nothing could be recognized.  The
    original left ``command`` unbound on failure (UnboundLocalError at the
    return) and tested for 'vF', which can never occur in a lower-cased
    string.
    """
    command = ""  # fixed: previously unbound when recognition failed
    try:
        with sr.Microphone() as source:
            print("Listening..")
            voice = listener.listen(source)
            command = listener.recognize_google(voice)
            command = command.lower()
            # fixed: the transcript is already lower-case, so match 'vf'
            # (the original checked 'vF', which never matched).
            if "vf" in command:
                command = command.replace("vf", " ")
            print(command)
    except Exception:
        # Best effort: microphone or recognition errors yield an empty command.
        pass
    return command
def run_vF():
    """Listen for one voice command and dispatch it to the matching action.

    Fixes over the original: unreachable duplicate branches removed
    ('exit down', 'solve'), str.replace() misuse in the info branch,
    non-indexable googlesearch generator, PyDictionary module-vs-class
    confusion, swapped synonym/antonym lookups, an invalid '\\s' escape in
    the screenshot path, and printing of talk()'s None return value.
    """
    command = take_command()
    print(command)
    if "play" in command:
        song = command.replace("play", " ")
        talk("playing " + song)
        pywhatkit.playonyt(song)
    elif "my name is" in command:
        name_needed = command.replace("my name is", "")
        talk("from now on i will call you " + name_needed)
    elif "hey" in command or "hi" in command or "wassup" in command or "hello" in command or "what is up" in command or "what's up" in command:
        greetings = ["Hi sir, What we gonna do today?", "Hi sir, what are we doing today?", "Hi sir, How can i help you?"]
        greet = random.choice(greetings)
        talk(greet)
        # fixed: the original printed talk()'s return value, i.e. None
        print(greet)
    elif "joke" in command:
        # fixed: fetch the joke once so the spoken and printed joke match
        joke = pyjokes.get_joke()
        talk(joke)
        print(joke)
    elif "time" in command:
        now = datetime.datetime.now().strftime("%I:%M %p")
        talk("the time right now is " + now)
        print("the time right now is " + now)
    elif "shutdown" in command:
        print("in how many seconds would you like to shut down your computer?")
        talk("in how many seconds would you like to shut down your computer?")
        delay = int(input())
        talk("Please say exit Down to cancel the shut down process.")
        os.system("shutdown /s /t " + str(delay))
    elif "exit down" in command:
        # Cancels a pending shutdown/restart.  The original contained a
        # second, unreachable copy of this branch further down.
        talk("cancelling shut down!")
        os.system("shutdown /a")
        talk("Shut down cancelled!")
        print("Shut down cancelled!")
    elif "restart" in command:
        print("in how many seconds would you like to restart your computer?")
        talk("in how many seconds would you like to restart your computer?")
        delay = int(input())
        talk("Please say exit Down to cancel the restart process.")
        os.system("shutdown /r /t " + str(delay))
    elif "netflix" in command:
        webbrowser.open("https://www.netflix.com/browse")
        talk("redirected you to netflix ")
        print("Redirected you to Netflix")
    elif "watch movies" in command or "watch" in command or "stream movies" in command or "watch some movies" in command or "movies" in command:
        webbrowser.open("https://cineb.net")
        talk("I have redirected you to a free movies/shows streaming website")
        print("I have redirected you to a free movies/shows streaming website")
    elif "search" in command:
        search_lol = command.replace("search", " ")
        pywhatkit.search(search_lol)
        talk(search_lol)
        print(search_lol)
    elif "information" in command or "info" in command:
        # fixed: str.replace() was called with a string as its third
        # (count) argument, which raises TypeError; strip both trigger words
        information = command.replace("information", "").replace("info", "").strip()
        pywhatkit.info(information)
        talk(information)
        print(information)
    elif "who is" in command:
        people = command.replace('who is', "")
        info = wikipedia.summary(people, 3)
        print(info)
        talk(info)
    elif "what is" in command:
        what_people = command.replace("what is", "")
        info_2 = wikipedia.summary(what_people, 1)
        print(info_2)
        talk(info_2)
    elif "can you tell me about" in command:
        searches_user = command.replace("can you tell me about", "")
        # fixed: googlesearch.search() returns a generator, which is not
        # indexable; materialize it before taking the first few links
        links = list(googlesearch.search(searches_user, num_results=5, lang="eng"))
        for link in links[:3]:
            print(link)
        talk("I have displayed a few links on your screen, please make sure to refer to them for further information regarding them. You can also ask me who is and what is questions seperately. ")
    elif "news" in command:
        webbrowser.open_new_tab("https://news.google.com/topstories?hl=en-IN&gl=IN&ceid=IN:en")
        talk("I have redirected you to the official Google page for news.")
        print("I have redirected you to the official Google page for news.")
    elif 'open' in command:
        # fixed: the original stripped the word "search" instead of "open"
        statement = command.replace("open", "").strip()
        webbrowser.open_new_tab(statement)
    elif "name" in command:
        print("My name is vF sir!")
        talk("My name is vF sir!")
    elif "who made you" in command or "who created you" in command or "who built you" in command or "who is your developer" in command or "who developed you" in command:
        print("I was made by <NAME>")
        # fixed: "made my" -> "made by"
        talk("I was made by Yashas Bhat")
        webbrowser.open_new_tab("https://yashasnbhat.wixsite.com/yshs")
        talk("I will now be redirecting you to his website, If you have any quries regarding the program, make sure to email him.")
        print("I will now be redirecting you to his website, If you have any quries regarding the program, make sure to email him.")
    elif "yashas" in command or "bhat" in command:
        webbrowser.open("https://yashasnbhat.wixsite.com/yshs")
        talk("He is the developer of this program. Redirecting you to his website so that you can contact him either through his email or twitter.")
        print("He is the developer of this program. Redirecting you to his website so that you can contact him either through his email or twitter.")
    elif "stop" in command or "quit" in command:
        talk("vF is now shutting down.")
        exit()
    elif "screenshot" in command:
        myScreenshot = pyautogui.screenshot()
        print("Where would you like to store this screenshot? sir")
        talk("Where would you like to store this screenshot? sir")
        path_input = input(r"")
        if not path_input.strip():
            talk("Error")
        else:
            # fixed: "\screen.png" contained an invalid escape sequence;
            # build the destination path portably instead
            myScreenshot.save(os.path.join(path_input, "screen.png"))
            print("ScreenShot Saved!")
            talk("Screenshot saved!")
    elif "solve" in command:
        # fixed: a first, trivial "solve" branch made this WolframAlpha
        # branch unreachable in the original
        talk('I can answer to computational and geographical questions and what question do you want to ask now')
        question = take_command()
        client = wolframalpha.Client('R2K75H-7ELALHR35X')
        res = client.query(question)
        answer = next(res.results).text
        talk(answer)
        print(answer)
    elif "email" in command:
        print("Before we start, Using this app to send email is recognized as a vulnerability by google. Please make sure to turn off Less secure apps on https://myaccount.google.com/lesssecureapps?pli=1&rapt=AEjHL4NbRqwsP6QIEIXsdNzJFu_lrS<KEY>wDnt0Ad1u7oQT2jkpvt6nEvydX_cHDMWpt7Crp5SPgZGiQSACc14KdCxV0fA")
        talk("Before we start, Using this app to send email is recognized as a vulnerability by google. Please make sure to turn off Less secure apps. Link has been provided.")
        talk("Type ok once the process has been completed.")
        input_needed = str(input())
        if input_needed == "ok":
            talk("What is the email being used to send to the recipient?")
            print("What is the email being used to send to the recipient?")
            email_input = str(input())
            talk("What is the password for this email account?")
            print("What is the password for this email account?")
            talk("Please make sure to diffrentiate between small case and capital case.")
            print("Please make sure to diffrentiate between small case and capital case")
            password_input = input()
            talk("What should be the subject of this email? Press Enter to leave blank")
            print("What should be the subject of this email? Press Enter to leave blank")
            email_subject = input()
            talk("What content should the body of the email consist?")
            print("What content should the body of the email consist?")
            body_mail = input()
            talk("What is the email of the recipient?")
            print("What is the email of the recipient?")
            reciptient_email = input()
            pywhatkit.send_mail(email_input, password_input, email_subject, body_mail, reciptient_email)
            talk("The email has been sent to " + reciptient_email)
            print("The email has been sent to " + reciptient_email)
        else:
            print("Please consider giving LESS SECURE APPS permission on the site mentioned above")
            print("https://myaccount.google.com/lesssecureapps?pli=1&rapt=<KEY>")
            talk("Please consider giving LESS SECURE APPS permission on the site mentioned above")
    elif "weather" in command:
        api_key = "72d6fc37e6da06e40cda6fd26450dabc"
        base_url = "https://api.openweathermap.org/data/2.5/weather?"
        talk("what is the city name")
        city_name = take_command()
        complete_url = base_url + "appid=" + api_key + "&q=" + city_name
        response = requests.get(complete_url)
        x = response.json()
        # OpenWeatherMap returns cod == "404" for an unknown city.
        if x["cod"] != "404":
            y = x["main"]
            current_temperature = y["temp"]
            current_humidiy = y["humidity"]
            z = x["weather"]
            weather_description = z[0]["description"]
            talk(" Temperature in kelvin unit is " +
                 str(current_temperature) +
                 "\n humidity in percentage is " +
                 str(current_humidiy) +
                 "\n description " +
                 str(weather_description))
            print(" Temperature in kelvin unit = " +
                  str(current_temperature) +
                  "\n humidity (in percentage) = " +
                  str(current_humidiy) +
                  "\n description = " +
                  str(weather_description))
    elif "n**a" in command or "co**" in command or "ni**er" in command or "nig**ard" in command or "d**a*s" in command or "***hole" in command or "***tard" in command:
        print("I have detected a slur and therefore I refuse to respond to any of your questions. I will now be shutting down")
        # fixed: the spoken line said "will not be shutting down" although
        # the program does exit below
        talk("I have detected a slur and therefore I refuse to respond to any of your questions. I will now be shutting down")
        exit()
    elif "meaning of" in command:
        # fixed: PyDictionary is the imported module; the class lives at
        # PyDictionary.PyDictionary
        dictionary = PyDictionary.PyDictionary()
        meaning_req = command.replace("meaning of", "")
        meaning = dictionary.meaning(meaning_req)
        print(meaning)
        talk(meaning)
    elif "synonym of" in command:
        dictionary = PyDictionary.PyDictionary()
        syn_req = command.replace("synonym of", "")
        # fixed: the spoken line previously read out antonyms
        synonyms = str(dictionary.synonym(syn_req))
        print("some of the synonyms are" + synonyms)
        talk("some of the synonyms are" + synonyms)
    elif "antonym of" in command:
        dictionary = PyDictionary.PyDictionary()
        ant_req = command.replace("antonym of", "")
        # fixed: the printed line previously showed synonyms
        antonyms = str(dictionary.antonym(ant_req))
        print("some of the antonyms are" + antonyms)
        talk("some of the antonyms are" + antonyms)
    else:
        print("I am sorry, I could not discern what you are trying to say, please repeat.")
        talk("I am sorry, I could not discern what you are trying to say, please repeat.")
# Main loop: keep servicing voice commands until the user says stop/quit.
while True:
    run_vF()
|
<reponame>crwxrws/Waybar
#!/usr/bin/env python3
import argparse
import logging
import sys
import signal
import gi
import json
gi.require_version('Playerctl', '2.0')
from gi.repository import Playerctl, GLib
logger = logging.getLogger(__name__)
def write_output(text, player):
    """Emit one Waybar JSON line describing the player's current state."""
    logger.info('Writing output')
    payload = json.dumps({
        'text': text,
        'class': 'custom-' + player.props.player_name,
        'alt': player.props.player_name,
    })
    sys.stdout.write(payload + '\n')
    sys.stdout.flush()
def on_play(player, status, manager):
    """Playback-status callback: re-render the output from current metadata."""
    logger.info('Received new playback status')
    on_metadata(player, player.props.metadata, manager)
def on_metadata(player, metadata, manager):
    """Render the current track (or Spotify ad marker) and write it to Waybar."""
    logger.info('Received new metadata')
    track_info = ''
    is_spotify_ad = (player.props.player_name == 'spotify'
                     and 'mpris:trackid' in metadata.keys()
                     and ':ad:' in player.props.metadata['mpris:trackid'])
    if is_spotify_ad:
        track_info = 'AD PLAYING'
    elif player.get_artist() != '' and player.get_title() != '':
        track_info = '{artist} - {title}'.format(artist=player.get_artist(),
                                                 title=player.get_title())
    # Prefix paused/stopped output so the bar distinguishes it from playback.
    if player.props.status != 'Playing' and track_info:
        track_info = ' ' + track_info
    write_output(track_info, player)
def on_player_appeared(manager, player, selected_player=None):
    """Start managing a newly appeared player iff it is the selected one."""
    if player is None or player.name != selected_player:
        logger.debug("New player appeared, but it's not the selected player, skipping")
        return
    init_player(manager, player)
def on_player_vanished(manager, player):
    """Clear the Waybar module output when the tracked player disappears."""
    logger.info('Player has vanished')
    sys.stdout.write('\n')
    sys.stdout.flush()
def init_player(manager, name):
    """Create a Playerctl player for *name*, hook callbacks, and render once."""
    logger.debug('Initialize player: {player}'.format(player=name.name))
    player = Playerctl.Player.new_from_name(name)
    player.connect('playback-status', on_play, manager)
    player.connect('metadata', on_metadata, manager)
    manager.manage_player(player)
    # Render immediately so the bar shows the current track without waiting
    # for the next metadata event.
    on_metadata(player, player.props.metadata, manager)
def signal_handler(sig, frame):
    """SIGINT/SIGTERM handler: blank the Waybar output and exit cleanly."""
    logger.debug('Received signal to stop, exiting')
    sys.stdout.write('\n')
    sys.stdout.flush()
    # loop.quit()
    sys.exit(0)
def parse_arguments():
    """Parse the command line: repeatable -v/--verbose and an optional --player filter."""
    cli = argparse.ArgumentParser()
    # Only emit output for this player, when given.
    cli.add_argument('--player')
    # Each occurrence of -v raises the verbosity by one.
    cli.add_argument('-v', '--verbose', action='count', default=0)
    return cli.parse_args()
def main():
    """Wire up logging, signal handlers and the Playerctl manager, then block on the GLib loop."""
    arguments = parse_arguments()
    # Initialize logging
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,
                        format='%(name)s %(levelname)s %(message)s')
    # Logging is set by default to WARN and higher.
    # With every occurrence of -v it's lowered by one
    logger.setLevel(max((3 - arguments.verbose) * 10, 0))
    # Log the sent command line arguments
    logger.debug('Arguments received {}'.format(vars(arguments)))
    manager = Playerctl.PlayerManager()
    loop = GLib.MainLoop()
    # Forward the selected player so late-appearing players are filtered too.
    manager.connect('name-appeared', lambda *args: on_player_appeared(*args, arguments.player))
    manager.connect('player-vanished', on_player_vanished)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    # Attach to players that already exist, honoring the --player filter.
    for player in manager.props.player_names:
        if arguments.player is not None and arguments.player != player.name:
            logger.debug('{player} is not the filtered player, skipping it'
                         .format(player=player.name)
                         )
            continue
        init_player(manager, player)
    loop.run()
# Script entry point.
if __name__ == '__main__':
    main()
|
<filename>jovsatools/function_approximation.py
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/function_approximation.ipynb (unless otherwise specified).
__all__ = ['KerasPipeline', 'HighLevelKerasPipeline', 'HighLevelKerasPipelineMultiTarget']
# Cell
from abc import ABC, abstractmethod
from fastcore.test import *
import jovsatools
from jovsatools import data_generator
from nbdev.showdoc import *
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
test_eq(tf.__version__>= "2.2.0", True)
# Cell
class KerasPipeline(ABC):
    """Scaffolding for a tf.keras model-building pipeline.

    Subclasses implement the four pipeline stages; ``__call__`` runs them
    in order and asserts that each stage left the expected state behind.
    The goal of this base class is mainly to add safeguards and
    assumption enforcement.
    """

    def __init__(self):
        # Safeguard: ensure that various Keras sessions don't collide.
        keras.backend.clear_session()
        self.train_x, self.train_y, self.test_x, self.test_y = None, None, None, None
        self.model = None
        self.evaluation = None
        self.train_history = None

    @abstractmethod
    def get_data(self):
        """Populate self.train_x/train_y/test_x/test_y."""

    @abstractmethod
    def build_model(self):
        """Build the keras model and store it in self.model."""

    @abstractmethod
    def run_training(self, verbose):
        # Fixed: the abstract signature now matches how __call__ invokes it
        # (the original declared run_training() with no verbose argument).
        """Compile and fit self.model; store the history in self.train_history."""

    @abstractmethod
    def evaluate_pipeline(self):
        """Evaluate self.model and store the result in self.evaluation."""

    def plot_model_shapes(self, show_shapes=True, show_layer_names=True):
        """Return a graphviz rendering of the model architecture."""
        return keras.utils.plot_model(self.model,
                                      show_shapes=show_shapes,
                                      show_layer_names=show_layer_names)

    def keras_model_summary(self):
        """Print (and return) the keras textual model summary."""
        return self.model.summary()

    def __call__(self, verbose=0):
        # Step 1: get data.
        self.get_data()
        # Check that get_data() is implemented as expected.
        assert all(d is not None
                   for d in (self.train_x, self.train_y, self.test_x, self.test_y))
        # Step 2: build model.
        self.build_model()
        # Check that build_model() is implemented as expected.
        assert self.model is not None
        # Step 3: train model.
        print("\n", "=-=" * 10, "MODEL TRAINING", "=-=" * 10, "\n")
        self.run_training(verbose)
        # Check that run_training() is implemented as expected.
        assert self.train_history is not None
        assert self.model._is_compiled
        if verbose > 0:
            print("\n", "=-=" * 10, "MODEL SUMMARY", "=-=" * 10, "\n")
            self.keras_model_summary()
        # Step 4: evaluate pipeline.
        print("\n", "=-=" * 10, "PIPELINE EVALUATION", "=-=" * 10, "\n")
        self.evaluate_pipeline()
        # Check that evaluate_pipeline() is implemented as expected.
        assert self.evaluation is not None
        # TODO (jovsa): run tests that ensure pipeline trains model above a benchmark
# Cell
class HighLevelKerasPipeline(KerasPipeline):
    """Single-output MNIST classifier built with the high-level Keras API."""

    def __init__(self, sample_n, additional_y):
        # sample_n: number of samples drawn from the data generator.
        # additional_y: forwarded to MNISTDataGenerator -- presumably
        # configures an extra target; TODO confirm against data_generator.
        self.sample_n = sample_n
        self.additional_y = additional_y
        super().__init__()

    def get_data(self):
        """Fetch train/test splits from the MNIST data generator."""
        data = data_generator.MNISTDataGenerator(self.additional_y)
        datasets = data(self.sample_n)
        self.train_x, self.train_y, self.test_x, self.test_y = datasets

    def build_model(self):
        """Build a small dense softmax classifier over (784, 1) inputs."""
        inputs = keras.Input(shape=(784, 1), name="inputs")
        # Rescale pixel values from [0, 255] to [0, 1].
        x = Rescaling(scale=1./255)(inputs)
        x = keras.layers.Flatten()(x)
        x = keras.layers.Dense(units=64, activation='relu')(x)
        output = keras.layers.Dense(units=10, activation='softmax', name="output")(x)
        self.model = keras.Model(inputs=inputs, outputs=output)

    def run_training(self, verbose):
        """Compile (RMSprop + sparse categorical cross-entropy) and fit for 10 epochs."""
        self.model.compile(
            optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
            loss=keras.losses.SparseCategoricalCrossentropy(),
            metrics=[keras.metrics.SparseCategoricalAccuracy()])
        self.train_history = self.model.fit(
            x=self.train_x, y=self.train_y, epochs=10, batch_size=64,
            verbose=verbose, validation_split=0.1)

    def evaluate_pipeline(self):
        """Store test-set loss/metrics in self.evaluation."""
        self.evaluation = self.model.evaluate(x=self.test_x, y=self.test_y)
# Cell
class HighLevelKerasPipelineMultiTarget(KerasPipeline):
    """Two-headed MNIST pipeline: a 10-way softmax head and a 1-unit sigmoid head."""

    def __init__(self, sample_n, additional_y):
        # sample_n: number of samples drawn from the data generator.
        # additional_y: forwarded to MNISTDataGenerator -- presumably
        # configures the second target; TODO confirm against data_generator.
        self.sample_n = sample_n
        self.additional_y = additional_y
        super().__init__()

    def get_data(self):
        """Fetch train/test splits; train_y/test_y carry two target columns."""
        data = data_generator.MNISTDataGenerator(self.additional_y)
        datasets = data(self.sample_n)
        self.train_x, self.train_y, self.test_x, self.test_y = datasets

    def build_model(self):
        """Shared dense trunk with two heads: output_0 (softmax, 10) and output_1 (sigmoid, 1)."""
        inputs = keras.Input(shape=(784, 1), name="inputs")
        # Rescale pixel values from [0, 255] to [0, 1].
        x = Rescaling(scale=1./255)(inputs)
        x = keras.layers.Flatten()(x)
        x = keras.layers.Dense(units=64, activation='relu')(x)
        output_1 = keras.layers.Dense(units=10, activation='softmax', name="output_0")(x)
        output_2 = keras.layers.Dense(units=1, activation='sigmoid', name="output_1")(x)
        self.model = keras.Model(inputs=inputs, outputs=[output_1, output_2])

    def custom_loss_0(self):
        """Loss factory for output_0: sparse categorical cross-entropy."""
        def loss_fn(y_true, y_pred):
            l = keras.losses.SparseCategoricalCrossentropy()
            return l(y_true, y_pred)
        return loss_fn

    def custom_loss_1(self):
        """Loss factory for output_1.

        NOTE(review): CategoricalCrossentropy is paired here with a single
        sigmoid unit; BinaryCrossentropy looks like the intended loss --
        confirm against the generator's second target before changing.
        """
        def loss_fn(y_true, y_pred):
            l = keras.losses.CategoricalCrossentropy()
            return l(y_true, y_pred)
        return loss_fn

    def run_training(self, verbose):
        """Compile with per-head losses/metrics and fit for 30 epochs."""
        self.model.compile(
            optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
            loss={
                "output_0": self.custom_loss_0(),
                "output_1": self.custom_loss_1(),
            },
            metrics={
                "output_0": keras.metrics.SparseCategoricalAccuracy(),
                # NOTE(review): categorical_accuracy on a 1-unit sigmoid head;
                # binary_accuracy may be intended -- confirm.
                "output_1": "categorical_accuracy"
            })
        self.train_history = self.model.fit(
            x=self.train_x,
            y={
                "output_0": self.train_y[:, 0],
                "output_1": self.train_y[:, 1]
            },
            epochs=30, batch_size=64,
            verbose=verbose, validation_split=0.1)

    def evaluate_pipeline(self):
        """Evaluate both heads on the test split; store results in self.evaluation."""
        self.evaluation = self.model.evaluate(x=self.test_x, y={"output_0": self.test_y[:, 0], "output_1": self.test_y[:, 1]})
<gh_stars>0
import os
import tempfile
from decimal import Decimal
from PIL import Image
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Ingredient, Recipe
from recipes.serializers import RecipeSerializer, RecipeDetailSerializer
# List/create endpoint of the recipes app.
RECIPES_URL = reverse("recipes:recipe-list")
def image_upload_url(recipe_id):
    """Return the image-upload URL for the given recipe id."""
    return reverse('recipes:recipe-upload-image', args=[recipe_id])
def detail_recipe_url(recipe_id):
    """Return the detail URL for the given recipe id."""
    return reverse('recipes:recipe-detail', args=[recipe_id])
def create_user(email, password):
    """Create and return a user with the given credentials."""
    return get_user_model().objects.create_user(email=email, password=password)
def sample_tag(user, name='Simple tag'):
    """Create and return a sample tag owned by *user*."""
    return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Simple ingredient'):
    """Create and return a sample ingredient owned by *user*."""
    return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
    """Create and return a sample recipe; *params* override the defaults."""
    base = {
        "title": "Simple recipe",
        "time_minutes": 45,
        "price_dolars": 10.00,
    }
    return Recipe.objects.create(user=user, **{**base, **params})
class PublicRecipesAPITests(TestCase):
    """Tests for the recipe endpoints without authentication."""

    def setUp(self):
        # Unauthenticated API client.
        self.client = APIClient()

    def test_login_required(self):
        """Test that authentication is required for retrieving recipes."""
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipesAPITests(TestCase):
    """Tests for the recipes API that require an authenticated user."""

    def setUp(self):
        self.user = create_user(
            email="<EMAIL>", password="<PASSWORD>"
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_get_recipes(self):
        """Test retrieving a list of recipes"""
        sample_recipe(user=self.user, title="Tomato Soup", time_minutes=25)
        sample_recipe(
            user=self.user, title="Delicious Beef", price_dolars=11.25
        )
        recipes = Recipe.objects.all().order_by('-title')
        serializer = RecipeSerializer(recipes, many=True)
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_get_only_user_recipe(self):
        """Test that only the authenticated user's recipes are retrieved"""
        sample_recipe(user=self.user, title="Tomato Soup", time_minutes=25)
        sample_recipe(
            user=self.user, title="Delicious Beef", price_dolars=11.25
        )
        other_user = create_user(
            email="<EMAIL>", password="<PASSWORD>"
        )
        sample_recipe(user=other_user, title="Orange Cake", time_minutes=60)
        sample_recipe(
            user=other_user, title="Mac & Cheese", price_dolars=4.50
        )
        recipes = Recipe.objects.filter(user=self.user).order_by('-title')
        serializer = RecipeSerializer(recipes, many=True)
        res = self.client.get(RECIPES_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_detail_recipe(self):
        """Test viewing the detail of a single recipe"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        serializer = RecipeDetailSerializer(recipe)
        url = detail_recipe_url(recipe.id)
        res = self.client.get(url)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, serializer.data)

    def test_create_basic_recipe(self):
        """Test creating a recipe without tags or ingredients"""
        payload = {
            'title': "Sugar Cookies",
            'time_minutes': 120,
            'price_dolars': Decimal('8.99')
        }
        res = self.client.post(RECIPES_URL, payload)
        # Assert the status before touching res.data['id']: on a failed POST
        # the body has no 'id' and the lookup would mask the real failure.
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        for key in payload.keys():
            self.assertEqual(getattr(recipe, key), payload[key])

    def test_create_recipe_with_tags(self):
        """Test creating a recipe with tags"""
        tag = sample_tag(user=self.user, name="Desser")
        other_tag = sample_tag(user=self.user, name="Delicious")
        payload = {
            'title': "Sugar Cookies",
            'time_minutes': 120,
            'price_dolars': Decimal('8.99'),
            'tags': [tag.id, other_tag.id, ]
        }
        res = self.client.post(RECIPES_URL, payload)
        # Status first -- see test_create_basic_recipe.
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        tags = recipe.tags.all()
        self.assertEqual(tags.count(), 2)
        self.assertIn(tag, tags)
        self.assertIn(other_tag, tags)

    def test_create_recipe_with_ingredients(self):
        """Test creating a recipe with ingredients"""
        ingredient = sample_ingredient(user=self.user, name="Sugar")
        other_ingredient = sample_ingredient(user=self.user, name="Eggs")
        payload = {
            'title': "Sugar Cookies",
            'time_minutes': 120,
            'price_dolars': Decimal('8.99'),
            'ingredients': [ingredient.id, other_ingredient.id, ]
        }
        res = self.client.post(RECIPES_URL, payload)
        # Status first -- see test_create_basic_recipe.
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        recipe = Recipe.objects.get(id=res.data['id'])
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient, ingredients)
        self.assertIn(other_ingredient, ingredients)

    def test_partial_update_recipe(self):
        """Test updating a recipe with PATCH"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        new_title = 'New Title'
        new_tags = [sample_tag(user=self.user, name='New Tag')]
        payload = {
            'title': new_title,
            'tags': [tag.id for tag in new_tags],
        }
        url = detail_recipe_url(recipe.id)
        res = self.client.patch(url, payload)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, new_title)
        # PATCH replaces the provided fields only; tags were provided.
        tags = list(recipe.tags.all())
        self.assertEqual(tags, new_tags)

    def test_update_recipe(self):
        """Test updating a recipe with PUT (omitted fields are cleared)"""
        recipe = sample_recipe(user=self.user)
        recipe.tags.add(sample_tag(user=self.user))
        recipe.ingredients.add(sample_ingredient(user=self.user))
        new_title = 'New Title'
        new_time_minutes = 42
        new_price_dolars = 32
        new_tags = [sample_tag(user=self.user, name='New Tag')]
        payload = {
            'title': new_title,
            'time_minutes': new_time_minutes,
            'price_dolars': new_price_dolars,
            'tags': [tag.id for tag in new_tags],
        }
        url = detail_recipe_url(recipe.id)
        res = self.client.put(url, payload)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        recipe.refresh_from_db()
        self.assertEqual(recipe.title, new_title)
        self.assertEqual(recipe.time_minutes, new_time_minutes)
        tags_list = list(recipe.tags.all())
        self.assertEqual(tags_list, new_tags)
        # PUT is a full replacement, so the omitted ingredients are cleared.
        ingredients = recipe.ingredients.all()
        self.assertEqual(ingredients.count(), 0)

    def test_filter_recipes_by_tags(self):
        """Test filtering recipes by tag id"""
        recipe1 = sample_recipe(user=self.user, title='Recipe 1')
        tag1 = sample_tag(user=self.user, name='Tag 1')
        recipe1.tags.add(tag1)
        recipe2 = sample_recipe(user=self.user, title='Recipe 2')
        tag2 = sample_tag(user=self.user, name='Tag 2')
        recipe2.tags.add(tag2)
        recipe3 = sample_recipe(user=self.user, title='Recipe 3')
        res = self.client.get(
            RECIPES_URL,
            {'tags': f'{tag1.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)

    def test_filter_recipes_by_ingredient(self):
        """Test filtering recipes by ingredient id"""
        recipe1 = sample_recipe(user=self.user, title='Recipe 1')
        ingredient1 = sample_ingredient(user=self.user, name='Ingredient 1')
        recipe1.ingredients.add(ingredient1)
        recipe2 = sample_recipe(user=self.user, title='Recipe 2')
        ingredient2 = sample_ingredient(user=self.user, name='Ingredient 2')
        recipe2.ingredients.add(ingredient2)
        recipe3 = sample_recipe(user=self.user, title='Recipe 3')
        res = self.client.get(
            RECIPES_URL,
            {'ingredients': f'{ingredient1.id}'}
        )
        serializer1 = RecipeSerializer(recipe1)
        serializer2 = RecipeSerializer(recipe2)
        serializer3 = RecipeSerializer(recipe3)
        self.assertIn(serializer1.data, res.data)
        self.assertNotIn(serializer2.data, res.data)
        self.assertNotIn(serializer3.data, res.data)
class RecipeImageUploadTests(TestCase):
    """Tests for uploading images to recipes."""
    def setUp(self):
        self.user = create_user(
            email="<EMAIL>", password="<PASSWORD>"
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)
        self.recipe = sample_recipe(user=self.user)
    def tearDown(self):
        # Remove the uploaded file from storage so tests do not leak files.
        self.recipe.image.delete()
    def test_upload_image(self):
        """Test uploading a valid JPEG image to a recipe"""
        url = image_upload_url(self.recipe.id)
        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
            img = Image.new('RGB', (16, 16))
            img.save(ntf, format='JPEG')
            # Rewind so the upload reads the file from the beginning.
            ntf.seek(0)
            res = self.client.post(url, {'image': ntf}, format='multipart')
        self.recipe.refresh_from_db()
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertIn('image', res.data)
        self.assertTrue(os.path.exists(self.recipe.image.path))
    def test_upload_incorrect_image_unsuccessful(self):
        """Test that uploading a non-image payload returns 400"""
        url = image_upload_url(self.recipe.id)
        res = self.client.post(
            url, {'image': 'not an image'}, format='multipart')
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
|
<gh_stars>0
import base64
import io
import os
import socket
import threading
import time
from importlib import import_module
from typing import Deque, List, Dict, Tuple
from uuid import uuid4
import json
from collections import deque
import PIL.Image
from PIL.Image import Image
import falcon
import numpy as np
from telesto.logger import logger
from telesto.config import config
from telesto.instance_segmentation import (
SegmentationObject,
DataStorage
)
from telesto.instance_segmentation.model import DummySegmentationModel, SegmentationModelBase
# Contract advertised for images submitted to POST /jobs/:
# base64-encoded RGB PNG. NOTE(review): the units of "max_size" are not
# shown here -- confirm (pixels vs. kilobytes) against the client docs.
INPUT_IMAGE_FORMAT = {
    "type": "png",
    "palette": "RGB24",
    "encoding": "base64",
    "max_size": "5120",
}
# Contract advertised for the per-object masks returned by GET /jobs/<id>:
# run-length-encoded 8-bit grey masks wrapped in JSON.
OUTPUT_OBJECT_MASK_FORMAT = {
    "type": "json",
    "palette": "GREY8",
    "encoding": "RLE",
}
# Static API description served verbatim by the /docs endpoint; the name,
# description and class list come from the service configuration.
API_DOCS = {
    "name": config.get("common", "name"),
    "description": config.get("common", "desc"),
    "authentication": {
        "header": "Authorization",
        "schema": "Bearer <API_KEY>"
    },
    "endpoints": [
        {
            "path": "/",
            "method": "GET",
            "name": "Status endpoint",
            "description": "Returns status of the API",
        },
        {
            "path": "/docs",
            "method": "GET",
            "name": "Documentation endpoint",
            "description": "Returns this information",
        },
        {
            "path": "/jobs/",
            "method": "POST",
            "request_body": {
                "image": "<str>",
            },
            "image_format": {
                **INPUT_IMAGE_FORMAT,
            },
            "response_body": {
                "job_id": "<UUID>"
            }
        },
        {
            "path": "/jobs/<job_id>",
            "method": "GET",
            "response_body": {
                "objects": [
                    {
                        "class_i": "<int>",
                        "x": "<int>",
                        "y": "<int>",
                        "w": "<int>",
                        "h": "<int>",
                        "mask": "<str>"
                    }
                ],
            },
            "object_mask_format": {
                **OUTPUT_OBJECT_MASK_FORMAT,
            },
            "classes": config.get("common", "classes").split(","),
        }
    ]
}
def preprocess(doc: Dict) -> Image:
    """Decode the base64 'image' field of *doc* into a PIL image.

    Raises ValueError when the decoded image is not in RGB mode.
    """
    raw = base64.b64decode(doc["image"])
    img = PIL.Image.open(io.BytesIO(raw))
    if img.mode == "RGB":
        return img
    raise ValueError(f"Wrong image mode: {img.mode}. Expected: 'RGB'")
def postprocess(objects: List[SegmentationObject], size: Tuple[int, int]) -> Dict:
    """Build the JSON-serializable response body from segmentation results."""
    serialized = [obj.asdict(size) for obj in objects]
    return {"objects": serialized}
class SegmentationBase:
    """Health-check endpoint: GET / reports service status and worker host."""
    def on_get(self, req, resp):
        """Respond with a small JSON status document."""
        status_doc = {
            "status": "ok",
            "host": socket.getfqdn(),
            "worker.pid": os.getpid(),
        }
        resp.body = json.dumps(status_doc, ensure_ascii=False)
class SegmentationDocs:
    """Documentation endpoint: GET /docs returns the static API description."""
    def on_get(self, req, resp):
        # API_DOCS is a module-level constant; it is serialized per request.
        resp.body = json.dumps(API_DOCS, ensure_ascii=False)
class SegmentationJobs:
    """Job submission endpoint: POST /jobs stores an image and queues a job."""
    def __init__(self, storage: DataStorage, job_queue: Deque):
        self._storage = storage
        self._job_queue = job_queue
    def on_post(self, req: falcon.Request, resp: falcon.Response):
        """Validate the request, persist the input image, enqueue a job id."""
        try:
            req_doc = json.load(req.bounded_stream)
            assert "image" in req_doc, f"'image' not found in {req_doc}"
            job_id = uuid4().hex
            image = preprocess(req_doc)
            # Persist the decoded input so the worker thread can pick it up.
            self._storage.save(job_id, image, output=False)
            self._job_queue.appendleft(job_id)
            resp.status = falcon.HTTP_CREATED
            resp.body = json.dumps({"job_id": job_id})
        except (ValueError, AssertionError) as e:
            # Client errors: missing 'image' key or non-RGB image data.
            raise falcon.HTTPError(falcon.HTTP_400, description=str(e))
        except Exception as e:
            logger.error(e, exc_info=True)
            raise falcon.HTTPError(falcon.HTTP_500)
class SegmentationJob:
    """Job result endpoint: GET /jobs/<job_id> returns segmentation output."""
    def __init__(self, storage: DataStorage):
        self._storage = storage
    def on_get(self, req: falcon.Request, resp: falcon.Response, job_id: str):
        """Return the serialized objects for *job_id*, or 404 while pending."""
        try:
            image = self._storage.load(job_id, output=False)
            # The output record is absent until the worker finishes the job.
            objects = self._storage.load(job_id, output=True)
            assert objects is not None, "No data found"
            resp_doc = postprocess(objects, image.size)
            resp.body = json.dumps(resp_doc)
        except AssertionError as e:
            raise falcon.HTTPError(falcon.HTTP_404, description=str(e))
        except ValueError as e:
            raise falcon.HTTPError(falcon.HTTP_400, description=str(e))
        except Exception as e:
            logger.error(e, exc_info=True)
            raise falcon.HTTPError(falcon.HTTP_500)
def load_model() -> SegmentationModelBase:
    """Instantiate the user-provided SegmentationModel.

    Falls back to DummySegmentationModel only when the 'model' module is
    missing AND the USE_FALLBACK_MODEL environment variable is non-zero;
    otherwise the ModuleNotFoundError is re-raised.
    """
    try:
        module = import_module("model")
        return getattr(module, "SegmentationModel")()
    except ModuleNotFoundError as err:
        if not int(os.environ.get("USE_FALLBACK_MODEL", 0)):
            raise err
        logger.warning(
            "No 'model' module found. Using fallback model 'DummySegmentationModel'"
        )
        return DummySegmentationModel()
def start_worker(storage: DataStorage, job_queue: Deque):
    """Start a daemon thread that consumes queued job ids and runs the model."""
    def thread_function():
        logger.info("Starting worker thread")
        # Model loading can be slow; do it once, inside the worker thread.
        model_wrapper = load_model()
        logger.info("Worker thread started")
        while True:
            if job_queue:
                # Producers appendleft(), so pop() yields FIFO order.
                job_id = job_queue.pop()
                logger.info(f"Processing task {job_id}")
                image = storage.load(job_id, output=False)
                objects = model_wrapper.predict(np.asarray(image))
                storage.save(job_id, objects, output=True)
                logger.info(f"Finished task {job_id}")
            else:
                # Idle poll; 1 s latency is acceptable for this job queue.
                time.sleep(1)
    thread = threading.Thread(target=thread_function, args=(), daemon=True)
    thread.start()
def add_routes(api: falcon.API):
    """Create storage, start the background worker and register HTTP routes."""
    storage = DataStorage()
    job_queue = deque()
    start_worker(storage, job_queue)
    api.add_route("/", SegmentationBase())
    api.add_route("/docs", SegmentationDocs())
    # Note: Falcon internally strips trailing slashes when compiling routes.
    # When "api.req_options.strip_url_path_trailing_slash = True" they are
    # also stripped from incoming request paths.
    api.add_route("/jobs", SegmentationJobs(storage, job_queue))
    api.add_route("/jobs/{job_id}", SegmentationJob(storage))
|
# coding: utf-8
# # Pipeline processing using serial workflows.
#
# This notebook demonstrates the continuum imaging and ICAL pipelines. These are based on ARL functions wrapped up as SDP workflows using the serial class.
# In[1]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import os
import sys
sys.path.append(os.path.join('..', '..'))
from data_models.parameters import arl_path
#results_dir = arl_path('test_results')
results_dir = './results/orig'
#from matplotlib import pylab
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'rainbow'
import numpy
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs.utils import pixel_to_skycoord
#from matplotlib import pyplot as plt
from data_models.polarisation import PolarisationFrame
from wrappers.serial.calibration.calibration import solve_gaintable
from wrappers.serial.calibration.operations import apply_gaintable
from wrappers.serial.calibration.calibration_control import create_calibration_controls
from wrappers.serial.visibility.base import create_blockvisibility
from wrappers.serial.visibility.coalesce import convert_blockvisibility_to_visibility, convert_visibility_to_blockvisibility
from wrappers.serial.skycomponent.operations import create_skycomponent
from wrappers.serial.image.deconvolution import deconvolve_cube
#from wrappers.serial.image.operations import show_image, export_image_to_fits, qa_image
from wrappers.serial.image.operations import export_image_to_fits, qa_image
from wrappers.serial.visibility.iterators import vis_timeslice_iter
from wrappers.serial.simulation.testing_support import create_low_test_image_from_gleam
from processing_components.simulation.configurations import create_named_configuration
from wrappers.serial.imaging.base import predict_2d, create_image_from_visibility, advise_wide_field
from workflows.serial.imaging.imaging_serial import invert_list_serial_workflow, predict_list_serial_workflow, deconvolve_list_serial_workflow
from workflows.serial.simulation.simulation_serial import simulate_list_serial_workflow, corrupt_list_serial_workflow
from workflows.serial.pipelines.pipeline_serial import continuum_imaging_list_serial_workflow, ical_list_serial_workflow
import pprint
import time
pp = pprint.PrettyPrinter()
import logging
def init_logging():
    """Configure the root logger to append to <results_dir>/imaging-pipeline.log.

    Relies on the module-level ``results_dir``; the directory must exist
    before the first record is written.
    """
    logging.basicConfig(filename='%s/imaging-pipeline.log' % results_dir,
                        filemode='a',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.INFO)
    return logging.getLogger()
log = init_logging()
log.info("Starting imaging-pipeline")
# In[2]:
#pylab.rcParams['figure.figsize'] = (12.0, 12.0)
#pylab.rcParams['image.cmap'] = 'Greys'
# We make the visibility. The parameter rmax determines the distance of the furthest antenna/stations used. All over parameters are determined from this number.
# In[3]:
nfreqwin=7
ntimes=5
rmax=300.0
frequency=numpy.linspace(1.0e8,1.2e8,nfreqwin)
#ntimes=11
#rmax=300.0
#frequency=numpy.linspace(0.9e8,1.1e8,nfreqwin)
channel_bandwidth=numpy.array(nfreqwin*[frequency[1]-frequency[0]])
times = numpy.linspace(-numpy.pi/3.0, numpy.pi/3.0, ntimes)
#phasecentre=SkyCoord(ra=+30.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
phasecentre=SkyCoord(ra=+0.0 * u.deg, dec=-40.0 * u.deg, frame='icrs', equinox='J2000')
bvis_list=simulate_list_serial_workflow('LOWBD2',
frequency=frequency,
channel_bandwidth=channel_bandwidth,
times=times,
phasecentre=phasecentre,
order='frequency',
rmax=rmax)
vis_list = [convert_blockvisibility_to_visibility(bv) for bv in bvis_list]
print('%d elements in vis_list' % len(vis_list))
log.debug('%d elements in vis_list' % len(vis_list))
# In[4]:
wprojection_planes=1
advice_low=advise_wide_field(vis_list[0], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
advice_high=advise_wide_field(vis_list[-1], guard_band_image=8.0, delA=0.02,
wprojection_planes=wprojection_planes)
vis_slices = advice_low['vis_slices']
npixel=advice_high['npixels2']
cellsize=min(advice_low['cellsize'], advice_high['cellsize'])
# Now make a graph to fill with a model drawn from GLEAM
# In[ ]:
gleam_model = [create_low_test_image_from_gleam(npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"),
flux_limit=1.0,
applybeam=True)
for f, freq in enumerate(frequency)]
log.info('About to make GLEAM model')
# In[ ]:
# Predict model visibilities from the GLEAM sky model using w-stacking.
# (The original cell logged the same message twice; the duplicate is removed.)
log.info('About to run predict to get predicted visibility')
start = time.time()
predicted_vislist = predict_list_serial_workflow(vis_list, gleam_model,
                                                 context='wstack',
                                                 vis_slices=vis_slices)
#log.info('About to run corrupt to get corrupted visibility')
#corrupted_vislist = corrupt_list_serial_workflow(predicted_vislist, phase_error=1.0)
end = time.time()
print('predict finished in %f seconds' % (end - start), flush=True)
# Get the LSM. This is currently blank.
# In[ ]:
model_list = [create_image_from_visibility(vis_list[f],
npixel=npixel,
frequency=[frequency[f]],
channel_bandwidth=[channel_bandwidth[f]],
cellsize=cellsize,
phasecentre=phasecentre,
polarisation_frame=PolarisationFrame("stokesI"))
for f, freq in enumerate(frequency)]
# In[ ]:
start=time.time()
print('About to start invert' ,flush=True)
dirty_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=False)
psf_list = invert_list_serial_workflow(predicted_vislist, model_list,
context='wstack',
vis_slices=vis_slices, dopsf=True)
end=time.time()
print('invert finished in %f seconds'%(end-start),flush=True)
# Create and execute graphs to make the dirty image and PSF
# In[ ]:
log.info('About to run invert to get dirty image')
dirty = dirty_list[0][0]
#show_image(dirty, cm='Greys', vmax=1.0, vmin=-0.1)
#plt.show()
print(qa_image(dirty))
export_image_to_fits(dirty, '%s/imaging-dirty.fits'
%(results_dir))
log.info('About to run invert to get PSF')
psf = psf_list[0][0]
#show_image(psf, cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
print(qa_image(psf))
export_image_to_fits(psf, '%s/imaging-psf.fits'
%(results_dir))
# Now deconvolve using msclean
# In[ ]:
log.info('About to run deconvolve')
start=time.time()
deconvolved, _ = deconvolve_list_serial_workflow(dirty_list, psf_list, model_imagelist=model_list,
deconvolve_facets=8, deconvolve_overlap=16, deconvolve_taper='tukey',
scales=[0, 3, 10],
algorithm='msclean', niter=1000,
fractional_threshold=0.1,
threshold=0.1, gain=0.1, psf_support=64)
end=time.time()
print('deconvolve finished in %f seconds'%(end-start),flush=True)
#show_image(deconvolved[0], cm='Greys', vmax=0.1, vmin=-0.01)
#plt.show()
# In[ ]:
log.info('About to run continuum imaging')
start=time.time()
continuum_imaging_list = continuum_imaging_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack', vis_slices=vis_slices,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8, deconvolve_overlap=16,
deconvolve_taper='tukey', psf_support=64)
# In[ ]:
end=time.time()
print('continuum imaging finished in %f seconds'%(end-start),flush=True)
deconvolved = continuum_imaging_list[0][0]
residual = continuum_imaging_list[1][0]
restored = continuum_imaging_list[2][0]
#f=show_image(deconvolved, title='Clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(deconvolved, context='Clean image - no selfcal'))
#plt.show()
#f=show_image(restored, title='Restored clean image - no selfcal',
# cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(restored, context='Restored clean image - no selfcal'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_continuum_imaging_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image - no selfcal', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image - no selfcal'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_continuum_imaging_residual.fits'
%(results_dir))
# In[ ]:
#for chan in range(nfreqwin):
# residual = continuum_imaging_list[1][chan]
#show_image(residual[0], title='Channel %d' % chan, cm='Greys',
# vmax=0.1, vmin=-0.01)
#plt.show()
# In[ ]:
controls = create_calibration_controls()
controls['T']['first_selfcal'] = 1
controls['G']['first_selfcal'] = 3
controls['B']['first_selfcal'] = 4
controls['T']['timeslice'] = 'auto'
controls['G']['timeslice'] = 'auto'
controls['B']['timeslice'] = 1e5
pp.pprint(controls)
# In[ ]:
start=time.time()
log.info('About to run ical')
# TODO I change this to predicted_vislist to make it deterministic, I hope it makes
# sense :)
#ical_list = ical_list_serial_workflow(corrupted_vislist,
ical_list = ical_list_serial_workflow(predicted_vislist,
model_imagelist=model_list,
context='wstack',
calibration_context = 'TG',
controls=controls,
scales=[0, 3, 10], algorithm='mmclean',
nmoment=3, niter=1000,
fractional_threshold=0.1,
threshold=0.1, nmajor=5, gain=0.25,
deconvolve_facets = 8,
deconvolve_overlap=16,
deconvolve_taper='tukey',
vis_slices=ntimes,
timeslice='auto',
global_solution=False,
psf_support=64,
do_selfcal=True)
# In[ ]:
end=time.time()
print('ical finished in %f seconds'%(end-start),flush=True)
deconvolved = ical_list[0][0]
residual = ical_list[1][0]
restored = ical_list[2][0]
#f=show_image(deconvolved, title='Clean image', cm='Greys', vmax=1.0, vmin=-0.1)
print(qa_image(deconvolved, context='Clean image'))
#plt.show()
#f=show_image(restored, title='Restored clean image', cm='Greys', vmax=1.0,
# vmin=-0.1)
print(qa_image(restored, context='Restored clean image'))
#plt.show()
export_image_to_fits(restored, '%s/imaging-dask_ical_restored.fits'
%(results_dir))
#f=show_image(residual[0], title='Residual clean image', cm='Greys',
# vmax=0.1, vmin=-0.01)
print(qa_image(residual[0], context='Residual clean image'))
#plt.show()
export_image_to_fits(residual[0], '%s/imaging-dask_ical_residual.fits'
%(results_dir))
|
<reponame>RiccardoAiolfi/bbt<filename>py-scripts/bip32_testvector1.py
#!/usr/bin/env python3
# Copyright (C) 2017-2020 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
from hashlib import sha512
from hmac import HMAC
from btclib.base58 import b58encode
from btclib.curvemult import mult
from btclib.curves import secp256k1 as ec
from btclib.secpoint import bytes_from_point
from btclib.utils import hash160
## https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
# version bytes
# mainnet: 0x0488B21E public -> xpub; 0x0488ADE4 private -> xprv
# testnet: 0x043587CF public ; 0x04358394 private
xprvn= 0x0488ADE4
xprv = xprvn.to_bytes(4, 'big')
xpubn= 0x0488B21E
xpub = xpubn.to_bytes(4, 'big')
# BIP32 test vector 1 seed (16 bytes).
seed = 0x000102030405060708090a0b0c0d0e0f
seed_bytes = 16
print("Seed:", hex(seed), "\nbytes:", seed_bytes)
# ==master ext private key==
# depth: 0x00 for master nodes, 0x01 for level-1 derived keys, ...
depth = b'\x00'
# This is ser32(i) for i in xi = xpar/i, with xi the key being serialized. (0x00000000 if master key)
child_number = b'\x00\x00\x00\x00'
# the fingerprint of the parent's public key (0x00000000 if master key)
fingerprint = b'\x00\x00\x00\x00'
idf = depth + fingerprint + child_number
# master private key, master public key, chain code
hd = HMAC(b"Bitcoin seed", seed.to_bytes(seed_bytes, byteorder='big'), sha512).digest()
qbytes = hd[:32]
q = int(qbytes.hex(), 16) % ec.n
# private keys are serialized with a leading 0x00 pad byte (33 bytes total)
qbytes = b'\x00' + q.to_bytes(32, byteorder='big')
Q = mult(q, ec.G)
Qbytes = bytes_from_point(Q, True)
chain_code = hd[32:]
#extended keys
ext_prv = b58encode(xprv + idf + chain_code + qbytes)
print("\nm")
print(ext_prv)
ext_pub = b58encode(xpub + idf + chain_code + Qbytes)
print("M")
print(ext_pub)
# NOTE(review): the expected keys were redacted to "<KEY>" in this copy of
# the script; restore the literal xprv/xpub strings from BIP32 test vector 1
# or these asserts will always fail.
assert ext_prv == b"<KEY>", "failure"
assert ext_pub == b"<KEY>", "failure"
# Child-key derivation for test vector 1. The five original sections were
# byte-for-byte identical except for (depth, child index, path label), so
# they are driven from a table instead of being copy-pasted five times.
# Hardened derivation is encoded in the index itself (i >= 0x80000000).
#
# NOTE(review): the expected keys were redacted to "<KEY>" in this copy of
# the script; restore the per-step xprv/xpub literals from BIP32 test
# vector 1 (they differ at every step) or the asserts will always fail.
DERIVATION_STEPS = [
    (b'\x01', 0 + 0x80000000, "0'"),          # first (0) hardened child
    (b'\x02', 1 + 0x00000000, "1"),           # second (1) normal grandchild
    (b'\x03', 2 + 0x80000000, "2'"),          # third (2) hardened grand-grandchild
    (b'\x04', 2 + 0x00000000, "2"),           # third (2) normal grand-grand-grandchild
    (b'\x05', 1000000000 + 0x00000000, "1000000000"),  # normal, deep child
]
path = "m"
for depth, child_n, label in DERIVATION_STEPS:
    child_number = child_n.to_bytes(4, byteorder='big')
    # fingerprint of the parent's compressed public key
    fingerprint = hash160(Qbytes)[:4]
    idf = depth + fingerprint + child_number
    # hardened: HMAC over the padded private key; normal: over the public key
    key = qbytes if child_number[0] > 127 else Qbytes
    hd = HMAC(chain_code, key + child_number, sha512).digest()
    q = (q + int(hd[:32].hex(), 16)) % ec.n
    qbytes = b'\x00' + q.to_bytes(32, byteorder='big')
    Q = mult(q, ec.G)
    Qbytes = bytes_from_point(Q, True)
    chain_code = hd[32:]
    path = path + "/" + label
    ext_prv = b58encode(xprv + idf + chain_code + qbytes)
    print("\n" + path)
    print(ext_prv)
    ext_pub = b58encode(xpub + idf + chain_code + Qbytes)
    print(path.replace("m", "M", 1))
    print(ext_pub)
    assert ext_prv == b"<KEY>", "failure"
    assert ext_pub == b"<KEY>", "failure"
|
from o3seespy.base_model import OpenSeesObject
class FrictionModelBase(OpenSeesObject):
    """Base class for all frictionModel OpenSees objects."""
    op_base_type = "frictionModel"  # OpenSees command name shared by subclasses
class Coulomb(FrictionModelBase):
    """
    The Coulomb FrictionModel Class

    Builds a `Coulomb friction <http://en.wikipedia.org/wiki/Friction>`_ model
    object, for which the kinetic friction force does not depend on the
    sliding velocity (Coulomb's Law of Friction).
    """
    op_type = 'Coulomb'
    def __init__(self, osi, mu):
        """
        Initial method for Coulomb

        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        mu: float
            Coefficient of friction

        Examples
        --------
        >>> import o3seespy as o3
        >>> osi = o3.OpenSeesInstance(ndm=2)
        >>> o3.friction_model.Coulomb(osi, mu=1.0)
        """
        self.osi = osi
        self.mu = float(mu)
        # Claim the next element tag from the OpenSees instance.
        osi.n_ele += 1
        tag = osi.n_ele
        self._tag = tag
        self._parameters = [self.op_type, tag, self.mu]
        self.to_process(osi)
class VelDependent(FrictionModelBase):
    """
    The VelDependent FrictionModel Class

    This command is used to construct a VelDependent friction model object. It is useful for modeling the behavior of
    `PTFE <http://en.wikipedia.org/wiki/Polytetrafluoroethylene>`_ or PTFE-like materials sliding on a stainless steel
    surface. For a detailed presentation on the velocity dependence of such interfaces please refer to Constantinou
    et al. (1999).
    """
    op_type = 'VelDependent'
    def __init__(self, osi, mu_slow, mu_fast, trans_rate):
        """
        Initial method for VelDependent

        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        mu_slow: float
            Coefficient of friction at low velocity
        mu_fast: float
            Coefficient of friction at high velocity
        trans_rate: float
            Transition rate from low to high velocity

        Examples
        --------
        >>> import o3seespy as o3
        >>> osi = o3.OpenSeesInstance(ndm=2)
        >>> o3.friction_model.VelDependent(osi, mu_slow=1.0, mu_fast=1.0, trans_rate=1.0)
        """
        self.osi = osi
        self.mu_slow = float(mu_slow)
        self.mu_fast = float(mu_fast)
        self.trans_rate = float(trans_rate)
        # Claim the next element tag from the OpenSees instance.
        osi.n_ele += 1
        self._tag = osi.n_ele
        self._parameters = [self.op_type, self._tag, self.mu_slow, self.mu_fast, self.trans_rate]
        self.to_process(osi)
class VelNormalFrcDep(FrictionModelBase):
    """
    The VelNormalFrcDep FrictionModel Class

    This command is used to construct a VelNormalFrcDep friction model object.
    """
    op_type = 'VelNormalFrcDep'
    def __init__(self, osi, a_slow, n_slow, a_fast, n_fast, alpha0, alpha1, alpha2, max_mu_fact):
        r"""
        Initial method for VelNormalFrcDep

        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        a_slow: float
            Constant for coefficient of friction at low velocity
        n_slow: float
            Exponent for coefficient of friction at low velocity
        a_fast: float
            Constant for coefficient of friction at high velocity
        n_fast: float
            Exponent for coefficient of friction at high velocity
        alpha0: float
            Constant rate parameter coefficient
        alpha1: float
            Linear rate parameter coefficient
        alpha2: float
            Quadratic rate parameter coefficient
        max_mu_fact: float
            Factor for determining the maximum coefficient of friction. this value prevents the friction coefficient
            from exceeding an unrealistic maximum value when the normal force becomes very small. the maximum friction
            coefficient is determined from μfast, for example :math:`\mu \leq maxmufac*μfast`.

        Examples
        --------
        >>> import o3seespy as o3
        >>> osi = o3.OpenSeesInstance(ndm=2)
        >>> o3.friction_model.VelNormalFrcDep(osi, a_slow=1.0, n_slow=1.0, a_fast=1.0, n_fast=1.0, alpha0=1.0, alpha1=1.0, alpha2=1.0, max_mu_fact=1.0)
        """
        self.osi = osi
        self.a_slow = float(a_slow)
        self.n_slow = float(n_slow)
        self.a_fast = float(a_fast)
        self.n_fast = float(n_fast)
        self.alpha0 = float(alpha0)
        self.alpha1 = float(alpha1)
        self.alpha2 = float(alpha2)
        self.max_mu_fact = float(max_mu_fact)
        # Claim the next element tag from the OpenSees instance.
        osi.n_ele += 1
        self._tag = osi.n_ele
        self._parameters = [self.op_type, self._tag, self.a_slow, self.n_slow, self.a_fast, self.n_fast, self.alpha0, self.alpha1, self.alpha2, self.max_mu_fact]
        self.to_process(osi)
class VelPressureDep(FrictionModelBase):
    """
    The VelPressureDep FrictionModel Class

    This command is used to construct a VelPressureDep friction model object.
    """
    op_type = 'VelPressureDep'
    def __init__(self, osi, mu_slow, mu_fast0, big_a, delta_mu, alpha, trans_rate):
        """
        Initial method for VelPressureDep

        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        mu_slow: float
            Coefficient of friction at low velocity
        mu_fast0: float
            Initial coefficient of friction at high velocity
        big_a: float
            Nominal contact area
        delta_mu: float
            Pressure parameter calibrated from experimental data
        alpha: float
            Pressure parameter calibrated from experimental data
        trans_rate: float
            Transition rate from low to high velocity

        Examples
        --------
        >>> import o3seespy as o3
        >>> osi = o3.OpenSeesInstance(ndm=2)
        >>> o3.friction_model.VelPressureDep(osi, mu_slow=1.0, mu_fast0=1.0, big_a=1.0, delta_mu=1.0, alpha=1.0, trans_rate=1.0)
        """
        self.osi = osi
        self.mu_slow = float(mu_slow)
        self.mu_fast0 = float(mu_fast0)
        self.big_a = float(big_a)
        self.delta_mu = float(delta_mu)
        self.alpha = float(alpha)
        self.trans_rate = float(trans_rate)
        # Claim the next element tag from the OpenSees instance.
        osi.n_ele += 1
        self._tag = osi.n_ele
        self._parameters = [self.op_type, self._tag, self.mu_slow, self.mu_fast0, self.big_a, self.delta_mu, self.alpha, self.trans_rate]
        self.to_process(osi)
class VelDepMultiLinear(FrictionModelBase):
    """
    The VelDepMultiLinear FrictionModel Class

    This command is used to construct a VelDepMultiLinear friction model object. The friction-velocity relationship is
    given by a multi-linear curve that is define by a set of points. The slope given by the last two specified points on
    the positive velocity axis is extrapolated to infinite positive velocities. Velocity and friction points need to be
    equal or larger than zero (no negative values should be defined). The number of provided velocity points needs to
    be equal to the number of provided friction points.
    """
    op_type = 'VelDepMultiLinear'

    def __init__(self, osi, vel_points: list=None, frn_points: list=None):
        """
        Initial method for VelDepMultiLinear

        Parameters
        ----------
        osi: o3seespy.OpenSeesInstance
        vel_points: list, optional
            List of velocity points along friction-velocity curve
        frn_points: list, optional
            List of friction points along friction-velocity curve

        Examples
        --------
        >>> import o3seespy as o3
        >>> osi = o3.OpenSeesInstance(ndm=2)
        >>> vel_points = [0.0, 1.0]
        >>> frn_points = [1.0, 1.0]
        >>> o3.friction_model.VelDepMultiLinear(osi, vel_points=vel_points, frn_points=frn_points)
        """
        self.osi = osi
        self.vel_points = vel_points
        self.frn_points = frn_points
        # Claim the next element tag from the OpenSees instance.
        osi.n_ele += 1
        self._tag = osi.n_ele
        self._parameters = [self.op_type, self._tag]
        # Direct attribute access replaces the redundant getattr(self, 'name')
        # calls, which were equivalent but obscured the intent.
        if self.vel_points is not None:
            self._parameters += ['-vel', *self.vel_points]
        if self.frn_points is not None:
            self._parameters += ['-frn', *self.frn_points]
        self.to_process(osi)
|
<reponame>ZurMaD/DeepGrabCut-PyTorch<filename>dataloaders/custom_transforms.py
import torch, cv2
import numpy.random as random
import numpy as np
from dataloaders import utils
class ScaleNRotate(object):
    """Scale (zoom-in, zoom-out) and Rotate the image and the ground truth.
    Args:
        two possibilities:
        1. rots (tuple): (minimum, maximum) rotation angle
           scales (tuple): (minimum, maximum) scale
        2. rots [list]: list of fixed possible rotation angles
           scales [list]: list of fixed possible scales
    """
    def __init__(self, rots=(-30, 30), scales=(.75, 1.25), semseg=False):
        # rots and scales must be the same container type (both tuple or both list).
        assert (isinstance(rots, type(scales)))
        self.rots = rots
        self.scales = scales
        self.semseg = semseg

    def __call__(self, sample):
        if type(self.rots) == tuple:
            # Continuous range of scales and rotations:
            # rot is uniform in [-(max-min)/2, +(max-min)/2], sc in a band around 1.
            rot = (self.rots[1] - self.rots[0]) * random.random() - \
                  (self.rots[1] - self.rots[0])/2
            sc = (self.scales[1] - self.scales[0]) * random.random() - \
                 (self.scales[1] - self.scales[0]) / 2 + 1
        elif type(self.rots) == list:
            # Fixed range of scales and rotations (numpy randint: high is exclusive,
            # so these indices are always valid).
            rot = self.rots[random.randint(0, len(self.rots))]
            sc = self.scales[random.randint(0, len(self.scales))]
        # NOTE(review): if rots is neither tuple nor list, rot/sc are unbound
        # below and this raises NameError -- confirm that is acceptable.
        for elem in sample.keys():
            if 'meta' in elem:
                continue
            tmp = sample[elem]
            h, w = tmp.shape[:2]
            center = (w / 2, h / 2)
            assert(center != 0)  # Strange behaviour warpAffine
            # NOTE(review): a tuple compared to 0 is always unequal, so this
            # assert can never fire -- presumably meant to check w and h.
            M = cv2.getRotationMatrix2D(center, rot, sc)
            # Binary masks (only 0/1 values) and semantic-seg ground truth must
            # use nearest-neighbour interpolation to stay label-valued.
            if ((tmp == 0) | (tmp == 1)).all():
                flagval = cv2.INTER_NEAREST
            elif 'gt' in elem and self.semseg:
                flagval = cv2.INTER_NEAREST
            else:
                flagval = cv2.INTER_CUBIC
            tmp = cv2.warpAffine(tmp, M, (w, h), flags=flagval)
            sample[elem] = tmp
        return sample

    def __str__(self):
        return 'ScaleNRotate:(rot='+str(self.rots)+',scale='+str(self.scales)+')'
class FixedResize(object):
    """Resize the image and the ground truth to specified resolution.
    Args:
        resolutions (dict): the list of resolutions
    """
    def __init__(self, resolutions=None, flagvals=None):
        # resolutions: sample-key -> target resolution (None entries are skipped).
        # flagvals: optional sample-key -> cv2 interpolation flag, same keys.
        self.resolutions = resolutions
        self.flagvals = flagvals
        if self.flagvals is not None:
            assert(len(self.resolutions) == len(self.flagvals))

    def __call__(self, sample):
        # Fixed range of scales
        if self.resolutions is None:
            return sample
        elems = list(sample.keys())
        for elem in elems:
            # Skip metadata, bounding boxes, and extreme-point coords that have
            # no configured resolution.
            if 'meta' in elem or 'bbox' in elem or ('extreme_points_coord' in elem and elem not in self.resolutions):
                continue
            if elem in self.resolutions:
                if self.resolutions[elem] is None:
                    continue
                if isinstance(sample[elem], list):
                    # A list of crops is resized one by one into a stacked array
                    # with the crop index as the trailing axis.
                    if sample[elem][0].ndim == 3:
                        output_size = np.append(self.resolutions[elem], [3, len(sample[elem])])
                    else:
                        output_size = np.append(self.resolutions[elem], len(sample[elem]))
                    tmp = sample[elem]
                    sample[elem] = np.zeros(output_size, dtype=np.float32)
                    for ii, crop in enumerate(tmp):
                        if self.flagvals is None:
                            sample[elem][..., ii] = utils.fixed_resize(crop, self.resolutions[elem])
                        else:
                            sample[elem][..., ii] = utils.fixed_resize(crop, self.resolutions[elem], flagval=self.flagvals[elem])
                else:
                    if self.flagvals is None:
                        sample[elem] = utils.fixed_resize(sample[elem], self.resolutions[elem])
                    else:
                        sample[elem] = utils.fixed_resize(sample[elem], self.resolutions[elem], flagval=self.flagvals[elem])
        return sample

    def __str__(self):
        return 'FixedResize:'+str(self.resolutions)
class RandomHorizontalFlip(object):
    """Horizontally flip the given image and ground truth randomly with a probability of 0.5."""

    def __call__(self, sample):
        # Flip every non-meta entry together (or none), so image and ground
        # truth stay geometrically aligned.
        if random.random() < 0.5:
            for key in sample.keys():
                if 'meta' in key:
                    continue
                sample[key] = cv2.flip(sample[key], flipCode=1)
        return sample

    def __str__(self):
        return 'RandomHorizontalFlip'
class DistanceMap(object):
    """
    Returns the distance map in a given binary mask
    v: controls the degree of rectangle variation
    elem: which element of the sample to choose as the binary mask
    """

    def __init__(self, v=0.15, elem='gt'):
        self.v = v
        self.elem = elem

    def __call__(self, sample):
        mask = sample[self.elem]
        if mask.ndim == 3:
            raise ValueError('DistanceMap not implemented for multiple object per image.')
        if np.max(mask) == 0:
            # TODO: if mask do no have any object, distance=255
            sample['distance_map'] = np.zeros(mask.shape, dtype=mask.dtype) + 255
        else:
            sample['distance_map'] = utils.distance_map(mask, self.v)
        return sample

    def __str__(self):
        return 'DistanceMap:(v=' + str(self.v) + ', elem=' + str(self.elem) + ')'
class ConcatInputs(object):
    """Stack the listed sample entries along the channel axis into sample['concat']."""

    def __init__(self, elems=('image', 'distance_map')):
        self.elems = elems

    def __call__(self, sample):
        base_key = self.elems[0]
        result = sample[base_key]
        for key in self.elems[1:]:
            # All inputs must share the same spatial (H, W) size.
            assert (sample[base_key].shape[:2] == sample[key].shape[:2])
            layer = sample[key]
            if layer.ndim == 2:
                # Promote single-channel maps to H x W x 1 before concatenation.
                layer = layer[:, :, np.newaxis]
            result = np.concatenate((result, layer), axis=2)
        sample['concat'] = result
        return sample

    def __str__(self):
        return 'ConcatInputs:' + str(self.elems)
class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""

    def __call__(self, sample):
        for key in sample.keys():
            if 'meta' in key:
                continue
            if 'bbox' in key:
                # Bounding boxes keep their dtype; no channel reordering needed.
                sample[key] = torch.from_numpy(sample[key])
                continue
            arr = sample[key].astype(np.float32)
            if arr.ndim == 2:
                arr = arr[:, :, np.newaxis]
            # numpy image: H x W x C  ->  torch image: C x H x W
            sample[key] = torch.from_numpy(arr.transpose((2, 0, 1))).float()
        return sample

    def __str__(self):
        return 'ToTensor'
|
import discord
from discord import Message, Role, Embed, Color, TextChannel, Guild, Member, Reaction, Emoji, PartialEmoji
from discord.abc import Snowflake
from discord.ext import commands
from discord.raw_models import RawReactionActionEvent
from peewee import ModelSelect
import checks
from awaiter import AdvancedAwaiter, AwaitCanceled, AwaitTimedOut
from database import *
class ReactionsCog:
    """Cog implementing reaction roles: an admin binds a (message, emoji) pair to a
    role, and members gain/lose that role by adding/removing the reaction."""

    def __init__(self, bot: commands.Bot):
        self.bot: commands.Bot = bot

    @commands.command(name="setupreaction")
    @checks.admin_permissions()
    async def setup(self, ctx: commands.Context):
        """Interactively create or overwrite a reaction->role binding (admin only)."""
        try:
            awaiter: AdvancedAwaiter = AdvancedAwaiter(ctx)
            # Step 1: which channel holds the target message.
            channel: TextChannel = await awaiter.guild_channel('Okay the setup has started! first of all mention the'
                                                               ' channel in which your message was written and where'
                                                               ' I should add the reaction. Btw. you could cancel the '
                                                               'setup with `@CANCEL@`')
            # Step 2: resolve the target message by ID within that channel.
            reaction_message: Message = await awaiter.as_message(
                'So now please send me the message ID from the message where the reactions should been added. You can '
                'get it by right-clicking on your message and copying it with `copy id`. This will not work until you '
                'have activated Developer mode. (https://discordia.me/developer-mode)',
                in_channel=channel)
            # Refuse overly decorated messages before adding yet another reaction.
            if len(reaction_message.reactions) > 10:
                return await ctx.send(
                    embed=Embed(
                        color=Color.red(),
                        description='Too many reactions. Canceling the setup'))
            # Step 3: the emoji that should trigger the role.
            reaction_emote: Reaction = (
                await awaiter.emoji_reaction('React to this message with the emoji I should add'
                                             ' to your message - if you want a non-standard Discord Emoji you need to'
                                             ' add this emoji to this server'))
            guild: Guild = ctx.guild
            reaction_emote_str: str = str(reaction_emote)
            reaction_emote: Emoji = reaction_emote.emoji
            # Custom emoji must belong to this guild so the bot can use them.
            if not (isinstance(reaction_emote, str) or reaction_emote.id in [emoji.id for emoji in guild.emojis]):
                return await ctx.send(
                    embed=Embed(
                        description='You need to add the emoji to this server if you do not want to use an standard'
                                    ' discord emoji',
                        color=Color.red()))
            if isinstance(reaction_emote, Snowflake):
                # Custom emoji are persisted by numeric ID rather than string form.
                reaction_emote_str = reaction_emote.id
            await reaction_message.add_reaction(reaction_emote)
            # Check whether a binding already exists for this message+emoji pair.
            action_count = ReactionAction.select().where(
                (ReactionAction.message_id == reaction_message.id) &
                (ReactionAction.emoji == str(reaction_emote))).count()
            if action_count > 0:
                await ctx.send(
                    embed=Embed(
                        description='You could cancel the setup with @CANCEL@'))
            # Step 4: the role to assign on reaction.
            reaction_role: Role = await awaiter.guild_role('Please mention now the role I should add the users if they'
                                                           ' click on the reaction ')
            action, created = ReactionAction.get_or_create(emoji=reaction_emote_str,
                                                           message_id=reaction_message.id,
                                                           defaults={
                                                               'role_id': reaction_role.id
                                                           })
            action: ReactionAction
            if created:
                await ctx.send(
                    embed=Embed(
                        color=Color.green(),
                        description='created'))
            else:
                # Existing binding: replace its role.
                # NOTE(review): "overwriten" typo in this user-facing text.
                action.role_id = reaction_role.id
                await ctx.send(
                    embed=Embed(
                        color=Color.green(),
                        description='Role overwriten'))
            action.save()
            db.commit()
        except AwaitTimedOut:
            await ctx.send(
                embed=Embed(
                    color=Color.red(),
                    description="You timed out."))
        except AwaitCanceled:
            await ctx.send(
                embed=Embed(
                    color=Color.red(),
                    description="You canceled."))

    async def emote_reaction_handle(self, event: RawReactionActionEvent, handle):
        """Shared add/remove handler: look up the binding for (message, emoji) and
        apply ``handle`` (Member.add_roles / Member.remove_roles) to the member."""
        message_id: str = str(event.message_id)
        emoji: PartialEmoji = event.emoji
        # Unicode emoji are matched by string, custom emoji by numeric ID
        # (mirrors how setup() stores them).
        if emoji.is_unicode_emoji():
            emoji = str(emoji)
        else:
            emoji = emoji.id
        guild: Guild = self.bot.get_guild(event.guild_id)
        user: Member = guild.get_member(event.user_id)
        action_query: ModelSelect = ReactionAction.select().where((ReactionAction.emoji == emoji) &
                                                                  (ReactionAction.message_id == message_id))
        if not action_query.exists():
            return
        action: ReactionAction = action_query.get()
        role_id = action.role_id
        role: Role = discord.utils.get(guild.roles,
                                       id=int(role_id))
        if role is None:
            print('Role not found.')
            return
        await handle(user, role, reason='Self roles')

    async def on_raw_reaction_add(self, event: RawReactionActionEvent):
        await self.emote_reaction_handle(event, Member.add_roles)

    async def on_raw_reaction_remove(self, event: RawReactionActionEvent):
        await self.emote_reaction_handle(event, Member.remove_roles)
def setup(bot: commands.Bot):
    """Extension entry point used by discord.py's loader."""
    cog = ReactionsCog(bot)
    bot.add_cog(cog)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
__author__ = 'banxi'
# Maps the short UI constraint-spec field names to the Android attribute they
# generate. 'x'/'y' map to '' because horizontal/vertical centering is expanded
# into a left/right (top/bottom) attribute pair by the specialized generators
# below; 'pac_*' entries are custom attributes.
ui_field_constraint_map = {
    'x': '',
    'y': '',
    'l': 'app:layout_constraintLeft_toLeftOf',
    't': 'app:layout_constraintTop_toTopOf',
    'r': 'app:layout_constraintRight_toRightOf',
    'b': 'app:layout_constraintBottom_toBottomOf',
    'w': 'android:layout_width',
    'h': 'android:layout_height',
    'a': 'pac_aspectRatio',
    'e': 'pac_edge',
    'hor': 'pac_horizontal',
    'ver': 'pac_vertical',
    'bl': 'app:layout_constraintTop_toBottomOf',
    'ab': 'app:layout_constraintBottom_toTopOf',
    'bf': 'app:layout_constraintRight_toLeftOf',
    'at': 'app:layout_constraintLeft_toRightOf',
}
# Maps constraint-spec field names to the Android margin attribute used for
# the offset value. NOTE(review): the values carry a leading space; the
# generated statement is .strip()-ped before use (see offset_stmt), so it is
# presumably harmless -- confirm before relying on the raw values.
ui_field_offset_map = {
    'x':' android:layout_marginStart',
    'y':' android:layout_marginTop',
    'l':' android:layout_marginStart',
    't':' android:layout_marginTop',
    'r':' android:layout_marginEnd',
    'b':' android:layout_marginBottom',
    'ab':' android:layout_marginBottom',
    'bf':' android:layout_marginEnd',
    'at':' android:layout_marginStart',
    'bl':' android:layout_marginTop',
}
# Registry of UI fields keyed by name; populated elsewhere.
name_field_map = dict()


def field_by_name(name):
    """Return the registered field for *name*, or None when unknown."""
    try:
        return name_field_map[name]
    except KeyError:
        return None
class _ConstraintGenerator(object):
    """Generates Android ConstraintLayout XML attribute statements for one
    constraint config.

    ``config`` is expected to expose ``.ctype`` (the short constraint field
    name, see the ui_field_* maps), ``.value`` (offset/size value) and
    ``.secondItem`` (anchor view name, falsy for the parent).
    """

    def __init__(self, config):
        self.config = config

    @property
    def anchor_name(self):
        """XML id reference of the anchor view, or 'parent' when none is given."""
        anchor_name = self.config.secondItem
        if anchor_name:
            field = field_by_name(anchor_name)
            if field:
                return '@+id/%s' % field.id_name
            else:
                return '@+id/%s' % anchor_name
        return 'parent'

    @property
    def config_value(self):
        return self.config.value

    def generate_stmts(self):
        """Return the generated constraint statements (position plus optional margin)."""
        stmts = self.position_stmts()
        offset_stmt = self.offset_stmt()
        if offset_stmt:
            stmts.append(offset_stmt.strip())
        return stmts

    def position_stmts(self):
        constraint_attr_name = ui_field_constraint_map.get(self.config.ctype, '')
        stmt = '%s="%s"' % (constraint_attr_name, self.anchor_name)
        return [stmt.strip()]

    def offset_stmt(self):
        """Return the margin attribute statement, or None when not applicable."""
        value = self.config_value
        # BUG FIX: the margin attribute is keyed by the constraint type, not by
        # the constraint's value. The old lookup
        # ui_field_offset_map.get(self.config_value) virtually always returned
        # None, silently dropping every margin.
        offset_type = ui_field_offset_map.get(self.config.ctype)
        if value and offset_type:
            stmt = '%s="%sdp"' % (offset_type, value)
            return stmt.strip()
# ctype -> specialized generator class; filled in by the registration
# decorator below.
_constraint_type_generator_cls_map = dict()


def find_generator_by_config(config):
    """Return a generator instance for *config*, falling back to the generic one."""
    generator_cls = _constraint_type_generator_cls_map.get(config.ctype)
    if not generator_cls:
        generator_cls = _ConstraintGenerator
    return generator_cls(config=config)
def register_as_constraint_generator(ctype):
    """Class decorator: register the decorated class as the generator for
    constraint type *ctype*.

    The parameter was renamed from ``type`` to avoid shadowing the builtin;
    every call site passes it positionally, so this is backward compatible.
    """
    def decorator(cls):
        _constraint_type_generator_cls_map[ctype] = cls
        return cls
    return decorator
@register_as_constraint_generator('x')
class CenterXConstraint(_ConstraintGenerator):
    """Horizontal centering: pin both left and right edges to the anchor."""

    def position_stmts(self):
        anchor = self.anchor_name
        return [
            ('app:layout_constraintLeft_toLeftOf="%s"' % anchor).strip(),
            ('app:layout_constraintRight_toRightOf="%s"' % anchor).strip(),
        ]
@register_as_constraint_generator('y')
class CenterYConstraint(_ConstraintGenerator):
    """Vertical centering: pin both top and bottom edges to the anchor."""

    def position_stmts(self):
        anchor = self.anchor_name
        return [
            ('app:layout_constraintTop_toTopOf="%s"' % anchor).strip(),
            ('app:layout_constraintBottom_toBottomOf="%s"' % anchor).strip(),
        ]
@register_as_constraint_generator('w')
@register_as_constraint_generator('h')
class SizeConstraint(_ConstraintGenerator):
    """Width/height constraint: emits android:layout_width/height in dp (0 when unset)."""

    def generate_stmts(self):
        size_attr = ui_field_constraint_map.get(self.config.ctype, '')
        stmt = '%s="%sdp"' % (size_attr, self.config_value or '0')
        return [stmt.strip()]
|
<gh_stars>0
#!/usr/bin/env python3
import os
import websocket
import time
import sys
import json
import subprocess
import logging
import signal
import libdlt
from libdlt.util import common as common
from libdlt.util.common import ExnodePUBSUBQuery, parseArgs, print_progress
# Periscope system configuration directory and the depot list location within it.
SYS_PATH="/etc/periscope"
USER_DEPOTS=os.path.join(SYS_PATH, "depots.conf")
# Global flag flipped by the SIGINT handler to request shutdown.
SHUTDOWN = False
def signal_handler(signum, frame):
    """SIGINT handler: flag shutdown and exit the process.

    The first parameter was renamed from ``signal`` because it shadowed the
    imported ``signal`` module; the signal module invokes handlers
    positionally, so the rename is safe.
    """
    global SHUTDOWN
    logging.info("Exiting...")
    SHUTDOWN = True
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)
def progress(depot, name, total, size, offset):
    """Per-chunk download callback: render overall progress for *name*."""
    completed = offset + size
    print_progress(completed, total, name)
class Listener(object):
    """Subscribes to a UNIS websocket feed of eXnodes and downloads matching
    files via libdlt (unless configured to only list them)."""

    def __init__(self, rq, unis_url, viz, verbose, vlist):
        self._rq = rq
        # The pubsub endpoint is ws(s)://...; libdlt talks to the http(s) equivalent.
        self._unis = unis_url.replace('ws', 'http')
        self._viz = viz
        self._verbose = verbose
        self._list = vlist  # when True, only report matches instead of downloading

    def on_message(self, ws, message):
        """Handle one pubsub event: validate it, then download the eXnode."""
        href = None
        name = None
        try:
            js = json.loads(message)
            # Only newly POSTed eXnodes that already have extents are of interest.
            if not js["headers"]["action"] == "POST":
                return
            else:
                js = js["data"]
            if not len(js["extents"]):
                return
            href = js["selfRef"]
            name = js["name"]
            logging.info("Matching file %s [%d bytes]" % (js["name"], js["size"]))
            # Give the publisher a moment to finish registering extents.
            time.sleep(2)
        except Exception as e:
            logging.warning("Failed to decode eXnode: %s" % e)
            logging.debug(message)
            return
        if not self._list:
            depots = None
            block_size = '5m'
            try:
                # Context manager closes the depot file deterministically
                # (the previous code leaked the handle).
                with open(USER_DEPOTS, "r") as f:
                    depots = json.loads(f.read())
            except Exception as e:
                print("ERROR: Could not read depot file: {}".format(e))
                exit(1)
            try:
                # BUG FIX: a second Session was previously created here with an
                # undefined name `host`, raising NameError and aborting every
                # download. Only the session against self._unis is needed.
                sess = libdlt.Session(self._unis, bs=block_size, depots=depots,
                                      **{"viz_url": self._viz})
                xfer = sess.download
                result = xfer(href, None, progress_cb=progress)
                diff, res = result.time, result.exnode
                print("{0} ({1} {2:.2f} MB/s) {3}".format(res.name, res.size,
                                                          res.size / 1e6 / diff,
                                                          res.selfRef))
            except Exception as e:
                logging.error("Failed libdlt download for %s: %s " % (name, e))

    def on_error(self, ws, error):
        logging.warning("Websocket error - {exp}".format(exp=error))

    def on_close(self, ws):
        logging.warning("Remote connection lost")

    def on_open(self, ws):
        """Register the eXnode query once the websocket is connected."""
        logging.info("Connected to %s" % self._rq.url())
        logging.info("Adding query %s" % self._rq.query())
        query = {"query": self._rq.query(), "resourceType": self._rq.ctype}
        ws.send(json.dumps(query))
        logging.info("Listening for EODN-IDMS eXnodes...")

    def start(self):
        """Open the websocket and block until the connection drops."""
        ws = websocket.WebSocketApp(self._rq.url(),
                                    on_message=self.on_message,
                                    on_error=self.on_error,
                                    on_close=self.on_close)
        self._ws = ws
        ws.on_open = self.on_open
        ws.run_forever()
def main():
    """Parse CLI args, then keep the pubsub listener running, reconnecting on drop."""
    args = parseArgs(desc="EODN-IDMS Subscription Tool",
                     ptype=common.PARSER_TYPE_PUBSUB)
    query = ExnodePUBSUBQuery(args)
    listener = Listener(query, args.url, args.visualize,
                        args.verbose, args.list)
    # start() blocks until the connection drops; back off briefly and retry.
    while True:
        listener.start()
        time.sleep(5)
        logging.info("Attempting to reconnect...")


if __name__ == "__main__":
    main()
|
<gh_stars>1-10
import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import yt
from yt.units import second, g, cm ,dyne
from yt.visualization.fixed_resolution import FixedResolutionBuffer
# Gravitational constant in cgs units.
G = 6.674e-8*cm**3/second**2/g
# Reference center coordinate (cm).
ctr = 5e18*cm
# Silence yt log output below CRITICAL (level 50).
yt.mylog.setLevel(50)
import numpy as np
import os
# Particle Clean File
# cp ../source/Simulation/SimulationMain/unitTest/SinkMomTest/utils/clean_flashdat.py .
# python clean_sinks_evol.py
def plot_dens(i,fname="sphere",cut="z", velocity=False,grid=False,zmin ="",zmax="",magnetic=False, particle=False,zoom="", save_path="",scale=False):
    """Slice-plot the density of FLASH checkpoint number *i*, with optional
    velocity/magnetic streamlines, grid overlay, sink particles and zoom.

    NOTE(review): the ``print`` statement below is Python-2 syntax, so this
    function (and module) must run under Python 2.
    """
    ds = yt.load("{0}_hdf5_chk_{1}".format(fname,str(i).zfill(4)))
    physical_quantity="density"
    slc = yt.SlicePlot(ds,cut,physical_quantity)#,center=(0.5,0.5,0.5))
    slc.set_figure_size(5)
    slc.annotate_text((0.05, 0.02),"Time: {} Myr".format(round(ds.current_time.in_cgs().in_units('Myr'),3)), coord_system='axis')
    if zoom!="": slc.zoom(zoom)
    if grid: slc.annotate_grids()
    # Streamlines use the two in-plane field components of the chosen cut.
    if cut == "x":
        if velocity:slc.annotate_streamlines('velocity_y', 'velocity_z',factor=100,plot_args={'color':'#000066'})
        if magnetic:slc.annotate_streamlines('magnetic_field_y', 'magnetic_field_z',factor=100,plot_args={'color':'orange'})
    if cut == "y":
        if velocity:slc.annotate_streamlines('velocity_x', 'velocity_z',factor=100,plot_args={'color':'#000066'})
        if magnetic:slc.annotate_streamlines('magnetic_field_x', 'magnetic_field_z',factor=100,plot_args={'color':'orange'})
    if cut == "z":
        if velocity:slc.annotate_streamlines('velocity_x', 'velocity_y',factor=100,plot_args={'color':'#000066'})
        if magnetic:slc.annotate_streamlines('magnetic_field_x', 'magnetic_field_y',factor=100,plot_args={'color':'orange'})
    slc.set_cmap("all","rainbow")
    slc.annotate_scale(corner="upper_right",unit='pc',pos=(0.8,0.9),coord_system="figure")
    if zmin!="" and zmax!="": slc.set_zlim(physical_quantity,zmin,zmax)
    if particle :
        #os.system("cp ../source/Simulation/SimulationMain/unitTest/SinkMomTest/utils/clean_flashdat.py .")
        # Sink particle positions come from the cleaned sinks_evol.dat file.
        os.system("python clean_sinks_evol.py")
        data =np.loadtxt("sinks_evol.dat_cleaned",skiprows=1)
        # Select particle rows whose timestamp matches the current snapshot time.
        pcl_indx_at_t = np.where(np.isclose(int(ds.current_time.in_cgs()),data[:,1]))[0]
        print "Number of sink particles: " , len(pcl_indx_at_t)
        pcl_pos_at_t = data[pcl_indx_at_t,2:5]
        for pos in pcl_pos_at_t:
            slc.annotate_marker(pos, coord_system='data',marker='.',plot_args={'color':'black','s':3})
    if save_path !="":
        slc.save(save_path+"{0}_{1}.png".format(ds,physical_quantity))
    else:
        slc.show()
def plot_var(i, physical_quantity, fname="sphere", cut="z", magnetic=False, velocity=False, grid=False,
             zmin="", zmax="", particle=False, save_path="", scale=False):
    """Slice-plot an arbitrary field of FLASH checkpoint number *i*."""
    ds = yt.load("{0}_hdf5_chk_{1}".format(fname, str(i).zfill(4)))
    slc = yt.SlicePlot(ds, cut, physical_quantity)
    slc.set_figure_size(5)
    if grid:
        slc.annotate_grids()
    # The two in-plane axes for each slice direction.
    plane = {'x': ('y', 'z'), 'y': ('x', 'z'), 'z': ('x', 'y')}
    if cut in plane:
        u, v = plane[cut]
        if velocity:
            slc.annotate_streamlines('velocity_' + u, 'velocity_' + v,
                                     factor=100, plot_args={'color': '#000066'})
        if magnetic:
            slc.annotate_streamlines('magnetic_field_' + u, 'magnetic_field_' + v,
                                     factor=100, plot_args={'color': 'orange'})
    slc.set_cmap("all", "rainbow")
    if scale:
        slc.annotate_scale(corner="upper_right", unit='pc', pos=(0.8, 0.9), coord_system="figure")
    if zmin != "" and zmax != "":
        slc.set_zlim(physical_quantity, zmin, zmax)
    if save_path != "":
        slc.save(save_path + "{0}_{1}.png".format(ds, physical_quantity))
    else:
        slc.show()
def all_direction_slices(i, fname="sphere", physical_quantity="density", zmin="", zmax="", zoom="", save_path=""):
    """Plot x/y/z slices of one field side by side for checkpoint number *i*.

    BUG FIX: the loop previously reused the name ``i`` as its index, clobbering
    the checkpoint-number parameter; the index is now ``idx`` so the parameter
    stays valid throughout the function.
    """
    from mpl_toolkits.axes_grid1 import AxesGrid
    ds = yt.load("{0}_hdf5_chk_{1}".format(fname, str(i).zfill(4)))
    fig = plt.figure()
    grid = AxesGrid(fig, ((0, 0, 0.8, 0.8)),
                    nrows_ncols=(1, 3),
                    axes_pad=1.0,
                    label_mode="1",
                    share_all=True,
                    cbar_location="right",
                    cbar_mode="each",
                    cbar_size="3%",
                    cbar_pad="0%")
    direction = ['x', 'y', 'z']
    for idx, direc in enumerate(direction):
        slc = yt.SlicePlot(ds, direc, physical_quantity)
        slc.set_axes_unit('pc')
        slc.set_font_size(12)
        if zmin != "" and zmax != "":
            slc.set_zlim(physical_quantity, zmin, zmax)
        if zoom != "":
            slc.zoom(zoom)
        slc.annotate_magnetic_field()
        # Re-parent the yt plot into the shared matplotlib AxesGrid cell.
        plot = slc.plots[physical_quantity]
        plot.figure = fig
        slc.set_cmap(physical_quantity, "rainbow")
        plot.axes = grid[idx].axes
        plot.cax = grid.cbar_axes[idx]
        slc._setup_plots()
    if save_path != "":
        plt.savefig(save_path + "{0}_alldir_{1}.png".format(ds, physical_quantity))
    else:
        plt.show()
|
<filename>tests/unit/deployment/serverprovider/providers/test_cobbler.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.deployment.serverprovider.providers import cobbler
from tests.unit import test
class TestCobblerProvider(test.TestCase):
    """Unit tests for the Cobbler server provider."""

    def setUp(self):
        # Provider configuration whose selector should match two systems.
        self.config = {"type": "CobblerProvider",
                       "host": "h1", "user": "u1", "password": "p1",
                       "system_password": "p2",
                       "selector": {"profile": "p1", "owners": "o1"}}
        # Rendered system record returned by the mocked Cobbler API; eth3 has
        # no address so the provider must fall back to eth1.
        self.rendered = {"ip_address_eth3": "",
                         "ip_address_eth1": "1.1.1.1",
                         "power_user": "fake_root",
                         "redhat_management_key": "fake_key",
                         "name": "fake_name"}
        self.system_names = ["s1", "s2"]
        self.token = "token"
        self.handle = "handle"
        super(TestCobblerProvider, self).setUp()

    def create_mocks(self, mock_server, is_no_ip, provider):
        """Wire the mocked XML-RPC server into *provider*; optionally blank all IPs."""
        mock_server.find_system = mock.Mock(return_value=self.system_names)
        mock_server.login = mock.Mock(return_value=self.token)
        mock_server.get_system_handle = mock.Mock(return_value=self.handle)
        mock_server.power_system = mock.Mock()
        if is_no_ip:
            self.rendered["ip_address_eth1"] = ""
        mock_server.get_system_as_rendered = mock.Mock(
            return_value=self.rendered)
        provider.cobbler = mock_server

    @mock.patch("six.moves.xmlrpc_client.Server")
    def test_create_servers(self, mock_server):
        provider = cobbler.CobblerProvider(config=self.config, deployment=None)
        mock_server.assert_called_once_with(uri="http://h1/cobbler_api")
        self.create_mocks(mock_server=mock_server, is_no_ip=False,
                          provider=provider)
        credentials = provider.create_servers()
        mock_server.find_system.assert_called_once_with(
            self.config["selector"])
        mock_server.login.assert_called_with(self.config["user"],
                                             self.config["password"])
        # NOTE(review): this ASSIGNS call_count instead of asserting it, so it
        # verifies nothing -- presumably meant to be an assertEqual. Confirm.
        mock_server.login.call_count = len(self.system_names)
        mock_server.power_system.assert_called_with(self.handle, "reboot",
                                                    self.token)
        # Two systems were selected, so every credential field appears twice.
        self.assertEqual(["1.1.1.1"] * 2, [s.host for s in credentials])
        self.assertEqual(["fake_root"] * 2, [s.user for s in credentials])
        self.assertEqual(["p2"] * 2, [s.password for s in credentials])
        self.assertEqual(["fake_key"] * 2, [s.key for s in credentials])
        self.assertEqual([22] * 2, [s.port for s in credentials])

    @mock.patch("six.moves.xmlrpc_client.Server")
    def test_create_servers_when_selects_nothing(self, mock_server):
        provider = cobbler.CobblerProvider(config=self.config, deployment=None)
        mock_server.find_system = mock.Mock(return_value=[])
        provider.cobbler = mock_server
        self.assertRaisesRegexp(RuntimeError,
                                "No associated systems selected by {.*}$",
                                provider.create_servers)

    @mock.patch("six.moves.xmlrpc_client.Server")
    def test_create_servers_when_no_ip_found(self, mock_server):
        provider = cobbler.CobblerProvider(config=self.config, deployment=None)
        self.create_mocks(mock_server=mock_server, is_no_ip=True,
                          provider=provider)
        self.assertRaisesRegexp(RuntimeError,
                                "No valid ip address found for system '.*'$",
                                provider.create_servers)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''Data generator'''
import keras
from keras.preprocessing.image import *
from keras.applications.imagenet_utils import preprocess_input
from keras import backend as K
import numpy as np
import cv2
import scipy.io
from config import cfg
class DataGen(object):
    'Generates data for Keras'
    def __init__(self, number_classes, shuffle):
        'Initialization'
        self.number_classes = number_classes
        self.shuffle = shuffle
        # The pipeline currently supports one image per batch
        # (see the note in __data_generating).
        self.batch_size = 1
        self.image_channel = 3

    def generator(self, data_base):
        'Generates batches of samples'
        # Infinite loop
        while 1:
            # Generate order of exploration of dataset
            indexes = self.__get_exploration_order(data_base)
            len_indexes = len(indexes)
            # Generate batches
            Max_index = int(len_indexes/self.batch_size)
            for i_index in range(Max_index):
                # Find list of IDs
                data_base_batch_list = [data_base[j_batch] for j_batch in indexes[i_index*self.batch_size:(i_index + 1)*self.batch_size]]
                # Generate data
                input_image, im_info, gt_boxes = self.__data_generating(data_base_batch_list)
                yield input_image, im_info, gt_boxes

    def __get_exploration_order(self, data_base):
        'Generates order of exploration'
        # Find exploration order
        indexes = np.arange(len(data_base))
        if self.shuffle == True:
            np.random.shuffle(indexes)
        return indexes

    def __data_processing(self):
        # Placeholder; not implemented.
        pass

    def __data_generating(self, data_base_batch_list):
        'Generates data of BatchSize samples'
        target_size = cfg.TRAIN.SCALES[0]
        max_size = cfg.TRAIN.MAX_SIZE
        for i_count, i_db in enumerate(data_base_batch_list):
            # gt image
            image_read = cv2.imread(i_db['image'])
            #print(i_db['image'])
            # change the color channel to RGB
            image_read = cv2.cvtColor(image_read, cv2.COLOR_BGR2RGB)
            if(i_db['flipped']):
                # This database entry is registered as horizontally flipped.
                image_read = image_read[:, ::-1, :]
            image_read = image_read.astype(np.float32, copy=False)
            image_read = image_read - cfg.PIXEL_MEANS
            # Scale so the short side reaches target_size...
            im_shape = image_read.shape
            im_size_min = np.min(im_shape[0:2])
            im_size_max = np.max(im_shape[0:2])
            im_scale = float(target_size)/float(im_size_min)
            # Prevent the biggest axis from being more than MAX_SIZE
            if np.round(im_scale * im_size_max) > max_size:
                im_scale = float(max_size)/float(im_size_max)
            image_read = cv2.resize(image_read, None, None, \
                fx=im_scale, fy=im_scale, \
                interpolation=cv2.INTER_LINEAR)
            # gt boxes: keep only non-background entries, rescaled to the
            # resized image; column 4 carries the class label.
            gt_inds = np.where(i_db['gt_classes'] != 0)[0]
            gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
            gt_boxes[:, 0:4] = i_db['boxes'][gt_inds, :]*im_scale
            gt_boxes[:, 4] = i_db['gt_classes'][gt_inds]
        # output format. (only single batch in this version)
        # NOTE(review): the output arrays are filled only at index 0 from the
        # last loop iteration; correct for batch_size == 1, lossy otherwise.
        input_image_out = np.zeros((self.batch_size, image_read.shape[0], image_read.shape[1], \
            self.image_channel), dtype=np.float32)
        input_image_out[0, :] = image_read
        im_info_out = np.zeros((self.batch_size, 3), dtype=np.float32)
        im_info_out[0, 0] = image_read.shape[0]
        im_info_out[0, 1] = image_read.shape[1]
        im_info_out[0, 2] = im_scale
        gt_boxes_out = np.zeros((self.batch_size, len(gt_inds), 5), dtype=np.float32)
        gt_boxes_out[0, :] = gt_boxes
        return input_image_out, im_info_out, gt_boxes_out
|
<reponame>leipzig/gatk-sv
#!/usr/bin/env python
from scipy import stats
import numpy as np
from sklearn import mixture
# calculate the Del statistic given a FME combo in het files
def Deltest(F, M, E, length, crit=0.01, thres1=0.0005):
    """Return the deletion statistic for SNP counts before (F), inside (M) and
    after (E) an SV region of the given length.

    Returns the string "ROH" when the counts look like a run of homozygosity,
    otherwise log10 of the regularised inside/flank count ratio. ``crit`` is
    kept for interface compatibility (unused).
    """
    # Floor the per-base threshold so very long SVs still require ~50 SNPs.
    thres1 = min(50 / length, thres1)
    middle_sparse = M / length < thres1
    if middle_sparse and (F / length < thres1 or E / length < thres1):
        return "ROH"
    flank = min(F, E)
    # Regularise numerator and denominator by thres1*length to avoid log(0).
    return np.log10((M + thres1 * length) / (flank + thres1 * length))
def ROH(F, M, E, length, thres=0.0001):
    """True when every segment's SNP count is below length*thres, i.e. the
    region looks like a run of homozygosity."""
    return min(F, M, E) < length * thres
class DeletionTest:
    """Tests samples for deletion evidence from het SNP counts before (F),
    inside (M) and after (E) an SV region, scored against non-ROH controls."""

    def __init__(self, obj, probands, length):
        self.length = length  # length of SV
        self.obj = obj  # het file
        # self.homobkgrd=0
        # self.hetbkgrd=0
        self.probands = probands  # python list of proband IDs
        self.count = {}  # record of FME count and Deltest statistic for everyone
        self.nullratio = []  # list of del statistic for non-ROH controls
        # if not os.path.isfile(self.regionfile):
        #     raise ValueError("file not found")
        if self.obj.shape[0] == 0:
            self.nullavg = 'nan'
            self.ns = 0
        else:
            nsROH = 0  # total number of SNP in nonROH controls in SV region
            ns = 0  # total number of SNPs in SV region
            for index, row in self.obj.iterrows():
                F = row['before']
                M = row['inside']
                E = row['after']
                # print(dat[-1])
                # NOTE(review): Deltest is evaluated three times per row with
                # identical arguments; it is pure, so hoisting would be safe.
                self.count[row['sample']] = {
                    'F': F, 'M': M, 'E': E, 'Ratio': Deltest(F, M, E, self.length)}
                if row['sample'] not in self.probands and Deltest(F, M, E, self.length) != 'ROH':
                    self.nullratio.append(Deltest(F, M, E, self.length))
                    # print(F,M,E)
                    nsROH += M
                    ns += M
            # NOTE(review): as written, ns and nsROH accumulate identically
            # (both inside the control branch); ns may have been intended to
            # count all rows -- confirm against the original source.
            self.nullavg = nsROH / (len(self.nullratio) + 1)
            self.ns = ns
        self.nullratio = np.array(self.nullratio).reshape(-1, 1)
        # Fit the control background only when there are enough observations.
        if len(self.nullratio) > 10:
            self.gmm = mixture.BayesianGaussianMixture(
                n_components=3, covariance_type='spherical').fit(self.nullratio)
    # def Ttest(self,sample):
    #     testlist=[self.count[x]['Ratio'] for x in sample if self.count[x]['Ratio']!='ROH']
    #     if len(self.nullratio)<10 or np.std(self.nullratio)==0:
    #         return 'nan',"ROHregion"
    #     elif len(testlist)==0:
    #         return 'nan',"ROH"
    #     elif len(testlist)==1:
    #         stat=testlist[0]
    #         if stat=="ROH":
    #             return 'nan',"ROH"
    #         else:
    #             stat1=(stat-np.mean(self.nullratio))/(np.std(self.nullratio))
    #             ans=stats.norm.cdf(stat1)
    #             return 10**-stat,ans
    #     else:
    #         tstat,pvalue=stats.ttest_ind(testlist,self.nullratio)
    #         mean=np.mean([10**-x for x in testlist])
    #         if tstat<0:
    #             return mean,pvalue
    #         else:
    #             return mean,1-pvalue
    # def Ttest(self,sample):
    #     testlist=[self.count[x]['Ratio'] for x in sample if self.count[x]['Ratio']!='ROH']
    #     if len(self.nullratio)<10 or max(self.nullratio)-min(self.nullratio)<0.0001:
    #         return 'nan',"ROHregion"
    #     elif len(testlist)==0:
    #         return 'nan',"ROH"
    #     elif len(testlist)>len(self.nullratio) or self.ns<10:
    #         return 'nan',"Potential ROHregion or reference error"
    #     elif len(testlist)==1:
    #         stat=testlist[0]
    #         if stat=="ROH":
    #             return 'nan',"ROH"
    #         else:
    #             _,ans=stats.mannwhitneyu(testlist, self.nullratio, use_continuity=False,alternative='less')
    #             return 10**-stat,ans
    #     else:
    #         _,pvalue=stats.mannwhitneyu(testlist, self.nullratio, use_continuity=False, alternative='less')
    #         mean=np.mean([10**-x for x in testlist])
    #         return mean,pvalue
    def Ttest(self, sample):
        """Score *sample*'s Del statistics against the control GMM.

        Returns (mean ratio, score) or ('nan', flag-string) for degenerate cases.
        """
        testlist = [self.count[x]['Ratio']
                    for x in sample if self.count[x]['Ratio'] != 'ROH']
        if len(self.nullratio) <= 10 or max(self.nullratio) - min(self.nullratio) < 0.0001:
            return 'nan', "ROHregion"
        elif len(testlist) == 0:
            return 'nan', "ROH"
        elif len(testlist) > len(self.nullratio) or self.ns < 10:
            return 'nan', "Potential ROHregion or reference error"
        elif len(testlist) == 1:
            stat = testlist[0]
            if stat == "ROH":
                return 'nan', "ROH"
            else:
                # stat1=(stat-np.mean(self.nullratio))/(np.std(self.nullratio))
                # gmm = mixture.BayesianGaussianMixture(n_components=3, covariance_type='spherical').fit(a.reshape(-1,1))
                # ans=stats.norm.cdf(stat1)
                # NOTE(review): gmm.score expects a 2-D array; passing the bare
                # scalar here may raise in newer scikit-learn -- confirm.
                ans = self.gmm.score(stat)
                return 10**-stat, ans
        else:
            # tstat,pvalue=stats.ttest_ind(testlist,self.nullratio)
            # _,pvalue=stats.mannwhitneyu(testlist, self.nullratio, use_continuity=False, alternative='less')
            ans = self.gmm.score(np.array(testlist).reshape(-1, 1))
            mean = np.mean([10**-x for x in testlist])
            return mean, ans
            # if tstat<0:
            #     return mean,pvalue
            # else:
            #     return mean,1-pvalue

    def stats(self, sample):
        """Return a tab-separated summary of SNP and sample counts for *sample*."""
        nsnp = 0
        for x in sample:
            nsnp += self.count[x]['M']
        testlist = [self.count[x]['Ratio']
                    for x in sample if self.count[x]['Ratio'] != 'ROH']
        nsamplenullratio = len(self.nullratio)
        nonrohsample = len(testlist)
        nsample = len(sample)
        nnorm = len(self.count.keys()) - nsample
        return str(nsnp) + ',' + str(self.ns) + '\t' + str(nonrohsample) + ',' + str(nsample) + '\t' + str(self.nullavg) + ',' + str(nsamplenullratio) + ',' + str(nnorm)
        # return Deltest(self.count[sample]['F'],self.count[sample]['M'],self.count[sample]['E'],self.length)
        # with open(self.regionfile,'r') as f:
        #     for line in f:
        #         dat=line.rstrip().split("\t")
        #         hom=int(dat[3])
        #         he=int(dat[4])
        #         if dat[-1]==sample:
        #             homo=hom
        #             het=he
        # oddsratio, pvalue =stats.fisher_exact([[self.count[sample][0],s
class KS2sample:
    """Two-sample Kolmogorov-Smirnov test of per-proband BAF values
    against the pooled BAF values of all non-proband (control) samples."""

    def __init__(self, obj, probands):
        """Split the rows of *obj* (needs 'sample' and 'baf' columns) into
        a pooled control list and a per-proband dict of BAF values."""
        self.obj = obj
        self.probands = probands
        self.controlst = []   # pooled BAF values of every control sample
        self.dct = {}         # proband name -> list of its BAF values
        if obj.shape[0] == 0:
            self.mean = ''    # kept for backward compatibility with callers
        for index, row in self.obj.iterrows():
            sample_name = row['sample']
            baf_value = row['baf']
            if sample_name not in probands:
                self.controlst.append(baf_value)
            else:
                self.dct.setdefault(sample_name, []).append(baf_value)

    def test(self, samples):
        """Run the KS test on the pooled BAF values of *samples*.

        Returns ('nan', reason) when either group is empty, otherwise the
        result of scipy.stats.ks_2samp (statistic, p-value)."""
        testset = []
        for name in samples:
            if name in self.dct:
                testset += self.dct[name]
        if len(testset) < 1:
            return 'nan', "lowSNPs"
        elif len(self.controlst) < 1:
            return 'nan', "noBG"
        else:
            return stats.ks_2samp(testset, self.controlst)
#############
# import sys
# [_,txt,het,chr,start,end,cnvid,sample,type]=sys.argv
# samplelst=sample.split(",")
# Del=DeletionTest(het,samplelst,int(end)-int(start))
# delp=Del.Ttest(samplelst)
# KS=KS2sample(txt,samplelst)
# ksp=KS.test(samplelst)
# stats=Del.stats(samplelst)
# print(chr+'\t'+start+'\t'+end+'\t'+cnvid+'\t'+sample+'\t'+type+'\t'+str(delp)+"\t"+str(ksp)+'\t'+stats)
|
import json
from datetime import datetime
from flask import Flask, session
from wtforms import BooleanField, DateField, Form, RadioField, SelectField, SelectMultipleField, StringField, TextAreaField, ValidationError
from wtforms.csrf.session import SessionCSRF
from wtforms.validators import data_required, input_required, optional, regexp
from wtforms.widgets import CheckboxInput, ListWidget
from odp.lib.formats import DOI_REGEX, SID_REGEX
from odp.lib.hydra import GrantType, ResponseType, TokenEndpointAuthMethod
def init_app(app: Flask):
    """Bind the app's SECRET_KEY to the session-based WTForms CSRF setup."""
    BaseForm.Meta.csrf_secret = app.config['SECRET_KEY'].encode('utf-8')
def json_object(form, field):
    """A JSONTextField validator that ensures the value is a JSON object."""
    try:
        parsed = json.loads(field.data)
    except json.JSONDecodeError:
        raise ValidationError('Invalid JSON')
    if not isinstance(parsed, dict):
        raise ValidationError('The value must be a JSON object.')
class JSONTextField(TextAreaField):
    """Text area whose incoming Python value is rendered as pretty-printed
    JSON (4-space indent, non-ASCII characters kept verbatim)."""
    def process_data(self, value):
        self.data = json.dumps(value, indent=4, ensure_ascii=False)
class MultiCheckboxField(SelectMultipleField):
    """Multiple-select rendered as a list of checkboxes instead of the
    default multi-select control."""
    widget = ListWidget(prefix_label=False)
    option_widget = CheckboxInput()
class StringListField(TextAreaField):
    """Text area representing a list of strings, one item per line."""

    def process_data(self, value):
        # A missing value propagates as None so the field renders empty.
        if value is None:
            self.data = None
        else:
            self.data = '\n'.join(value)
class DateStringField(DateField):
    """Date field whose incoming value is an ISO 'YYYY-MM-DD' string."""

    def process_data(self, value):
        if value is None:
            self.data = None
        else:
            self.data = datetime.strptime(value, '%Y-%m-%d')
class BaseForm(Form):
    """Base class for all admin forms: enables session-backed CSRF
    protection (the secret is injected by init_app)."""
    class Meta:
        csrf = True
        csrf_class = SessionCSRF

        @property
        def csrf_context(self):
            # The Flask session stores the CSRF token between requests.
            return session
class ClientForm(BaseForm):
    """Create/edit form for an OAuth2 client registration."""
    id = StringField(
        label='Client id',
        filters=[lambda s: s.strip() if s else s],
        validators=[data_required()],
    )
    name = StringField(
        label='Client name',
        validators=[data_required()],
    )
    secret = StringField(
        label='Client secret',
    )
    # NOTE(review): provider_id and scope_ids declare no choices here —
    # presumably the view populates them at request time; confirm.
    provider_id = SelectField(
        label='Provider',
    )
    scope_ids = MultiCheckboxField(
        label='Scope',
    )
    # Choices are (value, label) pairs built from the hydra enums.
    grant_types = MultiCheckboxField(
        label='Grant types',
        choices=[(gt.value, gt.value) for gt in GrantType],
    )
    response_types = MultiCheckboxField(
        label='Response types',
        choices=[(rt.value, rt.value) for rt in ResponseType],
    )
    redirect_uris = StringListField(
        label='Redirect URIs',
    )
    post_logout_redirect_uris = StringListField(
        label='Post-logout redirect URIs',
    )
    token_endpoint_auth_method = RadioField(
        label='Token endpoint auth method',
        choices=[(tm.value, tm.value) for tm in TokenEndpointAuthMethod],
        default=TokenEndpointAuthMethod.CLIENT_SECRET_BASIC.value,
    )
    allowed_cors_origins = StringListField(
        label='Allowed CORS origins',
    )

    def validate_secret(self, field):
        # Inline WTForms validator: only enforce length when a secret is
        # supplied (an empty field passes — presumably "keep existing";
        # confirm against the view).
        if field.data and len(field.data) < 6:
            raise ValidationError('Client secret must be at least 6 characters long.')

    def validate_scope_ids(self, field):
        # Inline WTForms validator for the scope checkbox list.
        if not field.data:
            raise ValidationError('At least one scope must be selected.')
class CollectionForm(BaseForm):
    """Create/edit form for a collection."""
    id = StringField(
        label='Collection id',
        filters=[lambda s: s.strip() if s else s],
        validators=[data_required()],
    )
    name = StringField(
        label='Collection name',
        validators=[data_required()],
    )
    provider_id = SelectField(
        label='Provider',
        validators=[input_required()],
    )
    doi_key = StringField(
        label='DOI key',
    )
class ProjectForm(BaseForm):
    """Create/edit form for a project."""
    id = StringField(
        label='Project id',
        filters=[lambda s: s.strip() if s else s],
        validators=[data_required()],
    )
    name = StringField(
        label='Project name',
        validators=[data_required()],
    )
    collection_ids = MultiCheckboxField(
        label='Collections',
    )
class ProviderForm(BaseForm):
    """Create/edit form for a provider."""
    id = StringField(
        label='Provider id',
        filters=[lambda s: s.strip() if s else s],
        validators=[data_required()],
    )
    name = StringField(
        label='Provider name',
        validators=[data_required()],
    )
class RecordForm(BaseForm):
    """Create/edit form for a metadata record."""
    id = StringField(
        label='Record id',
        render_kw={'readonly': ''},
    )
    # The '^$|' prefix in the regex allows the field to be left empty.
    doi = StringField(
        label='DOI (Digital Object Identifier)',
        validators=[regexp('^$|' + DOI_REGEX)],
    )
    sid = StringField(
        label='SID (Secondary Identifier)',
        validators=[regexp('^$|' + SID_REGEX)],
    )
    collection_id = SelectField(
        label='Collection',
        validators=[input_required()],
    )
    schema_id = SelectField(
        label='Schema',
        validators=[input_required()],
    )
    metadata = JSONTextField(
        label='Metadata',
        validators=[input_required(), json_object],
        render_kw={'rows': 24},
    )

    def validate_sid(self, field):
        # Inline validator: a record must carry at least one of DOI / SID.
        if not self.doi.data and not field.data:
            raise ValidationError('SID is required if there is no DOI.')
class RecordFilterForm(BaseForm):
    """Filter form for the record list view."""
    collection = MultiCheckboxField(
        label='Filter by collection(s)',
    )
class RecordTagQCForm(BaseForm):
    """Form for attaching a QC (quality-control) tag to a record."""
    # Trailing underscore avoids shadowing the 'pass' keyword.
    pass_ = BooleanField(
        label='Pass',
    )
    comment = StringField(
        label='Comment',
    )
class RecordTagEmbargoForm(BaseForm):
    """Form for attaching an embargo tag (date range) to a record."""
    start = DateStringField(
        label='Start date',
        validators=[optional()],
    )
    end = DateStringField(
        label='End date',
        validators=[optional()],
    )
    comment = StringField(
        label='Comment',
    )
class RoleForm(BaseForm):
    """Create/edit form for a role."""
    id = StringField(
        label='Role id',
        filters=[lambda s: s.strip() if s else s],
        validators=[data_required()],
    )
    provider_id = SelectField(
        label='Provider',
    )
    scope_ids = MultiCheckboxField(
        label='Scope',
    )
class UserForm(BaseForm):
    """Edit form for a user; identity fields are read-only, only the
    active flag and role assignments can be changed."""
    id = StringField(
        label='User id',
        render_kw={'readonly': ''},
    )
    email = StringField(
        label='Email',
        render_kw={'readonly': ''},
    )
    name = StringField(
        label='Name',
        render_kw={'readonly': ''},
    )
    active = BooleanField(
        label='Active',
    )
    role_ids = MultiCheckboxField(
        label='Roles',
    )
|
import pyvisa
from pyvisa import VisaIOError
from typing import List, Tuple
# Globals
rm = pyvisa.ResourceManager()
# Utility Functions
def get_devices_addresses() -> Tuple[str, ...]:
    """
    Returns a tuple of the VISA addresses of peripherals connected to the
    computer.
    """
    # BUG FIX: the annotation was Tuple[str], which means a 1-tuple;
    # list_resources() returns a variable-length tuple of addresses.
    return rm.list_resources()
def identify_devices(verbose: bool = False) -> List[Tuple[str, str]]:
    """
    identify_devices(verbose=False)

    Queries devices connected to the machine with an IDN query, returning
    those with a valid response. The IDN query is an IEEE 488.2 Common
    Command and should be supported by all SCPI compatible instruments.

    Args:
        verbose (bool, optional): if True device addresses and responses,
            or lack thereof, are printed to stdout as they are queried.
            Defaults to False.

    Returns:
        List[Tuple[str, str]]: A list of (address, IDN response) pairs for
        each detected device that responded to the query with a valid
        response.
    """
    scpi_devices = []
    for address in rm.list_resources():
        device = None
        try:
            device = Scpi_Instrument(address, open_timeout=100,
                                     timeout=500)
            scpi_devices.append((address, device.idn))
            if verbose:
                print("address: {}\nresponse: {}\n".format(*scpi_devices[-1]))
        except pyvisa.Error:
            if verbose:
                # BUG FIX: corrected 'reponse' typo in the message.
                print(f"Invalid IDN query response from address {address}\n")
        finally:
            # BUG FIX: 'del device' raised NameError when the constructor
            # itself failed, because 'device' was never bound; guard it.
            if device is not None:
                del device
    return scpi_devices
class Scpi_Instrument():
    """Thin wrapper around a PyVISA resource exposing the IEEE 488.2
    common commands (*IDN?, *CLS, *RST) plus raw SCPI pass-through."""

    def __init__(self, address: str, **kwargs) -> None:
        self.address = address
        open_timeout = int(kwargs.get('open_timeout', 1000))
        self.instrument = rm.open_resource(self.address,
                                           open_timeout=open_timeout)
        self.timeout = int(kwargs.get('timeout', 1000))  # ms

    @property
    def idn(self) -> str:
        """Identify query (*IDN?): a string that uniquely identifies the
        instrument (IEEE 488.2 Common Command)."""
        return self.instrument.query('*IDN?')

    def cls(self, **kwargs) -> None:
        """Clear Status command (*CLS): empties the error queue, clears all
        event registers and cancels any preceding *OPC (IEEE 488.2)."""
        self.instrument.write('*CLS', **kwargs)

    def rst(self, **kwargs) -> None:
        """Reset command (*RST): executes a device reset and cancels any
        pending *OPC command or query (IEEE 488.2)."""
        self.instrument.write('*RST', **kwargs)

    @property
    def timeout(self) -> int:
        """Communication timeout of the VISA session, in milliseconds."""
        return self.instrument.timeout

    @timeout.setter
    def timeout(self, timeout: int) -> None:
        self.instrument.timeout = int(timeout)  # ms

    def __del__(self) -> None:
        # Close the VISA session if one was ever opened; swallow VISA
        # errors raised when __init__ failed before the connection was
        # established.
        try:
            if hasattr(self, 'instrument'):
                self.instrument.close()
        except VisaIOError:
            pass

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.address}, timeout={self.timeout})"

    def __str__(self) -> str:
        return f'Instrument ID: {self.idn}\nAddress: {self.address}'

    def __eq__(self, obj) -> bool:
        """True iff *obj* is a Scpi_Instrument (or subclass) with the same
        concrete class name and the same address; otherwise False."""
        return (isinstance(obj, Scpi_Instrument)
                and self.__class__.__name__ == obj.__class__.__name__
                and self.address == obj.address)

    def __ne__(self, obj) -> bool:
        """Inverse of __eq__."""
        return not self.__eq__(obj)

    def send_raw_scpi(self, command_str: str, **kwargs) -> None:
        """Pass-through: forward *command_str* verbatim to the device.
        Commands only — no response is read."""
        self.instrument.write(str(command_str), **kwargs)

    def query_raw_scpi(self, query_str: str, **kwargs) -> str:
        """Pass-through: send *query_str* and return the raw response."""
        return self.instrument.query(str(query_str), **kwargs)

    def read_raw_scpi(self, **kwargs) -> str:
        """Pass-through: read the device and return the raw response."""
        return self.instrument.read(**kwargs)
if __name__ == "__main__":
    # Library module: nothing to execute when run as a script.
    pass
|
<gh_stars>0
#!/usr/bin/python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""
attempt for simple alphasim based on simple 2nd level residuals (a la SPM)
uses SUMA's SurfClust, SurfFWHM, and a couple of other AFNI programs
Note: this method has not been validated properly yet.
NNO Oct 2012
"""
import os
from os.path import join as pathjoin
import fnmatch
import datetime
import re
import argparse
import math
from mvpa2.support.afni import afni_utils as utils
def _fn(config, infix, ext=None):
"""Returns a file name with a particular infix"""
if ext is None:
ext = _ext(config)
return "./%s%s%s" % (config["prefix"], infix, ext)
def _is_surf(config):
"""Returns True iff we are on the surface"""
return "surface_file" in config and config["surface_file"]
def _ext(config, for1D=False):
    """Return the dataset file-name extension for the current analysis."""
    if _is_surf(config):
        # Surface data: NIML dataset, or its 1D flavour when requested.
        return ".1D.dset" if for1D else ".niml.dset"
    # Volume data: reuse the extension of the first input file.
    first_input = config["data_files"][0]
    return "".join(utils.afni_fileparts(first_input)[2:])
def _mask_expr(config):
"""returns an expression that can be used as an infix in
running other commands (depending on whether in volume or on surface)"""
m = config["mask"]
if not m:
return ""
else:
if _is_surf(config):
return "-b_mask %s" % m
else:
return "-mask %s" % m
def compute_fwhm(config):
    """Estimate the spatial smoothness (FWHM) of the 2nd-level residuals.

    Buckets all input maps, computes the group mean, derives per-subject
    residual maps and estimates each residual's FWHM with SurfFWHM
    (surface) or 3dFWHMx (volume). Stores the per-subject FWHMs
    ('all_fwhms'), their average ('fwhm') and the bucket file names in
    *config*. Called by critical_clustersize.

    BUG FIX: the original read the module-level global 'c' instead of the
    'config' parameter in six places, so it only worked when executed as a
    script with a global 'c' defined.
    """
    output_dir = config["output_dir"]
    is_surf = _is_surf(config)
    ext, ext1D = _ext(config), _ext(config, for1D=True)
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    cmds = ['cd "%s"' % output_dir]
    # if surfaces and needs padding, do that first
    pad_to_node = config["pad_to_node"]
    if is_surf and pad_to_node:
        data_files = []
        for i, fn in enumerate(config["data_files"]):
            fn_pad = "pad_%d%s" % (i, ext)
            cmds.append(
                "; ConvertDset -overwrite -pad_to_node %d -input %s'[%d]' -prefix ./%s"
                % (pad_to_node, fn, config["brik_index"], fn_pad)
            )
            data_files.append(fn_pad)
        pad_files = data_files
        brik_index = 0
    else:
        data_files = config["data_files"]
        pad_files = []
        brik_index = config["brik_index"]
    # bucket data from all participants into a single file
    buck_fn = _fn(config, "buck")
    cmds.append("; 3dbucket -overwrite -prefix %s" % buck_fn)
    for fn in data_files:
        cmds.append(" %s'[%d]'" % (fn, brik_index))
    # also store as 1D (won't hurt)
    if is_surf:
        buck_fn_1D = _fn(config, "buck", ext1D)
        cmds.append(
            "; ConvertDset -overwrite -o_1D -prefix %s -input %s"
            % (buck_fn_1D, buck_fn)
        )
    else:
        buck_fn_1D = buck_fn
    # compute group mean
    mean_fn = _fn(config, "mean")
    cmds.append("; 3dTstat -overwrite -prefix %s %s" % (mean_fn, buck_fn))
    # compute residuals, and estimate FWHM for each of them
    # store FWHM output in fwhm_fn
    fwhm_fn = pathjoin(output_dir, _fn(config, "fwhm", ".1D"))
    cmds.append('; echo > "%s"' % fwhm_fn)
    resid_fns = []
    for i in range(len(config["data_files"])):
        fn = _fn(config, "resid_%d" % i)
        cmds.append(
            "; 3dcalc -overwrite -prefix %s -a %s -b %s'[%d]' -expr 'a-b'"
            % (fn, mean_fn, buck_fn, i)
        )
        msk = _mask_expr(config)
        if is_surf:
            surf_fn = config["surface_file"]
            cmds.append(
                "; SurfFWHM %s -input %s -i_fs %s"
                "| grep ^FWHM | cut -f2 -d'=' >> '%s'" % (msk, fn, surf_fn, fwhm_fn)
            )
        else:
            cmds.append("; 3dFWHMx %s %s | cut -c18- >> %s" % (msk, fn, fwhm_fn))
        resid_fns.append(fn)
    cmd = "".join(cmds)
    utils.run_cmds(cmd)
    # read FWHM values and store in config
    with open(fwhm_fn) as f:
        fwhms = f.read().split()
    print(fwhms)
    print(fwhm_fn)
    config["all_fwhms"] = fwhms  # all FWHMs (for each participant)
    config["fwhm"] = sum(map(float, fwhms)) / len(fwhms)  # average FWHM
    config["buck_fn"] = buck_fn
    config["buck_fn_1D"] = buck_fn_1D
    mean_fwhm_fn = pathjoin(output_dir, _fn(config, "mean_fwhm", ".1D"))
    with open(mean_fwhm_fn, "w") as f:
        f.write("%.3f\n" % config["fwhm"])
    tmpfns = resid_fns + pad_files + [mean_fn]
    print("TEMP")
    print(tmpfns)
    _remove_files(config, tmpfns)
def null_clustersize(config):
    """Run one null simulation and return its maximum cluster size.

    Generates per-participant Gaussian-noise datasets shaped like the
    input data, smooths them with the FWHM estimated by compute_fwhm,
    t-tests them against zero, clusters at config['tthr'] and returns the
    largest surviving cluster size (mm^2 on surfaces, voxels in volumes).
    Called by critical_clustersize; compute_fwhm must have run first.
    """
    output_dir = config["output_dir"]
    tthr = config["tthr"]
    fwhm = config["fwhm"]
    buck_fn_1D = config["buck_fn_1D"]
    msk = _mask_expr(config)
    is_surf = _is_surf(config)
    if is_surf:
        surf_fn = config["surface_file"]
    ext, ext1D = _ext(config), _ext(config, for1D=True)
    ns = len(config["data_files"])
    cmds = ['cd "%s"' % output_dir]
    # generate N random data files (N=number of participants)
    # use the output bucket to get datasets with the right size
    null_fns = []
    for i in range(ns):
        fn = _fn(config, "rand_%d" % i, ext1D)
        if is_surf:
            cmds.append(
                "; 1deval -ok_1D_text -a %s'[0]' -expr 'gran(0,1)' > '%s'"
                % (buck_fn_1D, fn)
            )
        else:
            cmds.append(
                "; 3dcalc -overwrite -prefix %s -a %s'[0]' -expr 'gran(0,1)'"
                % (fn, buck_fn_1D)
            )
        null_fns.append(fn)
    # bucket random data
    buck_fn = _fn(config, "rand_buck", ext1D)
    null_fns_list = " ".join(null_fns)
    if is_surf:
        cmds.append('; 1dcat %s > "%s"' % (null_fns_list, buck_fn))
    else:
        cmds.append("; 3dbucket -overwrite -prefix %s %s" % (buck_fn, null_fns_list))
    # smooth all data at once, using estimated FWHM
    smooth_fn = _fn(config, "rand_buck_smooth", ext1D)
    if is_surf:
        if config["sigma"] > 0:
            sigma_str = "-sigma %s" % config["sigma"]
        else:
            sigma_str = ""
        cmds.append(
            "; SurfSmooth -overwrite %s -met HEAT_07 -i_fs %s -input %s "
            " -fwhm %f -output %s %s"
            % (msk, surf_fn, buck_fn, fwhm, smooth_fn, sigma_str)
        )
    else:
        cmds.append(
            "; 3dBlurInMask -overwrite %s -FWHM %f -prefix %s -input %s"
            % (msk, fwhm, smooth_fn, buck_fn)
        )
    # run ttest
    if is_surf:
        msk = ""  # cannot use mask on surface, but that's fine
        # as it was used in SurfSmooth
    ttest_fn = _fn(config, "rand_buck_smooth_t", ext1D)
    cmds.append(
        "; 3dttest++ %s -overwrite -prefix %s -setA %s" % (msk, ttest_fn, smooth_fn)
    )
    # extract maximum cluster size (in mm^2 or number of voxels) from output
    # and pipe into size_fn
    size_fn = _fn(config, "rand_size", ".1D")
    if is_surf:
        postfix = "| grep --invert-match '#' | head -1 | cut -c 18-28"
        cmds.append(
            "; SurfClust -i_fs %s -input %s 1 -rmm -1 "
            ' -thresh %f -thresh_col 1 %s > "%s"'
            % (surf_fn, ttest_fn, tthr, postfix, size_fn)
        )
    else:
        postfix = " | grep --invert-match '#' | head -1 | cut -c1-8"
        cmds.append(
            "; 3dclust -quiet -1noneg -1clip %f 0 0 %s'[1]' %s > '%s'"
            % (tthr, ttest_fn, postfix, size_fn)
        )
    utils.run_cmds("".join(cmds))
    # read maximum cluster size from size_fn
    sz_str = None
    with open(pathjoin(output_dir, size_fn)) as f:
        sz_str = f.read()
    try:
        sz = float(sz_str)
    except (TypeError, ValueError):
        # BUG FIX: was a bare 'except:' — only conversion failures (empty
        # or malformed clustering output, i.e. no surviving cluster)
        # should map to 0.0; anything else must propagate.
        sz = 0.0  # CHECKME whether this makes sense
    print("Null data: maximum size %f" % sz)
    if is_surf:
        smoothing_fn_rec = pathjoin(
            output_dir, _fn(config, "rand_buck_smooth", ".1D.dset.1D.smrec")
        )
        if not os.path.exists(smoothing_fn_rec):
            raise ValueError(
                "Smoothing did not succeed. Please check the error"
                " messaged. You may have to set sigma manually"
            )
        with open(smoothing_fn_rec) as f:
            s = f.read()
        final_fwhm = float(s.split()[-2])
        # Sanity check: the achieved smoothness must be within 10% of the
        # requested FWHM, otherwise the simulation is invalid.
        ratio = fwhm / final_fwhm
        thr = 0.9
        if ratio < thr or 1.0 / ratio < thr:
            raise ValueError(
                "FWHM converged to %s but expected %s. Consider "
                "setting sigma manually" % (final_fwhm, fwhm)
            )
    # clean up - remove all temporary files
    tmpfns = null_fns + [buck_fn, smooth_fn, ttest_fn, size_fn]
    _remove_files(config, tmpfns)
    return sz
def _remove_files(config, list_of_files):
# removes a list of files, if config allows for it
# in the case of AFNI volume files, it removes HEAD and BRIK files
if not config["keep_files"]:
for fn in list_of_files:
fp = utils.afni_fileparts(fn)
if fp[2]:
# AFNI HEAD/BRIK combination
# ensure we delete all of them
exts = [".HEAD", ".BRIK", ".BRIK.gz"]
fn = "".join(fp[1:3])
else:
exts = [""]
for ext in exts:
full_fn = pathjoin(config["output_dir"], fn + ext)
if os.path.exists(full_fn):
os.remove(full_fn)
def critical_clustersize(config):
    """Estimate the null distribution of maximum cluster sizes.

    Runs compute_fwhm once, then config['niter'] null simulations; stores
    the per-iteration maximum sizes in config['max_size'], writes them to
    a '*critical_cluster_size.1D' file and returns the list."""
    compute_fwhm(config)
    # this takes a long time
    niter = config["niter"]
    max_sizes = []
    for iteration in range(niter):
        max_sizes.append(null_clustersize(config))
        print("Completed null iteration %d / %d" % (iteration + 1, niter))
    config["max_size"] = max_sizes
    # store the results in a file
    clsize_fn = _fn(config, "critical_cluster_size", ".1D")
    with open(pathjoin(config["output_dir"], clsize_fn), "w") as f:
        f.write(
            "# Critical sizes for tthr=%.3f, fwhm=%.3f, %d files\n"
            % (config["tthr"], config["fwhm"], len(config["data_files"]))
        )
        for size in max_sizes:
            f.write("%.5f\n" % size)
    return max_sizes
def _critical_size_index(config):
"""computes the index of the critical cluster size
(assuming that these sizes are sorted)"""
pthr = config["pthr"]
nsize = config["niter"]
idx = math.ceil((1.0 - pthr) * nsize) # index of critical cluster size
if idx >= nsize or idx == 0:
raise ValueError(
"Illegal critical index (p=%s): %s; "
"consider increasing --niter" % (pthr, idx)
)
return int(idx)
def apply_clustersize(config):
    """Apply the critical cluster size to the original data.

    Runs a group t-test on the original bucket and keeps only clusters at
    least as large as the critical size derived from the null simulations.
    Assumes that critical_clustersize(config) has been run (it fills
    config['max_size'] and config['buck_fn_1D'])."""
    # applies the critical cluster size to the original data
    #
    # assumes that critical_clustersize(config) has been run
    output_dir = config["output_dir"]
    pthr = config["pthr"]
    tthr = config["tthr"]
    niter = config["niter"]
    buck_fn_1D = config["buck_fn_1D"]
    is_surf = _is_surf(config)
    if is_surf:
        surf_fn = config["surface_file"]
    cmds = ['cd "%s"' % output_dir]
    # run ttest on original data
    # (dict-style %-interpolation pulls 'tthr' straight from config)
    infix = "ttest_t%(tthr)s" % config
    ttest_fn = _fn(config, infix)
    msk = _mask_expr(config)
    # NOTE: for surfaces, apply mask below (SurfClust)
    # but in volumes, apply it here
    if is_surf:
        cmds.append(
            "; 3dttest++ -ok_1D_text -overwrite -prefix %s -setA %s"
            % (ttest_fn, buck_fn_1D)
        )
    else:
        cmds.append(
            "; 3dttest++ %s -overwrite -prefix %s -setA %s"
            % (msk, ttest_fn, buck_fn_1D)
        )
    # sort cluster sizes (ascending) so the critical index picks the
    # (1 - pthr) quantile of the null distribution
    clsize = list(config["max_size"])
    clsize.sort()
    # get critical cluster size
    idx = _critical_size_index(config)
    critical_size = clsize[idx]
    print("critical size %s (p=%s)" % (critical_size, pthr))
    # apply critical size to t-test of original data
    infix += "_clustp%s_%dit" % (pthr, niter)
    if not is_surf:
        # for surfaces the size is included in the filename automatically
        infix += "_%svx" % critical_size
    # set file names
    dset_out = _fn(config, infix)
    log_out = _fn(config, infix, ".txt")
    if is_surf:
        cmds.append(
            "; SurfClust %s -i_fs %s -input %s 1 -rmm -1 "
            " -thresh %f -thresh_col 1 -amm2 %f -out_clusterdset -prefix %s > %s"
            % (msk, surf_fn, ttest_fn, tthr, critical_size, dset_out, log_out)
        )
    else:
        dset_out_msk = _fn(config, infix + "_msk")
        cmds.append(
            "; 3dclust -overwrite -1noneg -1clip %f "
            " -prefix %s -savemask %s 0 -%f %s'[1]' > %s"
            % (tthr, dset_out, dset_out_msk, critical_size, ttest_fn, log_out)
        )
    cmd = "".join(cmds)
    utils.run_cmds(cmd)
def run_all(config):
    """Main entry point: estimate the critical cluster size, then apply
    it to the group t-test result."""
    for step in (critical_clustersize, apply_clustersize):
        step(config)
def get_testing_config():
    """Return a hard-coded configuration dict (developer testing only)."""
    in_vol = True
    cfg = dict(mask="")
    if in_vol:
        data_dir = "/Users/nick/organized/210_smoothness/afnidata/glm/"
        subject_ids = ["%02d" % i for i in range(1, 13)]
        name_pattern = "glm_SUB%s_REML+tlrc.HEAD"
        input_files = ["%s/%s" % (data_dir, name_pattern % sid)
                       for sid in subject_ids]
        cfg["output_dir"] = "/Users/nick/organized/210_smoothness/_tst"
        cfg["brik_index"] = 2
        cfg["mask"] = data_dir + "../../all/mask+tlrc.HEAD"
        cfg["pad_to_node"] = None
    else:
        data_dir = "/Users/nick/organized/212_raiders_fmri/ref"
        subject_ids = ["ab", "ag", "aw", "jt"]
        dset_name = "cross_ico16-128_mh_100vx_8runs_t3.niml.dset"
        input_files = ["%s/%s/%s" % (data_dir, sid, dset_name)
                       for sid in subject_ids]
        cfg["output_dir"] = "/Users/nick//tmp/alphasim/"
        cfg["surface_file"] = "%s/%s/ico16_mh.intermediate_al.asc" % (
            data_dir, subject_ids[0])
        cfg["brik_index"] = 0
        cfg["ext"] = ".niml.dset"
        cfg["pad_to_node"] = 5124 - 1
    cfg["data_files"] = input_files
    cfg["tthr"] = 4.4
    cfg["niter"] = 1000
    cfg["pthr"] = 0.05
    cfg["prefix"] = "alphasim_"
    cfg["keep_files"] = False
    return cfg
def get_options():
    """Parse the command line and return the configuration as a dict."""
    description = """
Experimental implementation of alternative AlphaSim for volumes and surfaces.\n
Currently only supports group analysis with t-test against 0.
Input paramaters are an uncorrected t-test threshold (tthr)
and cluster-size corrected p-value (a.k.a. alpha level) (pthr).
This program takes the following steps:
(0) Participant's map are t-tested against zero, thresholded by tthr and clustered.
(1) residuals are computed by subtracting the group mean from each participants' map
(2) the average smoothness of these residual maps is estimated
(3) null maps are generated from random gaussian noise that is smoothened
with the estimated smoothness from step (2), thresholded by tthr, and clustered.
The maximum cluster size is stored for each null map.
(4) A cluster from (0) survive pthr if a smaller ratio than pthr of the
null maps generated in (3) have a maximum cluster size.
Copyright 2012 <NAME> <<EMAIL>>
"""
    epilog = """This function is *experimental* and may delete files in output_dir or elsewhere.
As of Oct 2012 it has not been validated properly against other
approaches to correct for multiple comparisons. Also, it is slow."""
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument(
        "-d",
        "--data_files",
        required=True,
        help=(
            "Data input files (that are tested against " "0 with a one-sample t-test)"
        ),
        nargs="+",
    )
    parser.add_argument("-o", "--output_dir", required=True, help="Output directory")
    parser.add_argument(
        "-s",
        "--surface_file",
        required=False,
        help="Anatomical surface file (.asc); required" " for surface-based analysis",
        default=None,
    )
    parser.add_argument(
        "-t", "--tthr", required=True, help="t-test uncorrected threshold", type=float
    )
    parser.add_argument(
        "-p",
        "--pthr",
        required=False,
        help="p-value for corrected threshold",
        type=float,
        default=0.05,
    )
    parser.add_argument(
        "-n",
        "--niter",
        required=False,
        help="Number of iterations",
        type=int,
        default=1000,
    )
    parser.add_argument(
        "-i",
        "--brik_index",
        required=False,
        help="Brik index in input files",
        type=int,
        default=0,
    )
    parser.add_argument(
        "-P",
        "--prefix",
        required=False,
        help="Prefix for data output",
        default="alphasim_",
    )
    parser.add_argument(
        "-m", "--mask", required=False, help="Mask dataset", default=None
    )
    parser.add_argument(
        "-k",
        "--keep_files",
        required=False,
        help="Keep temporary files",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "-N",
        "--pad_to_node",
        required=False,
        help="pad_to_node (for surfaces)",
        default=0,
        type=int,
    )
    parser.add_argument(
        "-S",
        "--sigma",
        required=False,
        help=(
            "sigma (smoothing bandwidth) for SurfSmooth. "
            "If smoothing of surface "
            "data takes a long time, "
            "set this value to a value between 1 and "
            "1.5 and see how many smoothing "
            "iterations are performed. "
            "Ideally, the number of "
            "smoothing iterations should be between "
            "10 and 20"
        ),
        default=0.0,
        type=float,
    )
    # args=None makes parse_args read sys.argv[1:].
    args = None
    namespace = parser.parse_args(args)
    # Downstream code indexes the options as a plain dict.
    return vars(namespace)
def fix_options(config):
    """Normalize all configured paths to absolute form and sanity-check
    the input parameters (raises ValueError on bad input)."""
    def _abspath(path, check=True):
        # Convert to an absolute path, optionally requiring existence.
        resolved = os.path.abspath(path)
        if check and not os.path.exists(resolved):
            raise ValueError("Not found: %s" % path)
        return resolved

    for i in range(len(config["data_files"])):
        full_path = _abspath(config["data_files"][i])
        # Surface-looking inputs without --surface_file are a user error.
        if _extension_indicates_surface_file(full_path) and not _is_surf(config):
            raise ValueError(
                "Input file %d (%s) indicates surface-based "
                "input but '--surface_file' was not specified."
                " To use this function for surface-based "
                "analysis, supply an anatomical surface "
                "file (preferably the intermediate surface "
                "[=the node-wise average of the pial and "
                "white surface], or alternatively, a pial "
                "or white surface) using the --surface_file "
                "option." % (i + 1, full_path)
            )
        config["data_files"][i] = _abspath(config["data_files"][i])
    config["output_dir"] = _abspath(config["output_dir"], check=False)
    if _is_surf(config):
        config["surface_file"] = _abspath(config["surface_file"])
    if config["mask"]:
        config["mask"] = _abspath(config["mask"])
    pthr = config["pthr"]
    if pthr <= 0 or pthr >= 1:
        raise ValueError("Require 0 < pthr < 1")
    _critical_size_index(config)  # raises error if p too small
def _extension_indicates_surface_file(fn):
surface_extensions = [".gii", ".niml.dset", ".1D", ".1D.dset"]
return any(fn.endswith(ext) for ext in surface_extensions)
if __name__ == "__main__":
    # Parse CLI options, normalize/validate them, then run the pipeline.
    # c = get_testing_config()
    c = get_options()
    fix_options(c)
    run_all(c)
|
<filename>generate_feed.py<gh_stars>0
"""Aggregate posts from subreddits, RSS/Atom feeds and Hacker News into a
single Atom feed, deduplicating against a pickled DataFrame cache.

Usage: python generate_feed.py <config.json>
"""
import json
import praw  # to connect to reddit
from feedgen.feed import FeedGenerator  # to generate the feed
from hackernews import HackerNews  # to get entries from hackernews
import feedparser  # to parse feeds
import numpy as np
import pandas as pd
from os.path import isfile
from datetime import datetime
from tzlocal import get_localzone
import re
from sys import argv

if len(argv) < 2:
    raise ValueError("Config file is not specified!")
with open(argv[1]) as json_data:
    config = json.load(json_data)

# Collected entries; the three lists are kept index-aligned.
urls = []
titles = []
selftexts = []

# Go through subreddits list and get top/stickied posts
if len(config['subreddits']) > 0:
    reddit_credentials = config['reddit_credentials']
    reddit = praw.Reddit(client_id=reddit_credentials['client_id'],
                         client_secret=reddit_credentials['client_secret'],
                         password=reddit_credentials['password'],
                         user_agent='feed generator',
                         username=reddit_credentials['username'])
    subreddits = config['subreddits']
    min_comments = config['reddit_min_comments']
    num_posts = config['reddit_num_posts']
    # Get top posts; the config maps each subreddit name to the value
    # passed as praw's time filter argument to .top().
    for subreddit in subreddits:
        for submission in reddit.subreddit(subreddit).top(subreddits[subreddit], limit=num_posts):
            if submission.num_comments > min_comments:
                urls.append('https://www.reddit.com' + submission.permalink)
                titles.append(subreddit + ' - ' + submission.title)
                selftexts.append(submission.selftext)
    # Get stickied posts from the hot listing of each subreddit
    for subreddit in subreddits:
        for submission in reddit.subreddit(subreddit).hot(limit=10):
            if submission.stickied and submission.num_comments > min_comments:
                urls.append('https://www.reddit.com' + submission.permalink)
                titles.append(subreddit + ' - ' + submission.title)
                selftexts.append(submission.selftext)

# Get feed entries
if len(config['feed_sources']) > 0:
    feed_links = config['feed_sources']
    # Aggregator entry ids are expected to look like '...=<target-url>';
    # capture everything after the '='.
    pattern = re.compile("=(.*)$")
    for one_feed in feed_links:
        one_feed_parsed = feedparser.parse(one_feed)
        for one_entry in one_feed_parsed.entries:
            match = pattern.search(one_entry.id)
            # Fall back to the raw id instead of crashing with an
            # AttributeError when an entry id contains no '='.
            urls.append(match.group(1) if match else one_entry.id)
            titles.append(one_entry.title)
            selftexts.append(one_entry.description)

# Going through the top hacker news items
if config['add_hn_entries']:
    hn = HackerNews()
    num_posts = config['hn_num_posts']
    for story_id in hn.top_stories(limit=num_posts):
        one_item = hn.get_item(story_id)
        # Keep only stories/polls with a minimum amount of discussion.
        if one_item.item_type in ['poll', 'story'] and one_item.descendants >= 10:
            urls.append('https://news.ycombinator.com/item?id=' + str(one_item.item_id))
            titles.append(one_item.title)
            selftexts.append('Article from HackerNews')

new_data = pd.DataFrame({
    'url': urls,
    'title': titles,
    'selftext': selftexts,
    'time': datetime.now()  # scalar is broadcast to every row
})
new_data = new_data.drop_duplicates(subset='url', keep='first')

# Load cache and append new data to old
cache_path = config['cache_path']
if isfile(cache_path):
    old_data = pd.read_pickle(cache_path)
    new_urls = np.setdiff1d(new_data.url.values, old_data.url.values)
    new_data = new_data[new_data['url'].isin(new_urls)]
    # DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported equivalent.
    full_data = pd.concat([old_data, new_data])
else:
    full_data = new_data

# Keep only the latest n entries for the feed
full_data = full_data.sort_values(by='time', ascending=False)
for_feed = full_data.head(n=config['max_items_feed'])

# Generate feed
fg = FeedGenerator()
fg.id(config['save_path'])
fg.title(config['feed_name'])
fg.link(href=config['feed_url'], rel='self')
fg.language('en')
tz = get_localzone()
for url, title, selftext, timestamp in zip(for_feed.url, for_feed.title, for_feed.selftext, for_feed.time):
    fe = fg.add_entry()
    fe.id(url)
    fe.link({"href": url})
    fe.title(title)
    fe.content(content=selftext, type='html')
    # NOTE(review): tzlocal>=3 returns a zoneinfo tzinfo that has no
    # .localize(); this code requires the pytz-style tzlocal<3 — confirm
    # the pinned version or switch to timestamp.replace(tzinfo=tz).
    fe.published(tz.localize(timestamp))
    fe.updated(tz.localize(timestamp))

# Save the feed
fg.atom_file(config['save_path'])
# Cache the data
full_data.to_pickle(config['cache_path'])
print('ran successfully at ' + str(datetime.now()))
|
<gh_stars>10-100
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NFS driver module."""
import ddt
import errno
import os
import mock
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder import test
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
from cinder.volume.drivers import remotefs
class RemoteFsDriverTestCase(test.TestCase):
    """Unit tests for the generic remotefs.RemoteFSDriver base class."""
    # Fixture constants shared by the tests below.
    TEST_FILE_NAME = 'test.txt'
    TEST_EXPORT = 'nas-host1:/export'
    TEST_MNT_POINT = '/mnt/nas'
    def setUp(self):
        """Build a driver around a mocked Configuration and stub out
        _execute so no real shell commands are run.
        """
        super(RemoteFsDriverTestCase, self).setUp()
        # NOTE(review): this unconfigured instance is discarded right
        # away; the configured one created below is the one under test.
        self._driver = remotefs.RemoteFSDriver()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.append_config_values(mock.ANY)
        self.configuration.nas_secure_file_permissions = 'false'
        self.configuration.nas_secure_file_operations = 'false'
        self.configuration.max_over_subscription_ratio = 1.0
        self.configuration.reserved_percentage = 5
        self._driver = remotefs.RemoteFSDriver(
            configuration=self.configuration)
        # Record every command the driver would have executed.
        mock_exc = mock.patch.object(self._driver, '_execute')
        self._execute = mock_exc.start()
        self.addCleanup(mock_exc.stop)
    def test_create_sparsed_file(self):
        """Sparse volume files are created with 'truncate -s <size>G'."""
        self._driver._create_sparsed_file('/path', 1)
        self._execute.assert_called_once_with('truncate', '-s', '1G',
                                              '/path', run_as_root=True)
    def test_create_regular_file(self):
        """Preallocated files are written with dd from /dev/zero."""
        self._driver._create_regular_file('/path', 1)
        self._execute.assert_called_once_with('dd', 'if=/dev/zero',
                                              'of=/path', 'bs=1M',
                                              'count=1024', run_as_root=True)
    def test_create_qcow2_file(self):
        """qcow2 files are created via qemu-img with metadata prealloc."""
        file_size = 1
        self._driver._create_qcow2_file('/path', file_size)
        self._execute.assert_called_once_with('qemu-img', 'create', '-f',
                                              'qcow2', '-o',
                                              'preallocation=metadata',
                                              '/path', '%s' %
                                              str(file_size * units.Gi),
                                              run_as_root=True)
    def test_set_rw_permissions_for_all(self):
        """World read/write is granted with 'chmod ugo+rw'."""
        self._driver._set_rw_permissions_for_all('/path')
        self._execute.assert_called_once_with('chmod', 'ugo+rw', '/path',
                                              run_as_root=True)
    @mock.patch.object(remotefs, 'LOG')
    def test_set_rw_permissions_with_secure_file_permissions(self, LOG):
        """No warning is logged when secure file permissions are enabled."""
        self._driver._mounted_shares = [self.TEST_EXPORT]
        self.configuration.nas_secure_file_permissions = 'true'
        self._driver._set_rw_permissions(self.TEST_FILE_NAME)
        self.assertFalse(LOG.warning.called)
    @mock.patch.object(remotefs, 'LOG')
    def test_set_rw_permissions_without_secure_file_permissions(self, LOG):
        """A warning about open permissions is logged when not secure."""
        self.configuration.nas_secure_file_permissions = 'false'
        self._driver._set_rw_permissions(self.TEST_FILE_NAME)
        self.assertTrue(LOG.warning.called)
        warn_msg = "%(path)s is being set with open permissions: %(perm)s"
        LOG.warning.assert_called_once_with(
            warn_msg, {'path': self.TEST_FILE_NAME, 'perm': 'ugo+rw'})
    @mock.patch('os.path.join')
    @mock.patch('os.path.isfile', return_value=False)
    def test_determine_nas_security_options_when_auto_and_new_install(
            self,
            mock_isfile,
            mock_join):
        """Test the setting of the NAS Security Option
        In this test case, we will create the marker file. No pre-existing
        Cinder volumes found during bootup.
        """
        self._driver._mounted_shares = [self.TEST_EXPORT]
        file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
        is_new_install = True
        self._driver._ensure_shares_mounted = mock.Mock()
        nas_mount = self._driver._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        mock_join.return_value = file_path
        secure_file_permissions = 'auto'
        nas_option = self._driver._determine_nas_security_option_setting(
            secure_file_permissions,
            nas_mount, is_new_install)
        self.assertEqual('true', nas_option)
        secure_file_operations = 'auto'
        nas_option = self._driver._determine_nas_security_option_setting(
            secure_file_operations,
            nas_mount, is_new_install)
        self.assertEqual('true', nas_option)
    @mock.patch('os.path.join')
    @mock.patch('os.path.isfile')
    def test_determine_nas_security_options_when_auto_and_new_install_exists(
            self,
            isfile,
            join):
        """Test the setting of the NAS Security Option
        In this test case, the marker file already exists. Cinder volumes
        found during bootup.
        """
        drv = self._driver
        drv._mounted_shares = [self.TEST_EXPORT]
        file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
        is_new_install = False
        drv._ensure_shares_mounted = mock.Mock()
        nas_mount = drv._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        join.return_value = file_path
        isfile.return_value = True
        secure_file_permissions = 'auto'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_permissions,
            nas_mount, is_new_install)
        self.assertEqual('true', nas_option)
        secure_file_operations = 'auto'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_operations,
            nas_mount, is_new_install)
        self.assertEqual('true', nas_option)
    @mock.patch('os.path.join')
    @mock.patch('os.path.isfile')
    def test_determine_nas_security_options_when_auto_and_old_install(self,
                                                                      isfile,
                                                                      join):
        """Test the setting of the NAS Security Option
        In this test case, the marker file does not exist. There are also
        pre-existing Cinder volumes.
        """
        drv = self._driver
        drv._mounted_shares = [self.TEST_EXPORT]
        file_path = '%s/.cinderSecureEnvIndicator' % self.TEST_MNT_POINT
        is_new_install = False
        drv._ensure_shares_mounted = mock.Mock()
        nas_mount = drv._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        join.return_value = file_path
        isfile.return_value = False
        secure_file_permissions = 'auto'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_permissions,
            nas_mount, is_new_install)
        self.assertEqual('false', nas_option)
        secure_file_operations = 'auto'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_operations,
            nas_mount, is_new_install)
        self.assertEqual('false', nas_option)
    def test_determine_nas_security_options_when_admin_set_true(self):
        """Test the setting of the NAS Security Option
        In this test case, the Admin set the flag to 'true'.
        """
        drv = self._driver
        drv._mounted_shares = [self.TEST_EXPORT]
        is_new_install = False
        drv._ensure_shares_mounted = mock.Mock()
        nas_mount = drv._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        secure_file_permissions = 'true'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_permissions,
            nas_mount, is_new_install)
        self.assertEqual('true', nas_option)
        secure_file_operations = 'true'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_operations,
            nas_mount, is_new_install)
        self.assertEqual('true', nas_option)
    def test_determine_nas_security_options_when_admin_set_false(self):
        """Test the setting of the NAS Security Option
        In this test case, the Admin set the flag to 'false'.
        """
        drv = self._driver
        drv._mounted_shares = [self.TEST_EXPORT]
        is_new_install = False
        drv._ensure_shares_mounted = mock.Mock()
        nas_mount = drv._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        secure_file_permissions = 'false'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_permissions,
            nas_mount, is_new_install)
        self.assertEqual('false', nas_option)
        secure_file_operations = 'false'
        nas_option = drv._determine_nas_security_option_setting(
            secure_file_operations,
            nas_mount, is_new_install)
        self.assertEqual('false', nas_option)
    @mock.patch.object(remotefs, 'LOG')
    def test_set_nas_security_options(self, LOG):
        """Test setting of NAS Security options.
        The RemoteFS driver will force set options to false. The derived
        objects will provide an inherited interface to properly set options.
        """
        drv = self._driver
        is_new_install = False
        drv.set_nas_security_options(is_new_install)
        self.assertEqual('false', drv.configuration.nas_secure_file_operations)
        self.assertEqual('false',
                         drv.configuration.nas_secure_file_permissions)
        self.assertTrue(LOG.warning.called)
    def test_secure_file_operations_enabled_true(self):
        """Test nas_secure_file_operations = 'true'
        Networked file system based drivers may support secure file
        operations. This test verifies the settings when secure.
        """
        drv = self._driver
        self.configuration.nas_secure_file_operations = 'true'
        ret_flag = drv.secure_file_operations_enabled()
        self.assertTrue(ret_flag)
    def test_secure_file_operations_enabled_false(self):
        """Test nas_secure_file_operations = 'false'
        Networked file system based drivers may support secure file
        operations. This test verifies the settings when not secure.
        """
        drv = self._driver
        self.configuration.nas_secure_file_operations = 'false'
        ret_flag = drv.secure_file_operations_enabled()
        self.assertFalse(ret_flag)
@ddt.ddt
class NfsDriverTestCase(test.TestCase):
    """Test case for NFS driver.

    Exercises nfs.NfsDriver with a mocked Configuration and a stubbed
    _execute, so no shares are actually mounted.
    """
    # Fixture constants shared by the tests below.
    TEST_NFS_HOST = 'nfs-host1'
    TEST_NFS_SHARE_PATH = '/export'
    TEST_NFS_EXPORT1 = '%s:%s' % (TEST_NFS_HOST, TEST_NFS_SHARE_PATH)
    TEST_NFS_EXPORT2 = 'nfs-host2:/export'
    TEST_NFS_EXPORT2_OPTIONS = '-o intr'
    TEST_SIZE_IN_GB = 1
    TEST_MNT_POINT = '/mnt/nfs'
    TEST_MNT_POINT_BASE_EXTRA_SLASH = '/opt/stack/data/cinder//mnt'
    TEST_MNT_POINT_BASE = '/mnt/test'
    TEST_LOCAL_PATH = '/mnt/nfs/volume-123'
    TEST_FILE_NAME = 'test.txt'
    TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
    TEST_NFS_EXPORT_SPACES = 'nfs-host3:/export this'
    TEST_MNT_POINT_SPACES = '/ 0 0 0 /foo'
    def setUp(self):
        """Create an NfsDriver with mocked configuration and _execute."""
        super(NfsDriverTestCase, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.append_config_values(mock.ANY)
        self.configuration.max_over_subscription_ratio = 1.0
        self.configuration.reserved_percentage = 5
        self.configuration.nfs_shares_config = None
        self.configuration.nfs_sparsed_volumes = True
        self.configuration.nfs_reserved_percentage = 5.0
        self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
        self.configuration.nfs_mount_options = None
        self.configuration.nfs_mount_attempts = 3
        self.configuration.nfs_qcow2_volumes = False
        self.configuration.nas_secure_file_permissions = 'false'
        self.configuration.nas_secure_file_operations = 'false'
        self.configuration.nas_host = None
        self.configuration.nas_share_path = None
        self.configuration.nas_mount_options = None
        self.configuration.volume_dd_blocksize = '1M'
        self._driver = nfs.NfsDriver(configuration=self.configuration)
        self._driver.shares = {}
        # Record every command the driver would have executed.
        mock_exc = mock.patch.object(self._driver, '_execute')
        self._execute = mock_exc.start()
        self.addCleanup(mock_exc.stop)
        self.context = context.get_admin_context()
    def test_local_path(self):
        """local_path common use case."""
        self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            provider_location=self.TEST_NFS_EXPORT1)
        # The hash segment is the md5 of the share's export string.
        self.assertEqual(
            '/mnt/test/2f4f60214cf43c595666dd815f0360a4/%s' % volume.name,
            drv.local_path(volume))
    @mock.patch.object(image_utils, 'qemu_img_info')
    @mock.patch.object(image_utils, 'resize_image')
    @mock.patch.object(image_utils, 'fetch_to_raw')
    def test_copy_image_to_volume(self, mock_fetch, mock_resize, mock_qemu):
        """resize_image common case usage."""
        drv = self._driver
        volume = fake_volume.fake_volume_obj(self.context,
                                             size=self.TEST_SIZE_IN_GB)
        TEST_IMG_SOURCE = 'volume-%s' % volume.id
        with mock.patch.object(drv, 'local_path',
                               return_value=TEST_IMG_SOURCE):
            data = mock.Mock()
            data.virtual_size = 1 * units.Gi
            mock_qemu.return_value = data
            drv.copy_image_to_volume(None, volume, None, None)
            mock_fetch.assert_called_once_with(
                None, None, None, TEST_IMG_SOURCE, mock.ANY, run_as_root=True,
                size=self.TEST_SIZE_IN_GB)
            mock_resize.assert_called_once_with(TEST_IMG_SOURCE,
                                                self.TEST_SIZE_IN_GB,
                                                run_as_root=True)
    def test_get_mount_point_for_share(self):
        """_get_mount_point_for_share should calculate correct value."""
        drv = self._driver
        self.configuration.nfs_mount_point_base = self.TEST_MNT_POINT_BASE
        self.assertEqual('/mnt/test/2f4f60214cf43c595666dd815f0360a4',
                         drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
    def test_get_mount_point_for_share_given_extra_slash_in_state_path(self):
        """_get_mount_point_for_share should calculate correct value."""
        # This test gets called with the extra slash
        self.configuration.nfs_mount_point_base = (
            self.TEST_MNT_POINT_BASE_EXTRA_SLASH)
        # The driver gets called with the correct configuration and removes
        # the extra slash
        drv = nfs.NfsDriver(configuration=self.configuration)
        self.assertEqual('/opt/stack/data/cinder/mnt', drv.base)
        self.assertEqual(
            '/opt/stack/data/cinder/mnt/2f4f60214cf43c595666dd815f0360a4',
            drv._get_mount_point_for_share(self.TEST_NFS_EXPORT1))
    def test_get_capacity_info(self):
        """_get_capacity_info should calculate correct value."""
        drv = self._driver
        stat_total_size = 2620544
        stat_avail = 2129984
        stat_output = '1 %d %d' % (stat_total_size, stat_avail)
        du_used = 490560
        du_output = '%d /mnt' % du_used
        with mock.patch.object(
                drv, '_get_mount_point_for_share') as mock_get_mount:
            mock_get_mount.return_value = self.TEST_MNT_POINT
            # First call answers 'stat', second answers 'du'.
            self._execute.side_effect = [(stat_output, None),
                                         (du_output, None)]
            self.assertEqual((stat_total_size, stat_avail, du_used),
                             drv._get_capacity_info(self.TEST_NFS_EXPORT1))
            mock_get_mount.assert_called_once_with(self.TEST_NFS_EXPORT1)
            calls = [mock.call('stat', '-f', '-c', '%S %b %a',
                               self.TEST_MNT_POINT, run_as_root=True),
                     mock.call('du', '-sb', '--apparent-size',
                               '--exclude', '*snapshot*',
                               self.TEST_MNT_POINT, run_as_root=True)]
            self._execute.assert_has_calls(calls)
    def test_get_capacity_info_for_share_and_mount_point_with_spaces(self):
        """_get_capacity_info should calculate correct value."""
        drv = self._driver
        stat_total_size = 2620544
        stat_avail = 2129984
        stat_output = '1 %d %d' % (stat_total_size, stat_avail)
        du_used = 490560
        du_output = '%d /mnt' % du_used
        with mock.patch.object(
                drv, '_get_mount_point_for_share') as mock_get_mount:
            mock_get_mount.return_value = self.TEST_MNT_POINT_SPACES
            self._execute.side_effect = [(stat_output, None),
                                         (du_output, None)]
            self.assertEqual((stat_total_size, stat_avail, du_used),
                             drv._get_capacity_info(
                                 self.TEST_NFS_EXPORT_SPACES))
            mock_get_mount.assert_called_once_with(
                self.TEST_NFS_EXPORT_SPACES)
            calls = [mock.call('stat', '-f', '-c', '%S %b %a',
                               self.TEST_MNT_POINT_SPACES, run_as_root=True),
                     mock.call('du', '-sb', '--apparent-size',
                               '--exclude', '*snapshot*',
                               self.TEST_MNT_POINT_SPACES, run_as_root=True)]
            self._execute.assert_has_calls(calls)
    def test_load_shares_config(self):
        """Comments, blanks and malformed lines in the shares file are
        skipped; export options after the share are preserved.
        """
        drv = self._driver
        drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        with mock.patch.object(
                drv, '_read_config_file') as mock_read_config:
            config_data = []
            config_data.append(self.TEST_NFS_EXPORT1)
            config_data.append('#' + self.TEST_NFS_EXPORT2)
            config_data.append('')
            config_data.append(self.TEST_NFS_EXPORT2 + ' ' +
                               self.TEST_NFS_EXPORT2_OPTIONS)
            config_data.append('broken:share_format')
            mock_read_config.return_value = config_data
            drv._load_shares_config(drv.configuration.nfs_shares_config)
            mock_read_config.assert_called_once_with(
                self.TEST_SHARES_CONFIG_FILE)
            self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
            self.assertIn(self.TEST_NFS_EXPORT2, drv.shares)
            self.assertEqual(2, len(drv.shares))
            self.assertEqual(self.TEST_NFS_EXPORT2_OPTIONS,
                             drv.shares[self.TEST_NFS_EXPORT2])
    def test_load_shares_config_nas_opts(self):
        """nas_host/nas_share_path take precedence over the shares file."""
        drv = self._driver
        drv.configuration.nas_host = self.TEST_NFS_HOST
        drv.configuration.nas_share_path = self.TEST_NFS_SHARE_PATH
        drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        drv._load_shares_config(drv.configuration.nfs_shares_config)
        self.assertIn(self.TEST_NFS_EXPORT1, drv.shares)
        self.assertEqual(1, len(drv.shares))
    def test_ensure_shares_mounted_should_save_mounting_successfully(self):
        """_ensure_shares_mounted should save share if mounted with success."""
        # NOTE(review): this test mocks the very method it then calls, so
        # it only verifies call wiring, not actual mounting behavior.
        drv = self._driver
        config_data = []
        config_data.append(self.TEST_NFS_EXPORT1)
        drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        with mock.patch.object(
                drv, '_read_config_file') as mock_read_config:
            with mock.patch.object(
                    drv, '_ensure_share_mounted') as mock_ensure:
                mock_read_config.return_value = config_data
                drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
                mock_ensure.assert_called_once_with(self.TEST_NFS_EXPORT1)
    @mock.patch.object(remotefs, 'LOG')
    def test_ensure_shares_mounted_should_not_save_mounting_with_error(self,
                                                                       LOG):
        """_ensure_shares_mounted should not save share if failed to mount."""
        # NOTE(review): like the success case above, _ensure_share_mounted
        # is mocked before being called, so only the (empty) mounted-share
        # list and the call itself are checked.
        drv = self._driver
        config_data = []
        config_data.append(self.TEST_NFS_EXPORT1)
        drv.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
        with mock.patch.object(
                drv, '_read_config_file') as mock_read_config:
            with mock.patch.object(
                    drv, '_ensure_share_mounted') as mock_ensure:
                mock_read_config.return_value = config_data
                drv._ensure_share_mounted()
                self.assertEqual(0, len(drv._mounted_shares))
                mock_ensure.assert_called_once_with()
    def test_find_share_should_throw_error_if_there_is_no_mounted_share(self):
        """_find_share should throw error if there is no mounted shares."""
        drv = self._driver
        drv._mounted_shares = []
        self.assertRaises(exception.NfsNoSharesMounted, drv._find_share,
                          self.TEST_SIZE_IN_GB)
    def test_find_share(self):
        """_find_share simple use case."""
        drv = self._driver
        drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
        with mock.patch.object(
                drv, '_get_capacity_info') as mock_get_capacity_info:
            # (total, available, allocated) per share; EXPORT2 has more
            # available space and should win.
            mock_get_capacity_info.side_effect = [
                (5 * units.Gi, 2 * units.Gi, 2 * units.Gi),
                (10 * units.Gi, 3 * units.Gi, 1 * units.Gi)]
            self.assertEqual(self.TEST_NFS_EXPORT2,
                             drv._find_share(self.TEST_SIZE_IN_GB))
            calls = [mock.call(self.TEST_NFS_EXPORT1),
                     mock.call(self.TEST_NFS_EXPORT2)]
            mock_get_capacity_info.assert_has_calls(calls)
            self.assertEqual(2, mock_get_capacity_info.call_count)
    def test_find_share_should_throw_error_if_there_is_not_enough_space(self):
        """_find_share should throw error if there is no share to host vol."""
        drv = self._driver
        drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
        with mock.patch.object(
                drv, '_get_capacity_info') as mock_get_capacity_info:
            # Both shares report zero available space.
            mock_get_capacity_info.side_effect = [
                (5 * units.Gi, 0, 5 * units.Gi),
                (10 * units.Gi, 0, 10 * units.Gi)]
            self.assertRaises(exception.NfsNoSuitableShareFound,
                              drv._find_share, self.TEST_SIZE_IN_GB)
            calls = [mock.call(self.TEST_NFS_EXPORT1),
                     mock.call(self.TEST_NFS_EXPORT2)]
            mock_get_capacity_info.assert_has_calls(calls)
            self.assertEqual(2, mock_get_capacity_info.call_count)
    def _simple_volume(self):
        """Return a minimal fake volume object placed on 127.0.0.1:/mnt."""
        return fake_volume.fake_volume_obj(self.context,
                                           display_name='volume_name',
                                           provider_location='127.0.0.1:/mnt',
                                           size=10)
    def test_create_sparsed_volume(self):
        """Sparse path of _do_create_volume creates a sparsed file."""
        drv = self._driver
        volume = self._simple_volume()
        self.override_config('nfs_sparsed_volumes', True)
        with mock.patch.object(
                drv, '_create_sparsed_file') as mock_create_sparsed_file:
            with mock.patch.object(
                    drv, '_set_rw_permissions') as mock_set_rw_permissions:
                drv._do_create_volume(volume)
                mock_create_sparsed_file.assert_called_once_with(mock.ANY,
                                                                 mock.ANY)
                mock_set_rw_permissions.assert_called_once_with(mock.ANY)
    def test_create_nonsparsed_volume(self):
        """Non-sparse path of _do_create_volume creates a regular file."""
        drv = self._driver
        self.configuration.nfs_sparsed_volumes = False
        volume = self._simple_volume()
        self.override_config('nfs_sparsed_volumes', False)
        with mock.patch.object(
                drv, '_create_regular_file') as mock_create_regular_file:
            with mock.patch.object(
                    drv, '_set_rw_permissions') as mock_set_rw_permissions:
                drv._do_create_volume(volume)
                mock_create_regular_file.assert_called_once_with(mock.ANY,
                                                                 mock.ANY)
                mock_set_rw_permissions.assert_called_once_with(mock.ANY)
    @mock.patch.object(nfs, 'LOG')
    def test_create_volume_should_ensure_nfs_mounted(self, mock_log):
        """create_volume ensures shares provided in config are mounted."""
        # NOTE(review): _ensure_share_mounted is mocked and also invoked
        # directly before create_volume, so the assertion covers the direct
        # call rather than proving create_volume performs the mount itself.
        drv = self._driver
        drv._find_share = mock.Mock()
        drv._find_share.return_value = self.TEST_NFS_EXPORT1
        drv._do_create_volume = mock.Mock()
        with mock.patch.object(
                drv, '_ensure_share_mounted') as mock_ensure_share:
            drv._ensure_share_mounted()
            volume = fake_volume.fake_volume_obj(self.context,
                                                 size=self.TEST_SIZE_IN_GB)
            drv.create_volume(volume)
            mock_ensure_share.assert_called_once_with()
    @mock.patch.object(nfs, 'LOG')
    def test_create_volume_should_return_provider_location(self, mock_log):
        """create_volume should return provider_location with found share."""
        drv = self._driver
        drv._ensure_shares_mounted = mock.Mock()
        drv._do_create_volume = mock.Mock()
        with mock.patch.object(drv, '_find_share') as mock_find_share:
            mock_find_share.return_value = self.TEST_NFS_EXPORT1
            volume = fake_volume.fake_volume_obj(self.context,
                                                 size=self.TEST_SIZE_IN_GB)
            result = drv.create_volume(volume)
            self.assertEqual(self.TEST_NFS_EXPORT1,
                             result['provider_location'])
            mock_find_share.assert_called_once_with(self.TEST_SIZE_IN_GB)
    def test_delete_volume(self):
        """delete_volume simple test case."""
        drv = self._driver
        drv._ensure_share_mounted = mock.Mock()
        volume = fake_volume.fake_volume_obj(
            self.context,
            display_name='volume-123',
            provider_location=self.TEST_NFS_EXPORT1)
        with mock.patch.object(drv, 'local_path') as mock_local_path:
            mock_local_path.return_value = self.TEST_LOCAL_PATH
            drv.delete_volume(volume)
            mock_local_path.assert_called_once_with(volume)
            self._execute.assert_called_once_with('rm', '-f',
                                                  self.TEST_LOCAL_PATH,
                                                  run_as_root=True)
    def test_delete_should_ensure_share_mounted(self):
        """delete_volume should ensure that corresponding share is mounted."""
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            display_name='volume-123',
            provider_location=self.TEST_NFS_EXPORT1)
        with mock.patch.object(
                drv, '_ensure_share_mounted') as mock_ensure_share:
            drv.delete_volume(volume)
            mock_ensure_share.assert_called_once_with(self.TEST_NFS_EXPORT1)
    def test_delete_should_not_delete_if_provider_location_not_provided(self):
        """delete_volume shouldn't delete if provider_location missed."""
        drv = self._driver
        volume = fake_volume.fake_volume_obj(self.context,
                                             name='volume-123',
                                             provider_location=None)
        with mock.patch.object(drv, '_ensure_share_mounted'):
            drv.delete_volume(volume)
            self.assertFalse(self._execute.called)
    def test_get_volume_stats(self):
        """get_volume_stats must fill the correct values."""
        drv = self._driver
        drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
        with mock.patch.object(
                drv, '_ensure_shares_mounted') as mock_ensure_share:
            with mock.patch.object(
                    drv, '_get_capacity_info') as mock_get_capacity_info:
                mock_get_capacity_info.side_effect = [
                    (10 * units.Gi, 2 * units.Gi, 2 * units.Gi),
                    (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)]
                drv._ensure_shares_mounted()
                drv.get_volume_stats()
                calls = [mock.call(self.TEST_NFS_EXPORT1),
                         mock.call(self.TEST_NFS_EXPORT2)]
                mock_get_capacity_info.assert_has_calls(calls)
                self.assertTrue(mock_ensure_share.called)
                # Totals are the sums over both shares, converted to GiB.
                self.assertEqual(30.0, drv._stats['total_capacity_gb'])
                self.assertEqual(5.0, drv._stats['free_capacity_gb'])
                self.assertEqual(5, drv._stats['reserved_percentage'])
                self.assertTrue(drv._stats['sparse_copy_volume'])
    def test_get_volume_stats_with_non_zero_reserved_percentage(self):
        """get_volume_stats must fill the correct values."""
        self.configuration.reserved_percentage = 10.0
        drv = nfs.NfsDriver(configuration=self.configuration)
        drv._mounted_shares = [self.TEST_NFS_EXPORT1, self.TEST_NFS_EXPORT2]
        with mock.patch.object(
                drv, '_ensure_shares_mounted') as mock_ensure_share:
            with mock.patch.object(
                    drv, '_get_capacity_info') as mock_get_capacity_info:
                mock_get_capacity_info.side_effect = [
                    (10 * units.Gi, 2 * units.Gi, 2 * units.Gi),
                    (20 * units.Gi, 3 * units.Gi, 3 * units.Gi)]
                drv._ensure_shares_mounted()
                drv.get_volume_stats()
                calls = [mock.call(self.TEST_NFS_EXPORT1),
                         mock.call(self.TEST_NFS_EXPORT2)]
                mock_get_capacity_info.assert_has_calls(calls)
                self.assertTrue(mock_ensure_share.called)
                self.assertEqual(30.0, drv._stats['total_capacity_gb'])
                self.assertEqual(5.0, drv._stats['free_capacity_gb'])
                self.assertEqual(10.0, drv._stats['reserved_percentage'])
    @ddt.data(True, False)
    def test_update_volume_stats(self, thin):
        """Thin vs. thick provisioning stats reported by
        _update_volume_stats; runs once for each ddt value of *thin*.
        """
        self._driver.configuration.max_over_subscription_ratio = 20.0
        self._driver.configuration.reserved_percentage = 5.0
        self._driver.configuration.nfs_sparsed_volumes = thin
        remotefs_volume_stats = {
            'volume_backend_name': 'fake_backend_name',
            'vendor_name': 'fake_vendor',
            'driver_version': 'fake_version',
            'storage_protocol': 'NFS',
            'total_capacity_gb': 100.0,
            'free_capacity_gb': 20.0,
            'reserved_percentage': 5.0,
            'QoS_support': False,
        }
        self.mock_object(remotefs.RemoteFSDriver, '_update_volume_stats')
        self._driver._stats = remotefs_volume_stats
        mock_get_provisioned_capacity = self.mock_object(
            self._driver, '_get_provisioned_capacity',
            mock.Mock(return_value=25.0))
        self._driver._update_volume_stats()
        # Thick provisioning reports total - free (80.0) instead of the
        # measured provisioned capacity (25.0).
        nfs_added_volume_stats = {
            'provisioned_capacity_gb': 25.0 if thin else 80.0,
            'max_over_subscription_ratio': 20.0,
            'reserved_percentage': 5.0,
            'thin_provisioning_support': thin,
            'thick_provisioning_support': not thin,
        }
        expected = remotefs_volume_stats
        expected.update(nfs_added_volume_stats)
        self.assertEqual(expected, self._driver._stats)
        self.assertEqual(thin, mock_get_provisioned_capacity.called)
    def _check_is_share_eligible(self, total_size, total_available,
                                 total_allocated, requested_volume_size):
        """Call _is_share_eligible with _get_capacity_info stubbed to the
        given (size, available, allocated) byte counts; return its result.
        """
        with mock.patch.object(self._driver, '_get_capacity_info')\
                as mock_get_capacity_info:
            mock_get_capacity_info.return_value = (total_size,
                                                   total_available,
                                                   total_allocated)
            return self._driver._is_share_eligible('fake_share',
                                                   requested_volume_size)
    def test_is_share_eligible(self):
        """A mostly-free share accepts a small volume."""
        total_size = 100.0 * units.Gi
        total_available = 90.0 * units.Gi
        total_allocated = 10.0 * units.Gi
        requested_volume_size = 1  # GiB
        self.assertTrue(self._check_is_share_eligible(total_size,
                                                      total_available,
                                                      total_allocated,
                                                      requested_volume_size))
    def test_share_eligibility_with_reserved_percentage(self):
        """A nearly-full share is rejected by the reserved-space check."""
        total_size = 100.0 * units.Gi
        total_available = 4.0 * units.Gi
        total_allocated = 96.0 * units.Gi
        requested_volume_size = 1  # GiB
        # Check used > used_ratio statement entered
        self.assertFalse(self._check_is_share_eligible(total_size,
                                                       total_available,
                                                       total_allocated,
                                                       requested_volume_size))
    def test_is_share_eligible_above_oversub_ratio(self):
        """A request larger than apparent free space is rejected."""
        total_size = 100.0 * units.Gi
        total_available = 10.0 * units.Gi
        total_allocated = 90.0 * units.Gi
        requested_volume_size = 10  # GiB
        # Check apparent_available <= requested_volume_size statement entered
        self.assertFalse(self._check_is_share_eligible(total_size,
                                                       total_available,
                                                       total_allocated,
                                                       requested_volume_size))
    def test_is_share_eligible_reserved_space_above_oversub_ratio(self):
        """An over-allocated share is rejected by the oversub check."""
        total_size = 100.0 * units.Gi
        total_available = 10.0 * units.Gi
        total_allocated = 100.0 * units.Gi
        requested_volume_size = 1  # GiB
        # Check total_allocated / total_size >= oversub_ratio
        # statement entered
        self.assertFalse(self._check_is_share_eligible(total_size,
                                                       total_available,
                                                       total_allocated,
                                                       requested_volume_size))
    def test_extend_volume(self):
        """Extend a volume by 1."""
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
            size=1,
            provider_location='nfs_share')
        path = 'path'
        newSize = volume['size'] + 1
        with mock.patch.object(image_utils, 'resize_image') as resize:
            with mock.patch.object(drv, 'local_path', return_value=path):
                with mock.patch.object(drv, '_is_share_eligible',
                                       return_value=True):
                    with mock.patch.object(drv, '_is_file_size_equal',
                                           return_value=True):
                        drv.extend_volume(volume, newSize)
                        resize.assert_called_once_with(path, newSize,
                                                       run_as_root=True)
    def test_extend_volume_failure(self):
        """Error during extend operation."""
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
            size=1,
            provider_location='nfs_share')
        with mock.patch.object(image_utils, 'resize_image'):
            with mock.patch.object(drv, 'local_path', return_value='path'):
                with mock.patch.object(drv, '_is_share_eligible',
                                       return_value=True):
                    # File size mismatch after resize signals failure.
                    with mock.patch.object(drv, '_is_file_size_equal',
                                           return_value=False):
                        self.assertRaises(exception.ExtendVolumeError,
                                          drv.extend_volume, volume, 2)
    def test_extend_volume_insufficient_space(self):
        """Insufficient space on nfs_share during extend operation."""
        drv = self._driver
        volume = fake_volume.fake_volume_obj(
            self.context,
            id='80ee16b6-75d2-4d54-9539-ffc1b4b0fb10',
            size=1,
            provider_location='nfs_share')
        with mock.patch.object(image_utils, 'resize_image'):
            with mock.patch.object(drv, 'local_path', return_value='path'):
                with mock.patch.object(drv, '_is_share_eligible',
                                       return_value=False):
                    with mock.patch.object(drv, '_is_file_size_equal',
                                           return_value=False):
                        self.assertRaises(exception.ExtendVolumeError,
                                          drv.extend_volume, volume, 2)
    def test_is_file_size_equal(self):
        """File sizes are equal."""
        drv = self._driver
        path = 'fake/path'
        size = 2
        data = mock.MagicMock()
        data.virtual_size = size * units.Gi
        with mock.patch.object(image_utils, 'qemu_img_info',
                               return_value=data):
            self.assertTrue(drv._is_file_size_equal(path, size))
    def test_is_file_size_equal_false(self):
        """File sizes are not equal."""
        drv = self._driver
        path = 'fake/path'
        size = 2
        data = mock.MagicMock()
        data.virtual_size = (size + 1) * units.Gi
        with mock.patch.object(image_utils, 'qemu_img_info',
                               return_value=data):
            self.assertFalse(drv._is_file_size_equal(path, size))
    @mock.patch.object(nfs, 'LOG')
    def test_set_nas_security_options_when_true(self, LOG):
        """Test higher level setting of NAS Security options.
        The NFS driver overrides the base method with a driver specific
        version.
        """
        drv = self._driver
        drv._mounted_shares = [self.TEST_NFS_EXPORT1]
        is_new_install = True
        drv._ensure_shares_mounted = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        drv._determine_nas_security_option_setting = mock.Mock(
            return_value='true')
        drv.set_nas_security_options(is_new_install)
        self.assertEqual('true', drv.configuration.nas_secure_file_operations)
        self.assertEqual('true', drv.configuration.nas_secure_file_permissions)
        self.assertFalse(LOG.warning.called)
    @mock.patch.object(nfs, 'LOG')
    def test_set_nas_security_options_when_false(self, LOG):
        """Test higher level setting of NAS Security options.
        The NFS driver overrides the base method with a driver specific
        version.
        """
        drv = self._driver
        drv._mounted_shares = [self.TEST_NFS_EXPORT1]
        is_new_install = False
        drv._ensure_shares_mounted = mock.Mock()
        drv._get_mount_point_for_share = mock.Mock(
            return_value=self.TEST_MNT_POINT)
        drv._determine_nas_security_option_setting = mock.Mock(
            return_value='false')
        drv.set_nas_security_options(is_new_install)
        self.assertEqual('false', drv.configuration.nas_secure_file_operations)
        self.assertEqual('false',
                         drv.configuration.nas_secure_file_permissions)
self.assertTrue(LOG.warning.called)
def test_set_nas_security_options_exception_if_no_mounted_shares(self):
"""Ensure proper exception is raised if there are no mounted shares."""
drv = self._driver
drv._ensure_shares_mounted = mock.Mock()
drv._mounted_shares = []
is_new_cinder_install = 'does not matter'
self.assertRaises(exception.NfsNoSharesMounted,
drv.set_nas_security_options,
is_new_cinder_install)
def test_ensure_share_mounted(self):
"""Case where the mount works the first time."""
self.mock_object(self._driver._remotefsclient, 'mount')
drv = self._driver
drv.configuration.nfs_mount_attempts = 3
drv.shares = {self.TEST_NFS_EXPORT1: ''}
drv._ensure_share_mounted(self.TEST_NFS_EXPORT1)
drv._remotefsclient.mount.called_once()
@mock.patch('time.sleep')
def test_ensure_share_mounted_exception(self, _mock_sleep):
"""Make the configured number of attempts when mounts fail."""
num_attempts = 3
self.mock_object(self._driver._remotefsclient, 'mount',
mock.Mock(side_effect=Exception))
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(num_attempts, drv._remotefsclient.mount.call_count)
def test_ensure_share_mounted_at_least_one_attempt(self):
"""Make at least one mount attempt even if configured for less."""
min_num_attempts = 1
num_attempts = 0
self.mock_object(self._driver._remotefsclient, 'mount',
mock.Mock(side_effect=Exception))
drv = self._driver
drv.configuration.nfs_mount_attempts = num_attempts
drv.shares = {self.TEST_NFS_EXPORT1: ''}
self.assertRaises(exception.NfsException, drv._ensure_share_mounted,
self.TEST_NFS_EXPORT1)
self.assertEqual(min_num_attempts,
drv._remotefsclient.mount.call_count)
class NfsDriverDoSetupTestCase(test.TestCase):
    """Tests for NfsDriver.do_setup() plus migration/retype behavior."""
    def setUp(self):
        # Fresh mock context and a real Configuration object per test.
        super(NfsDriverDoSetupTestCase, self).setUp()
        self.context = mock.Mock()
        self.create_configuration()
    def create_configuration(self):
        # Register the NFS driver options on a blank Configuration so tests
        # can tweak individual values via self.override_config().
        config = conf.Configuration(None)
        config.append_config_values(nfs.nfs_opts)
        self.configuration = config
    def test_setup_should_throw_error_if_shares_config_not_configured(self):
        """do_setup should throw error if shares config is not configured."""
        self.override_config('nfs_shares_config', None)
        drv = nfs.NfsDriver(configuration=self.configuration)
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        with self.assertRaisesRegex(exception.NfsException,
                                    ".*no NFS config file configured.*"):
            drv.do_setup(self.context)
        # Setup must bail out before ever touching the filesystem.
        self.assertEqual(0, mock_os_path_exists.call_count)
    def test_setup_should_throw_error_if_shares_file_does_not_exist(self):
        """do_setup should throw error if shares file does not exist."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = False
        with self.assertRaisesRegex(exception.NfsException,
                                    "NFS config file.*doesn't exist"):
            drv.do_setup(self.context)
        # The configured shares file path must have been probed.
        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
    def test_setup_should_not_throw_error_if_host_and_share_set(self):
        """do_setup shouldn't throw shares file error if host and share set."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        # With both nas_host and nas_share_path set, the shares file is not
        # required at all.
        self.override_config('nas_host', 'nfs-host1')
        self.override_config('nas_share_path', '/export')
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = False
        mock_set_nas_sec_options = self.mock_object(nfs.NfsDriver,
                                                    'set_nas_security_options')
        mock_set_nas_sec_options.return_value = True
        mock_execute = self.mock_object(drv, '_execute')
        mock_execute.return_value = True
        drv.do_setup(self.context)
        mock_os_path_exists.assert_not_called()
    def test_setup_throw_error_if_shares_file_does_not_exist_no_host(self):
        """do_setup should throw error if no shares file and no host set."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        # Only the share path is set; without a host the shares file is still
        # mandatory.
        self.override_config('nas_share_path', '/export')
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = False
        with self.assertRaisesRegex(exception.NfsException,
                                    "NFS config file.*doesn't exist"):
            drv.do_setup(self.context)
        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
    def test_setup_throw_error_if_shares_file_does_not_exist_no_share(self):
        """do_setup should throw error if no shares file and no share set."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        # Only the host is set; without a share path the shares file is still
        # mandatory.
        self.override_config('nas_host', 'nfs-host1')
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = False
        with self.assertRaisesRegex(exception.NfsException,
                                    "NFS config file.*doesn't exist"):
            drv.do_setup(self.context)
        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
    def test_setup_throw_error_if_shares_file_doesnt_exist_no_share_host(self):
        """do_setup should throw error if no shares file and no host/share."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = False
        with self.assertRaisesRegex(exception.NfsException,
                                    "NFS config file.*doesn't exist"):
            drv.do_setup(self.context)
        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
    def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
        """do_setup should throw error if nfs client is not installed."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = True
        mock_execute = self.mock_object(drv, '_execute')
        # ENOENT from the probe command means mount.nfs is missing.
        mock_execute.side_effect = OSError(
            errno.ENOENT, 'No such file or directory.')
        with self.assertRaisesRegex(exception.NfsException,
                                    'mount.nfs is not installed'):
            drv.do_setup(self.context)
        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
        mock_execute.assert_has_calls(
            [mock.call('mount.nfs',
                       check_exit_code=False,
                       run_as_root=True)])
    def test_setup_should_throw_exception_if_mount_nfs_command_fails(self):
        """do_setup should throw error if mount.nfs fails with OSError
        This test covers the OSError path when mount.nfs is installed.
        """
        drv = nfs.NfsDriver(configuration=self.configuration)
        mock_os_path_exists = self.mock_object(os.path, 'exists')
        mock_os_path_exists.return_value = True
        mock_execute = self.mock_object(drv, '_execute')
        # Any OSError other than ENOENT must be re-raised as-is.
        mock_execute.side_effect = OSError(
            errno.EPERM, 'Operation... BROKEN')
        with self.assertRaisesRegex(OSError, '.*Operation... BROKEN'):
            drv.do_setup(self.context)
        mock_os_path_exists.assert_has_calls(
            [mock.call(self.configuration.nfs_shares_config)])
        mock_execute.assert_has_calls(
            [mock.call('mount.nfs',
                       check_exit_code=False,
                       run_as_root=True)])
    @mock.patch.object(os, 'rename')
    def test_update_migrated_available_volume(self, rename_volume):
        # Available volume: file is renamed back to the original name.
        self._test_update_migrated_volume('available', rename_volume)
    @mock.patch.object(os, 'rename')
    def test_update_migrated_available_volume_rename_fail(self, rename_volume):
        # Rename failure: driver falls back to keeping the new name via
        # _name_id.
        self._test_update_migrated_volume('available', rename_volume,
                                          rename_exception=True)
    @mock.patch.object(os, 'rename')
    def test_update_migrated_in_use_volume(self, rename_volume):
        # In-use volume: no rename is possible, only the name id is updated.
        self._test_update_migrated_volume('in-use', rename_volume)
    def _test_update_migrated_volume(self, volume_status, rename_volume,
                                     rename_exception=False):
        # Shared scenario driver for update_migrated_volume(): builds a
        # source/destination volume pair and checks the returned model
        # update for each status / rename outcome.
        drv = nfs.NfsDriver(configuration=self.configuration)
        fake_volume_id = 'f51b5730-13b7-11e6-a238-fa163e67a298'
        fake_new_volume_id = '12341234-13b7-11e6-a238-fa163e67a298'
        fake_provider_source = 'fake_provider_source'
        fake_provider = 'fake_provider'
        base_dir = '/dir_base/'
        volume_name_template = 'volume-%s'
        original_volume_name = volume_name_template % fake_volume_id
        current_name = volume_name_template % fake_new_volume_id
        original_volume_path = base_dir + original_volume_name
        current_path = base_dir + current_name
        volume = fake_volume.fake_volume_obj(
            self.context,
            id=fake_volume_id,
            size=1,
            provider_location=fake_provider_source,
            _name_id=None)
        new_volume = fake_volume.fake_volume_obj(
            self.context,
            id=fake_new_volume_id,
            size=1,
            provider_location=fake_provider,
            _name_id=None)
        with mock.patch.object(drv, 'local_path') as local_path:
            local_path.return_value = base_dir + current_name
            if volume_status == 'in-use':
                # In-use: keep the new file name, point _name_id at it.
                update = drv.update_migrated_volume(self.context,
                                                    volume,
                                                    new_volume,
                                                    volume_status)
                self.assertEqual({'_name_id': fake_new_volume_id,
                                  'provider_location': fake_provider}, update)
            elif rename_exception:
                # Rename failed: same fallback as in-use.
                rename_volume.side_effect = OSError
                update = drv.update_migrated_volume(self.context,
                                                    volume,
                                                    new_volume,
                                                    volume_status)
                rename_volume.assert_called_once_with(current_path,
                                                      original_volume_path)
                self.assertEqual({'_name_id': fake_new_volume_id,
                                  'provider_location': fake_provider}, update)
            else:
                # Successful rename: original name restored, _name_id reset.
                update = drv.update_migrated_volume(self.context,
                                                    volume,
                                                    new_volume,
                                                    volume_status)
                rename_volume.assert_called_once_with(current_path,
                                                      original_volume_path)
                self.assertEqual({'_name_id': None,
                                  'provider_location': fake_provider}, update)
    def test_retype_is_there(self):
        """Ensure that driver.retype() is there."""
        drv = nfs.NfsDriver(configuration=self.configuration)
        v1 = fake_volume.fake_volume_obj(self.context)
        ret = drv.retype(self.context,
                         v1,
                         mock.sentinel.new_type,
                         mock.sentinel.diff,
                         mock.sentinel.host)
        # Base NFS driver does not support retype: (False, None).
        self.assertEqual((False, None), ret)
|
# python3
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline class definitions."""
import abc
import json
import os
from os import path
import subprocess
import pathlib
import time
import datetime as dt
import jinja2 as jinja
from google.cloud import container_v1
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from ml_pipeline_gen.parsers import NestedNamespace
from ml_pipeline_gen.parsers import parse_yaml
class _Component(object):
"""A BasePipeline component (behaves like a tree)."""
def __init__(self, role, comp_id, params=None):
self.role = role
self.id = comp_id
# TODO(humichael): support children reading parent's params.
self.params = params if params else {}
self.children = []
def add_child(self, comp):
self.children.append(comp)
class BasePipeline(abc.ABC):
    """Abstract class representing an ML pipeline.

    Holds a tree of _Component nodes rooted at a sentinel "start" node
    (id -1) and knows how to flatten it into a graph. Subclasses implement
    generate_pipeline() for a concrete orchestrator.
    """

    def __init__(self, model=None, config=None):
        self.model = model
        # Sentinel root; traversals skip id -1.
        self.structure = _Component("start", -1)
        self.size = 0
        if config:
            self.config = NestedNamespace(parse_yaml(config))
        else:
            self.config = config
        if self.model:
            # BUG FIX: the time half of the format was "%h%m%s" --
            # %h (abbreviated month on some platforms), %m (month) and
            # %s (platform-dependent epoch seconds) -- instead of the
            # intended hour/minute/second codes.
            now = dt.datetime.now().strftime("%y%m%d_%H%M%S")
            self.job_id = "{}_{}".format(self.model.model["name"], now)

    def list_components(self):
        """Prints the config entries that declare a pipeline component."""
        all_components = []
        if self.config is not None:
            for k, v in self.config.__dict__.items():
                if hasattr(v, "component"):
                    all_components.append(k)
        print(all_components)

    def add_train_component(self, parent=None, wait_interval=None):
        """Adds a train component after the specified parent."""
        if not parent:
            parent = self.structure
        params = {
            "wait_interval": wait_interval,
        }
        # Drop unset params so downstream templates only see explicit ones.
        params = {k: v for k, v in params.items() if v is not None}
        component = _Component("train", self.size, params=params)
        parent.add_child(component)
        self.size += 1
        return component

    def add_deploy_component(self, parent=None, model_uri=None,
                             wait_interval=None):
        """Adds a deploy component after the specified parent."""
        if not parent:
            parent = self.structure
        params = {
            "model_uri": model_uri,
            "wait_interval": wait_interval,
        }
        params = {k: v for k, v in params.items() if v is not None}
        component = _Component("deploy", self.size, params=params)
        parent.add_child(component)
        self.size += 1
        return component

    def add_predict_component(self, parent=None, version=None,
                              wait_interval=None):
        """Adds a predict component after the specified parent."""
        if not parent:
            parent = self.structure
        params = {
            "version": version,
            "wait_interval": wait_interval,
        }
        params = {k: v for k, v in params.items() if v is not None}
        component = _Component("predict", self.size, params=params)
        parent.add_child(component)
        self.size += 1
        return component

    def print_structure(self):
        """Prints the structure of the pipeline."""
        next_comps = [self.structure]
        while next_comps:
            comp = next_comps.pop()
            if comp.id != -1:
                print(comp.id, [x.id for x in comp.children])
            next_comps.extend(comp.children)

    def to_graph(self):
        """Represents the pipeline as edges and vertices.

        Returns:
            components: the vertices of the graph, indexed by component id.
            relations: the edges of the graph in (parent, child) form.
        """
        components = [None] * self.size
        relations = []
        next_comps = [self.structure]
        while next_comps:
            comp = next_comps.pop()
            next_comps.extend(comp.children)
            if comp.id != -1:
                components[comp.id] = comp
                for child in comp.children:
                    relations.append((comp.id, child.id))
        return components, relations

    @abc.abstractmethod
    def generate_pipeline(self):
        """Creates the files to compile a pipeline."""
        pass
class KfpPipeline(BasePipeline):
    """KubeFlow Pipelines class."""

    def __init__(self, model=None, config=None):
        super().__init__(model, config)
        # The custom label marks clusters whose Workload Identity auth has
        # already been configured by a previous run.
        if not self.check_cluster_label("mlpg_wi_auth"):
            self.setup_auth()
        self.update_hostname()

    def setup_auth(self):
        """Calls shell script to verify required auth for KFP cluster.

        The called script checks if the GKE cluster has Workload Identity
        enabled and configured with a custom label, and if not, enables it
        and updates the label.
        """
        model = self.model
        subprocess.call([
            "bin/wi_setup.sh",
            model.project_id,
            model.cluster_name,
            model.cluster_zone,
            # TODO(ashokpatelapk): Check if namespace can be a config var.
            "default"
        ])

    def update_hostname(self):
        """Updates Hostname (URL) of model object using current kube context."""
        # Checks default kubectl context from ~/.kube/config
        config.load_kube_config()
        name = "inverse-proxy-config"
        namespace = "default"
        instance = client.CoreV1Api()
        response = instance.read_namespaced_config_map(name, namespace)
        # Poll until the inverse-proxy ConfigMap is populated with the KFP
        # dashboard hostname.
        while response.data is None:
            print("Waiting for KFP Dashboard to be updated...")
            time.sleep(10)
            try:
                response = instance.read_namespaced_config_map(name, namespace)
            except ApiException as e:
                # BUG FIX: the format string was "{}}" which printed a stray
                # closing brace after the exception text.
                print("Exception -> CoreV1Api: {}".format(e))
                print("Waiting for KFP Dashboard to be updated...")
                time.sleep(30)
        self.model.orchestration["host"] = response.data["Hostname"]

    def check_cluster_label(self, label):
        """Checks a specified resourceLabel for a GKE cluster."""
        model = self.model
        # Renamed from ``client`` to avoid shadowing the imported
        # ``kubernetes.client`` module.
        gke_client = container_v1.ClusterManagerClient()
        cluster_name = "projects/{0}/locations/{1}/clusters/{2}".format(
            model.project_id,
            model.cluster_zone,
            model.cluster_name
        )
        response = gke_client.get_cluster(name=cluster_name)
        return response.resource_labels[label] == "true"

    def _get_train_params(self):
        """Returns parameters for training on CAIP."""
        model = self.model
        package_uri = model.upload_trainer_dist()
        params = {
            "project_id": model.project_id,
            "job_id_prefix": "train_{}".format(self.job_id),
            "training_input": {
                "scaleTier": model.scale_tier,
                "packageUris": [package_uri],
                "pythonModule": "trainer.task",
                "args": [
                    "--model_dir", model.get_model_dir(),
                ],
                "jobDir": model.get_job_dir(),
                "region": model.region,
                "runtimeVersion": model.runtime_version,
                "pythonVersion": model.python_version,
            },
        }
        return json.dumps(params, indent=4)

    def _get_deploy_params(self):
        """Returns parameters for deploying on CAIP."""
        model = self.model
        params = {
            "project_id": model.project_id,
            "model_id": "{}_kfp".format(model.model["name"]),
            "runtime_version": model.runtime_version,
            "python_version": model.python_version,
        }
        # TF models resolve their export dir at template time instead.
        if model.framework != "tensorflow":
            params["model_uri"] = model.get_model_dir()
        return json.dumps(params, indent=4)

    def _get_predict_params(self):
        """Returns parameters for predicting on CAIP."""
        model = self.model
        if not model.supports_batch_predict():
            raise RuntimeError("Batch predict not supported for this model.")
        pred_info = model.data["prediction"]
        inputs = pred_info["input_data_paths"]
        if not isinstance(inputs, list):
            inputs = [inputs]
        input_format = (pred_info["input_format"] if "input_format" in pred_info
                        else "DATA_FORMAT_UNSPECIFIED")
        output_format = (pred_info["output_format"]
                         if "output_format" in pred_info else "JSON")
        params = {
            "project_id": model.project_id,
            "model_path": model.get_parent(model=True),
            "input_paths": inputs,
            "input_data_format": input_format,
            "output_path": model.get_pred_output_path(),
            "region": model.region,
            "output_data_format": output_format,
            "job_id_prefix": "train_{}".format(self.job_id),
        }
        return json.dumps(params, indent=4)

    def _write_template(self, env, template_path, args, dest):
        """Renders a jinja template with ``args`` and writes it to ``dest``."""
        template = env.get_template(template_path)
        body = template.render(**args)
        with open(dest, "w+") as f:
            f.write(body)

    def generate_pipeline(self):
        """Creates the files to compile a pipeline."""
        loader = jinja.PackageLoader("ml_pipeline_gen", "templates")
        # FIX: lstrip_blocks expects a bool; the string "True" happened to
        # work only because any non-empty string is truthy.
        env = jinja.Environment(loader=loader, trim_blocks=True,
                                lstrip_blocks=True)
        components, relations = self.to_graph()
        model = self.model
        model_dir = model.get_model_dir()
        if model.framework == "tensorflow":
            # TODO(humichael): Need custom component to get best model.
            # VS: The componet is available
            model_dir = os.path.join(model_dir, "1", "export", "export")
        pipeline_args = {
            "train_params": self._get_train_params(),
            "model_dir": model_dir,
            "deploy_params": self._get_deploy_params(),
            "prediction_params": self._get_predict_params(),
            "components": components,
            "relations": relations,
            "host": model.orchestration["host"],
        }
        self._write_template(env, "kfp_pipeline.py", pipeline_args,
                             "orchestration/pipeline.py")

    def generate_pipeline_from_config(self):
        """Creates the files to compile a pipeline from config file."""
        template_files = [
            ("kfp_pipeline_from_config.py", "orchestration/pipeline.py"),
            ("example_pipeline.ipynb", "orchestration/pipeline.ipynb")
        ]
        loader = jinja.PackageLoader("ml_pipeline_gen", "templates")
        env = jinja.Environment(loader=loader, trim_blocks=True,
                                lstrip_blocks=True)
        for in_file, out_file in template_files:
            pipeline_template = env.get_template(in_file)
            pipeline_file = pipeline_template.render(
                config=self.config,
            )
            output_file = path.join(self.config.output_package, out_file)
            pathlib.Path(output_file).parent.mkdir(parents=True, exist_ok=True)
            with open(output_file, "w+") as f:
                f.write(pipeline_file)
|
<reponame>Jajc09/sifco-api
# clase para obtener información de la tabla sifco_request
from app import db
from app.utils.errors import BadRequestException
import datetime
import pytz
import psycopg2
import hashlib
# contiene la función update_request_assign y update_request_redeem
# actualiza en sifco_request la información de las consultas que se hacen a Sifco para asignación y redención
class Sifco_request:
    """Data access for the ``sifco_requests_<name>`` tables.

    Records the assignment and redemption queries made to Sifco. All
    methods are stateless, so they are exposed as static methods (callable
    on the class, as before, or on an instance).
    """

    @staticmethod
    def update_request_assign(input, name):
        """Insert an assignment request row into sifco_requests_<name>.

        A 3-field ``input`` means an unknown/new card: a stub row with
        status 404 is stored. Otherwise the full feature row is stored
        with status 200 and the current Bogota date/time.

        Raises:
            BadRequestException: if the insert fails.
        """
        tz = pytz.timezone('America/Bogota')
        date = datetime.datetime.now(tz = tz).strftime('%Y/%m/%d')
        time = datetime.datetime.now(tz = tz).strftime('%H:%M:%S')
        col_names = ['nu_tarjeta_sha256', 'status', 'terminal', 'cluster', 'monto_trx_cliente_historico',
                     'pckt_mcc_cant_trx', 'pckt_mcc_tck', 'activo_3m',
                     'no_trx_time', 'perc_cant_trx_total_historico', 'perc_cant_cliente_historico',
                     'perc_cant_trx_competencia_historico', 'perc_monto_trx_total_historico',
                     'perc_monto_cliente_historico',
                     'perc_monto_trx_competencia_historico', 'fecha', 'tipo_trx', 'hora']
        if len(input) == 3:
            # Normalize the card number to its SHA-256 hex digest (a 64-char
            # value is assumed to already be hashed).
            input['card'] = input['card'] if len(input['card']) == 64 else hashlib.sha256(
                str.encode(input['card'])).hexdigest()
            values = tuple([input['card']] + ['404'] + [input['terminal']] +
                           [None] * (len(col_names) - 5) + [date] + ['cliente_nuevo'])
        else:
            input['status'], input['fecha'], input['hora'] = '200', date, time
            values = tuple([str(input[key]) for key in col_names if key in input])
        nombres_col = ','.join([nombre.lower() for nombre in col_names])
        try:
            cursor = db.cursor()
            # Column values are bound parameters; only the column list and
            # table suffix are interpolated (``name`` is app-controlled).
            strings = ['%s' for i in range(len(nombres_col.split(',')))]
            sql_insert_query = """ insert into sifco_requests_"""+name+""" (""" + \
                               nombres_col + """ ) values ( """ + ','.join(
                                   strings) + """) """
            cursor.execute(sql_insert_query, values)
            db.commit()
            cursor.close()
            return None
        except (Exception, psycopg2.Error) as error:
            # Preserve the original cause for debugging.
            raise BadRequestException("Falla en la carga de datos") from error

    @staticmethod
    def update_request_redeem(input, name, codes_query):
        """Insert a redemption request row into sifco_requests_<name>.

        Only unsuccessful redemption transactions are stored here; the
        successful one can be seen in the sifco_discount table.
        ``codes_query`` semantics: 0 -> new card, 1 -> valid code,
        2 -> invalid code (nothing is stored).

        Raises:
            BadRequestException: if the insert fails.
        """
        colnames = [column.lower() for column in input]
        input = dict(zip(colnames, input.values()))
        tz = pytz.timezone('America/Bogota')
        date = datetime.datetime.now(tz=tz).strftime('%Y/%m/%d %H:%M:%S')
        col_names = ['nu_tarjeta_sha256', 'status', 'terminal', 'cluster', 'monto_trx_cliente_historico',
                     'pckt_mcc_cant_trx', 'pckt_mcc_tck', 'activo_3m',
                     'no_trx_time', 'perc_cant_trx_total_historico', 'perc_cant_cliente_historico',
                     'perc_cant_trx_competencia_historico', 'perc_monto_trx_total_historico',
                     'perc_monto_cliente_historico',
                     'perc_monto_trx_competencia_historico', 'fecha', 'tipo_trx']
        if len(input) == 4:
            # Normalize the card number to its SHA-256 hex digest.
            input['card'] = input['card'] if len(input['card']) == 64 else hashlib.sha256(
                str.encode(input['card'])).hexdigest()
            values = tuple(
                [input['card']] + ['404'] + [input['terminal']] + [None] * (len(col_names) - 5) + [date] + [
                    'redención'])
        else:
            input['status'], input['fecha'] = '200', date
            values = tuple([str(input[key]) for key in col_names if key in input])
        nombres_col = ','.join([nombre.lower() for nombre in col_names])
        try:
            if codes_query == 1 or codes_query == 0:
                cursor = db.cursor()
                strings = ['%s' for i in range(len(nombres_col.split(',')))]
                sql_insert_query = """ insert into sifco_requests_""" + name + """ (""" + \
                                   nombres_col + """ ) values ( """ + ','.join(
                                       strings) + """) """
                cursor.execute(sql_insert_query, values)
                db.commit()
                cursor.close()
            # codes_query == 2 (invalid code) stores nothing.
            return None
        except (Exception, psycopg2.Error) as error:
            raise BadRequestException("Falló la carga de los datos") from error

    @staticmethod
    def get_trx_from_card(input):
        """Return today's redemption-attempt count for a card.

        Expects ``input`` to contain the table suffix under 'name' and the
        card under 'card' (4-field form) or 'nu_tarjeta_sha256'.
        """
        keys = [column.lower() for column in input]
        values = input.values()
        input = dict(zip(keys, values))
        if len(input) == 4:
            input['nu_tarjeta_sha256'] = input.pop('card')
        cursor = db.cursor()
        # SECURITY FIX: the card hash is now passed as a bound parameter
        # instead of being interpolated into the SQL text. The table suffix
        # cannot be parameterized; ``name`` is assumed app-controlled --
        # flagging it here in case it can ever carry user input.
        query = """
            select
                count(tipo_trx)
            from
                sifco_requests_%s as sd
            where
                nu_tarjeta_sha256 = %%s
                and to_char(current_date, 'YYYY-MM-DD') =
                    to_char(to_timestamp(sd.fecha, 'YYYY-MM-DD'), 'YYYY-MM-DD')
        """ % input['name']
        cursor.execute(query, (input['nu_tarjeta_sha256'],))
        results = cursor.fetchall()
        cursor.close()
        return results[0][0]
# System imports
import os
import sys
import time
import utils
import argparse
# Local imports
from utils import ENGYN_CONFIG
# Command-line interface scaffolding for the generator script.
# NOTE(review): parse_args() is never called in the __main__ block below,
# so these options currently have no effect -- confirm intended usage.
parser = argparse.ArgumentParser(description = "This is a utility to create boilerplate apps")
create_args = parser.add_argument_group("create options")
create_args.add_argument('-c', '--create', action = 'store_true')
# Group reserved for future delete options (no arguments registered yet).
delete_args = parser.add_argument_group("delete options")
class app_generator:
    """Generates boilerplate source files for a new app and registers it.

    Reads template locations from utils.ENGYN_CONFIG and writes the app's
    header/cpp files plus an app-registry entry.
    """

    def __init__(self, app_name, app_id, app_type):
        self.app_name = app_name
        self.app_id = app_id
        self.app_type = app_type
        self.err = 0
        # Get filestem, remove awiros_ prefix from front.
        if(self.app_name.startswith('awiros_')):
            self.app_stem = app_name[7:]
        else:
            # BUG FIX: previously app_stem was never set for names without
            # the prefix, causing an AttributeError later; fall back to the
            # full name.
            self.app_stem = app_name

    def init(self):
        """Run the full generation flow: files, registry, build, cleanup."""
        print("Generating boilerplate ..")
        self.gen_app_files()
        self.entry_in_app_registry()
        #self.integrate_app_with_engyn()
        self.build()
        self.run()
        self.cleanup()
        print("Done!")

    def gen_app_files(self):
        """Create the header and cpp files, aborting on the first failure."""
        status = self.create_header_file()
        if (status != 0):
            self.err_exit("create_header_file")
        status = self.create_cpp_file()
        if (status != 0):
            self.err_exit("create_cpp_file")

    def entry_in_app_registry(self):
        """Append '<id> <stem>' to the app registry file."""
        with open(ENGYN_CONFIG.SRC_DIR+'/app-registry.txt', 'a') as file:
            # BUG FIX: a trailing newline is required, otherwise successive
            # registry entries run together on one line.
            file.write(str(self.app_id)+' '+self.app_stem+'\n')

    def create_header_file(self):
        """Create <stem>/<stem>.<HEADER_EXT> from the default template.

        Returns 0 on success, non-zero on failure (also stored in self.err).
        """
        os.mkdir(ENGYN_CONFIG.SRC_DIR+'/'+self.app_stem)
        filepath = os.path.join(ENGYN_CONFIG.SRC_DIR, self.app_stem ,"{}.{}".format(self.app_stem, ENGYN_CONFIG.HEADER_EXT))
        # create empty
        status = utils.touch(filepath)
        if(status != 0):
            self.err = status
            return status
        # parse content
        content = utils.parse(ENGYN_CONFIG.DEFAULT_HEADER)
        # inject class name (both cased and upper-cased occurrences, e.g.
        # include guards)
        content = content.replace(ENGYN_CONFIG.DEFAULT_APP_STEM, self.app_stem)\
            .replace(ENGYN_CONFIG.DEFAULT_APP_STEM.upper(), self.app_stem.upper())
        # write content
        status = utils.write_content(content, filepath)
        if(status != 0):
            self.err = status
        return status

    def create_cpp_file(self):
        """Create <stem>/<stem>.<CPP_EXT> from the default template.

        Returns 0 on success, non-zero on failure (also stored in self.err).
        """
        # create empty
        filepath = os.path.join(ENGYN_CONFIG.SRC_DIR, self.app_stem,"{}.{}".format(self.app_stem, ENGYN_CONFIG.CPP_EXT))
        status = utils.touch(filepath)
        if(status != 0):
            self.err = status
            return status
        # parse content
        content = utils.parse(ENGYN_CONFIG.DEFAULT_CPP)
        # inject class name
        content = content.replace(ENGYN_CONFIG.DEFAULT_APP_STEM, self.app_stem)
        # write content
        status = utils.write_content(content, filepath)
        if(status != 0):
            self.err = status
        return status

    def integrate_app_with_engyn(self):
        """Wire the new app into the engyn sources (consts/switch/includes)."""
        status = self.parse_consts()
        if (status != 0):
            self.err_exit("parse_consts")
        status = self.parse_handleacs()
        if (status != 0):
            self.err_exit("parse_handleacs")
        status = self.parse_conversion_layer()
        if (status != 0):
            self.err_exit("parse_conversion_layer")

    def parse_consts(self):
        """Insert the app's enum constant into the consts file."""
        filepath = os.path.join(ENGYN_CONFIG.SRC_DIR, ENGYN_CONFIG.CONSTS_FILE)
        # parse
        content = utils.parse_content_to_list(filepath)
        # locate closing line of struct
        idx = utils.locate_element_with_substr(content, '};')
        func_name = ENGYN_CONFIG.CONSTS_APP_PREFIX + self.app_stem.upper()
        # add app number
        content.insert(idx - 1, "    {} = {},".format(func_name, self.app_id))
        # consolidate
        content = '\n'.join(map(str, content))
        # write back
        status = utils.write_content(content, filepath)
        if (status != 0):
            self.err = status
        return status

    def parse_handleacs(self):
        """Insert a switch case that starts the new app."""
        filepath = os.path.join(ENGYN_CONFIG.SRC_DIR, ENGYN_CONFIG.HANDLEACS_FILE)
        # parse
        content = utils.parse_content_to_list(filepath)
        # locate closing of switch case
        idx = utils.locate_element_with_substr(content, 'default:')
        func_name = ENGYN_CONFIG.CONSTS_APP_PREFIX + self.app_stem.upper()
        # add switch case (inserted in reverse so it reads top-down in the
        # file: blank line, case label, start_app call, break)
        content.insert(idx - 1, "\tbreak;".expandtabs(8))
        content.insert(idx - 1,
                       "\tconv_layer_obj.start_app<{}>();".format(self.app_name).expandtabs(8))
        content.insert(idx - 1, "\tcase {}:".format(func_name).expandtabs(4))
        content.insert(idx - 1, "")
        # consolidate
        content = '\n'.join(map(str, content))
        # write back
        status = utils.write_content(content, filepath)
        if (status != 0):
            self.err = status
        return status

    def parse_conversion_layer(self):
        """Add the new app's header include to the conversion layer."""
        filepath = os.path.join(ENGYN_CONFIG.SRC_DIR, ENGYN_CONFIG.CONVERSION_FILE)
        # parse
        content = utils.parse_content_to_list(filepath)
        # locate last include
        idx = utils.locate_element_with_substr(content, '#include', last_occ = True)
        # add header include
        content.insert(idx + 1, '#include "{}/{}.{}"'.format(ENGYN_CONFIG.SRC_DIR, self.app_stem, ENGYN_CONFIG.HEADER_EXT))
        # consolidate
        content = '\n'.join(map(str, content))
        # write back
        status = utils.write_content(content, filepath)
        if (status != 0):
            self.err = status
        return status

    def build(self):
        # Placeholder: build step not implemented yet.
        return 1

    def run(self):
        # Placeholder: run step not implemented yet.
        return 1

    def cleanup(self):
        # Placeholder: cleanup step not implemented yet.
        return 1

    def perror(self, msg):
        """Print an error message to stdout."""
        print("error: {}".format(msg))

    def err_exit(self, msg):
        """Report *msg*, clean up, and terminate the process."""
        self.perror(msg)
        self.cleanup()
        quit()
if __name__ == "__main__":
"""
Get necessary variables from user
"""
app_name = "awiros_" + raw_input("Enter app name: awiros_")
app_id = input("Enter app ID: ")
app_type = raw_input("Enter app type (Defualt: basic): ")
if(len(app_type) == 0):
app_type = "basic"
app = app_generator(app_name, app_id, app_type)
#app = app_generator("awiros_test", 1000, "basic")
app.init()
|
<gh_stars>0
import numpy as np
from numpy.matlib import rand,zeros,ones,empty,eye
import scipy
class matrixops():
    """Kriging correlation-matrix helpers (mixin).

    NOTE(review): relies on attributes supplied by the inheriting class
    before __init__ runs: self.n (samples), self.k (dimensions), self.X
    (n x k inputs), self.y (targets), self.theta and self.pl
    (correlation hyperparameters) -- confirm against the consumer.
    """

    def __init__(self):
        self.LnDetPsi = None
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin float is the documented replacement.
        self.Psi = np.zeros((self.n, self.n), dtype=float)
        self.psi = np.zeros((self.n, 1))
        self.one = np.ones(self.n)
        self.mu = None
        self.U = None
        self.SigmaSqr = None
        self.Lambda = 1
        self.updateData()

    def updateData(self):
        """Precompute pairwise |X_i - X_j| distances (upper triangle only)."""
        self.distance = np.zeros((self.n, self.n, self.k))
        for i in range(self.n):
            for j in range(i + 1, self.n):
                self.distance[i, j] = np.abs((self.X[i] - self.X[j]))

    def updatePsi(self):
        """Build the correlation matrix Psi and its Cholesky factor U."""
        self.Psi = np.zeros((self.n, self.n), dtype=float)
        self.one = np.ones(self.n)
        self.psi = np.zeros((self.n, 1))
        newPsi = np.exp(-np.sum(self.theta * np.power(self.distance, self.pl), axis=2))
        self.Psi = np.triu(newPsi, 1)
        # np.mat / numpy.matlib.eye are deprecated aliases; asmatrix(eye)
        # builds the same np.matrix identity. np.spacing(1) nudges the
        # diagonal for numerical stability of the Cholesky factorization.
        ident = np.asmatrix(np.eye(self.n))
        self.Psi = self.Psi + self.Psi.T + ident + np.multiply(ident, np.spacing(1))
        self.U = np.linalg.cholesky(self.Psi)
        # Store the upper-triangular factor.
        self.U = self.U.T

    def regupdatePsi(self):
        """Build Psi with regularization Lambda on the diagonal, and U."""
        self.Psi = np.zeros((self.n, self.n), dtype=float)
        self.one = np.ones(self.n)
        self.psi = np.zeros((self.n, 1))
        newPsi = np.exp(-np.sum(self.theta * np.power(self.distance, self.pl), axis=2))
        self.Psi = np.triu(newPsi, 1)
        ident = np.asmatrix(np.eye(self.n))
        self.Psi = self.Psi + self.Psi.T + ident + ident * (self.Lambda)
        self.U = np.linalg.cholesky(self.Psi)
        self.U = np.matrix(self.U.T)

    def neglikelihood(self):
        """Compute mu, SigmaSqr and the negative concentrated ln-likelihood."""
        # ln|Psi| via the Cholesky factor: 2*sum(ln|diag(U)|).
        self.LnDetPsi = 2 * np.sum(np.log(np.abs(np.diag(self.U))))
        a = np.linalg.solve(self.U.T, self.one.T)
        b = np.linalg.solve(self.U, a)
        c = self.one.T.dot(b)
        d = np.linalg.solve(self.U.T, self.y)
        e = np.linalg.solve(self.U, d)
        self.mu = (self.one.T.dot(e)) / c
        self.SigmaSqr = ((self.y - self.one.dot(self.mu)).T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T, (self.y - self.one.dot(self.mu)))))) / self.n
        self.NegLnLike = -1. * (-(self.n / 2.) * np.log(self.SigmaSqr) - 0.5 * self.LnDetPsi)

    def regneglikelihood(self):
        """Regularized variant of neglikelihood()."""
        self.LnDetPsi = 2 * np.sum(np.log(np.abs(np.diag(self.U))))
        mu = (self.one.T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T, self.y)))) / self.one.T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T, self.one)))
        self.mu = mu
        self.SigmaSqr = ((self.y - self.one.dot(self.mu)).T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T, (self.y - self.one.dot(self.mu)))))) / self.n
        self.NegLnLike = -1. * (-(self.n / 2.) * np.log(self.SigmaSqr) - 0.5 * self.LnDetPsi)

    def predict_normalized(self, x):
        """Kriging predictor at point x (in normalized coordinates)."""
        for i in range(self.n):
            self.psi[i] = np.exp(-np.sum(self.theta * np.power((np.abs(self.X[i] - x)), self.pl)))
        z = self.y - self.one.dot(self.mu)
        a = np.linalg.solve(self.U.T, z)
        b = np.linalg.solve(self.U, a)
        c = self.psi.T.dot(b)
        f = self.mu + c
        return f[0]

    def predicterr_normalized(self, x):
        """Predicted standard error at point x."""
        for i in range(self.n):
            try:
                self.psi[i] = np.exp(-np.sum(self.theta * np.power((np.abs(self.X[i] - x)), self.pl)))
            except Exception as e:
                print(Exception, e)
        try:
            SSqr = self.SigmaSqr * (1 - self.psi.T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T, self.psi))))
        except Exception as e:
            print(self.U.shape)
            print(self.SigmaSqr.shape)
            print(self.psi.shape)
            print(Exception, e)
            # NOTE(review): if the solve fails, SSqr is unbound and the line
            # below raises NameError -- kept as-is to preserve behavior.
            pass
        SSqr = np.abs(SSqr[0])
        return np.power(SSqr, 0.5)[0]

    def regression_predicterr_normalized(self, x):
        """Predicted standard error at x for the regularized model."""
        for i in range(self.n):
            try:
                self.psi[i] = np.exp(-np.sum(self.theta * np.power((np.abs(self.X[i] - x)), self.pl)))
            except Exception as e:
                print(Exception, e)
        try:
            SSqr = self.SigmaSqr * (1 + self.Lambda - self.psi.T.dot(np.linalg.solve(self.U, np.linalg.solve(self.U.T, self.psi))))
        except Exception as e:
            print(Exception, e)
            pass
        SSqr = np.abs(SSqr[0])
        return np.power(SSqr, 0.5)[0]
# <reponame>TaoZheng9/br <filename>trainPMOriginEval.py  (file-boundary artifact)
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.customBM_models import ModelBuilder
from models.modelRatioPMOrigin2in_audioVisual import AudioVisualModel
from torch.autograd import Variable
from tensorboardX import SummaryWriter
def create_optimizer(nets, opt):
    """Build one optimizer over both sub-networks with per-network LRs.

    Parameters
    ----------
    nets : tuple of (net_visual, net_audio) torch modules
    opt : options namespace providing optimizer, lr_visual, lr_audio,
        beta1 and weight_decay

    Returns
    -------
    torch.optim.Optimizer

    Raises
    ------
    ValueError
        If opt.optimizer is neither 'sgd' nor 'adam'. (BUGFIX: the original
        silently returned None, which crashed later at optimizer.zero_grad().)
    """
    (net_visual, net_audio) = nets
    param_groups = [{'params': net_visual.parameters(), 'lr': opt.lr_visual},
                    {'params': net_audio.parameters(), 'lr': opt.lr_audio}]
    if opt.optimizer == 'sgd':
        return torch.optim.SGD(param_groups, momentum=opt.beta1, weight_decay=opt.weight_decay)
    if opt.optimizer == 'adam':
        return torch.optim.Adam(param_groups, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay)
    raise ValueError("Unsupported optimizer: %s" % opt.optimizer)
def decrease_learning_rate(optimizer, decay_factor=0.94):
    """Multiply the learning rate of every parameter group by decay_factor."""
    for group in optimizer.param_groups:
        group['lr'] = group['lr'] * decay_factor
#used to display validation loss
def display_val(model, loss_criterion, writer, index, dataset_val, opt):
    """Evaluate up to opt.validation_batches batches and report losses.

    Returns the average combined loss (bm + phase), which the training loop
    uses to track the best model. Logs to tensorboard when opt.tensorboard
    is set; the mse loss is computed for reporting only.
    """
    batch_loss = []
    batch_mse_loss = []
    batch_bm_loss = []
    batch_phase_loss = []
    with torch.no_grad():
        for i, val_data in enumerate(dataset_val):
            if i < opt.validation_batches:
                output = model.forward(val_data)
                mse_loss = loss_criterion(output['predicted_spectrogram'], output['audio_gt'])
                bm_loss = loss_criterion(output['BM_pred'], output['BM_gt'])
                phase_loss = loss_criterion(output['pred_ratio_phase'], output['ratio_phase_gt'])
                loss = bm_loss+phase_loss # mse_loss intentionally excluded, matching training
                # BUGFIX: store the python float (.item()), not the tensor,
                # consistent with every other accumulator below
                batch_phase_loss.append(phase_loss.item())
                batch_loss.append(loss.item())
                batch_mse_loss.append(mse_loss.item())
                batch_bm_loss.append(bm_loss.item())
            else:
                break
    avg_loss = sum(batch_loss)/len(batch_loss)
    avg_mse_loss = sum(batch_mse_loss)/len(batch_mse_loss)
    avg_bm_loss = sum(batch_bm_loss)/len(batch_bm_loss)
    avg_phase_loss = sum(batch_phase_loss)/len(batch_phase_loss)
    if opt.tensorboard:
        # NOTE(review): the 'bm__loss' tag (double underscore) is kept as-is
        # so existing tensorboard dashboards keep working
        writer.add_scalar('data/val_loss', avg_loss, index)
        writer.add_scalar('data/mse_loss', avg_mse_loss, index)
        writer.add_scalar('data/bm__loss', avg_bm_loss, index)
    print('val loss: %.3f' % avg_loss)
    print('val mse loss: %.3f' % avg_mse_loss)
    print('val bm loss: %.3f' % avg_bm_loss)
    print('val ph loss: %.3f' % avg_phase_loss)
    return avg_loss
def display_otherMetric(model, writer, dataset_val, opt):
    """Compute magnitude-weighted bias metrics of BM_pred vs BM_gt.

    For each element, the bias is 1 - pred/gt when under-predicting and
    gt/pred - 1 when over-predicting (so it lies in [-1, 1], 0 = perfect).
    The bias is averaged weighted by the mixture magnitude (and, separately,
    by its log1p). Returns the magnitude-weighted average bias.

    NOTE(review): assumes val_data['audio_input_spec'] is (B, 2, F, T) with
    real/imag in channel 0/1 and that the last frequency bin is dropped to
    match BM_pred -- confirm against the data loader.
    """
    # eps/eps2/magBiases2/logMagBiases2 are currently unused (kept as-is).
    eps=1e-8
    eps2=1e-4
    magBiases = []
    logMagBiases = []
    magBiases2 = []
    logMagBiases2 = []
    with torch.no_grad():
        for i, val_data in enumerate(dataset_val):
            if i < opt.validation_batches:
                output = model.forward(val_data)
                # B C F T
                #print("shape of output:",output['BM_pred'].shape,output['BM_gt'].shape)
                one=torch.ones(1).to(opt.device)
                # magnitude of the complex input spectrogram, last freq bin dropped
                magM=((val_data['audio_input_spec'][:,0:1]**2+val_data['audio_input_spec'][:,1:]**2)[:,:,:-1]**0.5).to(opt.device)# input 0:1 1: prevent B C F t to be BFT
                #print("max min magM",magM.max(),magM.min())
                logMagM=torch.log1p(magM)
                magHatD=(output['BM_pred'])
                magGtD=(output['BM_gt'])
                #print("shape of mag:",magM.shape,magHatD.shape,magGtD.shape)
                #print("device:",one.device,magM.device,magHatD.device,magGtD.device)
                # signed relative bias per time-frequency cell
                FtBias=torch.zeros_like(magHatD)
                FtBias[magHatD<=magGtD]=(one-magHatD/magGtD)[magHatD<=magGtD]
                FtBias[magHatD>magGtD]=(magGtD/magHatD-one)[magHatD>magGtD]
                # weighted average over (F, T), then mean over batch/channel
                logMagBias=torch.sum(logMagM*(FtBias),dim=[2,3])/torch.sum(logMagM,dim=[2,3])
                #print("shape of weighted average bias",logMagBias.shape)
                logMagBias=torch.mean(logMagBias)
                #print("shape of mean of weighted average bias",logMagBias.shape)
                magBias=torch.sum(magM*(FtBias),dim=[2,3])/torch.sum(magM,dim=[2,3])
                magBias=torch.mean(magBias)
                magBiases.append(magBias.item())
                logMagBiases.append(logMagBias.item())
            else:
                break
    avg_loss = sum(magBiases)/len(magBiases)
    avg_logloss = sum(logMagBiases)/len(logMagBiases)
    if opt.tensorboard:
        # NOTE(review): no global_step is passed here, unlike display_val --
        # confirm whether the step index was intended
        writer.add_scalar('data/val_loss', avg_loss)
    print('bias log: %.3f' % avg_logloss)
    print('bias: %.3f' % avg_loss)
    return avg_loss
#parse arguments
opt = TrainOptions().parse()
opt.device = torch.device("cuda")

#construct data loader
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training clips = %d' % dataset_size)

#create validation set data loader if validation_on option is set
if opt.validation_on:
    #temporarily set to val to load val data
    opt.mode = 'val'
    data_loader_val = CreateDataLoader(opt)
    dataset_val = data_loader_val.load_data()
    dataset_size_val = len(data_loader_val)
    print('#validation clips = %d' % dataset_size_val)
    opt.mode = 'train' #set it back

if opt.tensorboard:
    # SummaryWriter is already imported at the top of this file
    writer = SummaryWriter(comment=opt.name)
else:
    writer = None

# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
        ngf=opt.unet_ngf,
        input_nc=opt.unet_input_nc,
        output_nc=opt.unet_output_nc,
        weights=opt.weights_audio, relu=True, sigmoid=False, batch_norm=True)
nets = (net_visual, net_audio)

# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)

# set up optimizer
optimizer = create_optimizer(nets, opt)

# set up loss function
loss_criterion = torch.nn.MSELoss()
if(len(opt.gpu_ids) > 0):
    loss_criterion.cuda(opt.gpu_ids[0])

# initialization
total_steps = 0
data_loading_time = []
model_forward_time = []
model_backward_time = []
batch_loss = []
batch_mse_loss = []
batch_bm_loss = []
batch_phase_loss = []
best_err = float("inf")

for epoch in range(1, opt.niter+1):
    torch.cuda.synchronize()
    epoch_start_time = time.time()

    if(opt.measure_time):
        iter_start_time = time.time()
    for i, data in enumerate(dataset):
        if(opt.measure_time):
            torch.cuda.synchronize()
            iter_data_loaded_time = time.time()

        total_steps += opt.batchSize

        # forward pass
        model.zero_grad()
        output = model.forward(data)

        # compute loss
        mse_loss = loss_criterion(output['predicted_spectrogram'], output['audio_gt'])
        bm_loss = loss_criterion(output['BM_pred'], output['BM_gt'])
        phase_loss = loss_criterion(output['pred_ratio_phase'], output['ratio_phase_gt'])
        loss = bm_loss+phase_loss # mse_loss intentionally excluded from the objective
        # BUGFIX: store the python float (.item()), not the tensor -- keeping
        # the tensor retains its autograd graph and grows GPU memory
        batch_phase_loss.append(phase_loss.item())
        batch_loss.append(loss.item())
        batch_mse_loss.append(mse_loss.item())
        batch_bm_loss.append(bm_loss.item())

        if(opt.measure_time):
            torch.cuda.synchronize()
            iter_data_forwarded_time = time.time()

        # update optimizer
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if(opt.measure_time):
            iter_model_backwarded_time = time.time()
            data_loading_time.append(iter_data_loaded_time - iter_start_time)
            model_forward_time.append(iter_data_forwarded_time - iter_data_loaded_time)
            model_backward_time.append(iter_model_backwarded_time - iter_data_forwarded_time)

        if(total_steps // opt.batchSize % opt.display_freq == 0):
            print('Display training progress at (epoch %d, total_steps %d)' % (epoch, total_steps))
            # BUGFIX: distinct names per metric; the original reused `avg_loss`,
            # so the 'data/loss' tensorboard scalar recorded the *phase* loss
            # instead of the total loss
            avg_total_loss = sum(batch_loss) / len(batch_loss)
            print('Average loss: %.3f' % (avg_total_loss))
            avg_mse = sum(batch_mse_loss) / len(batch_mse_loss)
            print('mse loss: %.3f' % (avg_mse))
            avg_bm = sum(batch_bm_loss) / len(batch_bm_loss)
            print('Bratio loss: %.3f' % (avg_bm))
            avg_phase = sum(batch_phase_loss) / len(batch_phase_loss)
            print('Pratio loss: %.3f' % (avg_phase))
            # BUGFIX: reset all four accumulators (batch_phase_loss previously
            # grew without bound across the whole run)
            batch_loss = []
            batch_mse_loss = []
            batch_bm_loss = []
            batch_phase_loss = []
            if opt.tensorboard:
                writer.add_scalar('data/loss', avg_total_loss, total_steps)
            if(opt.measure_time):
                print('average data loading time: ' + str(sum(data_loading_time)/len(data_loading_time)))
                print('average forward time: ' + str(sum(model_forward_time)/len(model_forward_time)))
                print('average backward time: ' + str(sum(model_backward_time)/len(model_backward_time)))
                data_loading_time = []
                model_forward_time = []
                model_backward_time = []
            # print('end of display \n')

        if(total_steps // opt.batchSize % opt.save_latest_freq == 0):
            print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
            torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_latest.pth'))
            torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_latest.pth'))

        if(epoch % opt.validation_freq == (opt.validation_freq-1) and opt.validation_on):
            # NOTE(review): this epoch-based condition sits inside the batch
            # loop, so validation runs after *every* batch of a qualifying
            # epoch -- confirm whether a step-based condition was intended
            print("begin evaluation")
            model.eval()
            opt.mode = 'val'
            print('Display validation results at (epoch %d, total_steps %d)' % (epoch, total_steps))
            val_err = display_val(model, loss_criterion, writer, total_steps, dataset_val, opt)
            print('end of display \n')
            model.train()
            opt.mode = 'train'
            #save the model that achieves the smallest validation error
            if val_err < best_err:
                best_err = val_err
                print('saving the best model (epoch %d, total_steps %d) with validation error %.3f\n' % (epoch, total_steps, val_err))
                torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_best.pth'))
                torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_best.pth'))
                print("\r\n eval bias in the best model")
                display_otherMetric(model, writer, dataset_val, opt)

        if(opt.measure_time):
            iter_start_time = time.time()

    if(epoch % opt.save_epoch_freq == 0):
        print('saving the model at the end of epoch %d, total_steps %d' % (epoch, total_steps))
        torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_visual.pth'))
        torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_audio.pth'))

    #decrease learning rate 6% every opt.learning_rate_decrease_itr epochs
    if(opt.learning_rate_decrease_itr > 0 and epoch % opt.learning_rate_decrease_itr == 0):
        decrease_learning_rate(optimizer, opt.decay_factor)
        print('decreased learning rate by ', opt.decay_factor)
# ---- file boundary (dataset artifact) ----
import os
import unittest
from decimal import Decimal
from .common import JinsiTestCase
class JinsiExamples(JinsiTestCase):
    """End-to-end examples: render a jinsi YAML document and compare the
    resulting structure against the expected Python value via self.check."""

    def test_simple_template(self):
        """::let/::call user template expanded per user into Resources."""
        doc = """\
        ::let:
          user:
            ::object:
              - ::titlecase:
                  ::get: $user.username
              - Type: AWS::IAM::User
                Properties:
                  UserName:
                    ::get: $user.username
                  Groups:
                    - Administrators
                  LoginProfile:
                    Password:
                      ::get: $user.password
                      ::else: default
                    PasswordResetRequired: Yes
          users:
            ::merge:
              ::each $ as $user:
                ::call user:
        Resources:
          ::call users:
            - username: jim
              password: <PASSWORD>
            - username: jack
              password: <PASSWORD>
            - username: johnny
        """
        expected = {
            "Resources": {
                "Jim": {
                    "Type": "AWS::IAM::User",
                    "Properties": {
                        "UserName": "jim",
                        "Groups": ["Administrators"],
                        "LoginProfile": {
                            "Password": "<PASSWORD>",
                            "PasswordResetRequired": True
                        }
                    }
                },
                "Jack": {
                    "Type": "AWS::IAM::User",
                    "Properties": {
                        "UserName": "jack",
                        "Groups": ["Administrators"],
                        "LoginProfile": {
                            "Password": "<PASSWORD>",
                            "PasswordResetRequired": True
                        }
                    }
                },
                "Johnny": {
                    "Type": "AWS::IAM::User",
                    "Properties": {
                        "UserName": "johnny",
                        "Groups": ["Administrators"],
                        "LoginProfile": {
                            "Password": "<PASSWORD>",
                            "PasswordResetRequired": True
                        }
                    }
                }
            }
        }
        self.check(expected, doc)

    def test_example_0(self):
        """Same document as test_simple_template; expected dict written in a
        compact single-quoted style."""
        doc = """\
        ::let:
          user:
            ::object:
              - ::titlecase:
                  ::get: $user.username
              - Type: AWS::IAM::User
                Properties:
                  UserName:
                    ::get: $user.username
                  Groups:
                    - Administrators
                  LoginProfile:
                    Password:
                      ::get: $user.password
                      ::else: default
                    PasswordResetRequired: Yes
          users:
            ::merge:
              ::each $ as $user:
                ::call user:
        Resources:
          ::call users:
            - username: jim
              password: <PASSWORD>
            - username: jack
              password: <PASSWORD>
            - username: johnny
        """
        expected = {
            'Resources': {
                'Jim': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'UserName': 'jim', 'Groups': ['Administrators'],
                        'LoginProfile': {
                            'Password': '<PASSWORD>',
                            'PasswordResetRequired': True
                        }}},
                'Jack': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'UserName': 'jack', 'Groups': ['Administrators'],
                        'LoginProfile': {
                            'Password': '<PASSWORD>',
                            'PasswordResetRequired': True
                        }}},
                'Johnny': {
                    'Type': 'AWS::IAM::User',
                    'Properties': {
                        'UserName': 'johnny', 'Groups': ['Administrators'],
                        'LoginProfile': {
                            'Password': '<PASSWORD>',
                            'PasswordResetRequired': True
                        }}}
            }
        }
        self.check(expected, doc)

    def test_example_1(self):
        """String case conversions and exact decimal division (::div)."""
        doc = """\
        ::let:
          name: XMLRpcParser_Main
        docs:
          - ::snakecase: <<name>>
          - ::uppercase:
              ::snakecase:
                ::get: name
          - ::uppercase:
              ::kebabcase:
                ::get: name
          - ::kebabcase:
              ::get: name
          - ::titlecase:
              ::get: name
          - ::camelcase:
              ::get: name
          - ::titlecase: XmlRPCProcessor_23
          - ::div:
              - 1
              - 7
          - ::div:
              - 355
              - 113
          - ::div:
              - 2.7
              - 3.01
        """
        expected = {
            'docs': [
                'xml_rpc_parser_main',
                'XML_RPC_PARSER_MAIN',
                'XML-RPC-PARSER-MAIN',
                'xml-rpc-parser-main',
                'XmlRpcParserMain',
                'xmlRpcParserMain',
                'XmlRpcProcessor23',
                Decimal('0.1428571428571428571428571429'),
                Decimal('3.141592920353982300884955752'),
                Decimal('0.8970099667774086378737541528')
            ]
        }
        self.check(expected, doc, dezimal_foo=True)

    def test_example_2(self):
        """Variable scoping: plain vs $-prefixed lets, <<interpolation>>,
        environment lookup with ::else fallback, and ::call overrides."""
        doc = """\
        ::let:
          x: foo
          y: bar
          $x: qux
          $y: quuz
          template:
            - ::get: x
            - ::get: y
            - ::get: $x
            - ::get: $y
            - ::get: $z
        formatted: hello <<x>> woohoo <<y>> yeah <<$x>> and <<$y>>
        cool:
          some-<<x>>: <<y>>
          woohoo: <<x>>/<<y>>
        list:
          - ::get: x
          - ::get: y
          - ::get: $x
          - ::get: $y
          - ::get: JINSI_TEST_SHELL
          - ::get: JINSI_TEST_HOSTNAME
            ::else: unknown
        x:
          ::let:
            something:
              ::get: $m
          y:
            - ::get: something
              ::let:
                $m: 3
              ::else:
                ::get: $z
            - ::get: $q
              ::else: All okay.
        applied:
          ::let:
            x: keyfoo
            y: keybar
            $x: keyqux
            $y: keyquuz
            $z: zeeee
          ::call template:
            $y: callquuz
        applied2:
          ::call: template
        """
        expected = {
            'formatted': 'hello foo woohoo bar yeah qux and quuz',
            'cool': {
                'some-foo': 'bar',
                'woohoo': 'foo/bar',
            },
            'list': [
                'foo',
                'bar',
                'qux',
                'quuz',
                '/bin/hash',
                'unknown',
            ],
            'x': {
                'y': [
                    3.0,
                    'All okay.',
                ]
            },
            'applied': [
                'foo',
                'bar',
                'keyqux',
                'callquuz',
                'zeeee',
            ],
            'applied2': [
                'foo',
                'bar',
                'qux',
                'quuz',
                'zzz',
            ],
        }
        os.environ['JINSI_TEST_SHELL'] = '/bin/hash'
        self.check(expected, doc, args={'z': 'zzz'})

    def test_example_3(self):
        """Multiple ::call results merged into one mapping alongside plain keys."""
        doc = """\
        ::let:
          users:
            ::each $ as $account:
              ::object:
                - ::get: $account
                - Properties:
                    Some: props
          robousers:
            ::each $ as $account:
              ::object:
                - ::get: $account
                - Properties:
                    Some: props
        Resources:
          ::call users:
            - jane
            - john
          ::call robousers:
            - ava
            - hal
          a: b
          c: d
        """
        expected = {
            'Resources': {
                'jane': {
                    'Properties': {
                        'Some': 'props'
                    }
                },
                'john': {
                    'Properties': {
                        'Some': 'props'
                    }
                }, 'ava': {
                    'Properties': {
                        'Some': 'props'
                    }
                }, 'hal': {
                    'Properties': {
                        'Some': 'props'
                    }
                },
                'a': 'b',
                'c': 'd',
            }
        }
        self.check(expected, doc)

    def test_example_4(self):
        """Lists via ::get, ::explode string splitting, and ::each mapping."""
        doc = """\
        ::let:
          xs:
            - 1
            - 2
            - 7
            - 3
            - 2
            - 9
            - 4
            - 2
            - 5
        doc:
          ys:
            ::get: xs
          zs:
            ::let:
              x:
                ::explode:
                  - " "
                  - hello world out there
            ::get: x
          qs:
            ::let:
              xs:
                ::explode:
                  - " "
                  - hello out there
            ::each xs as x:
              ::get: x
        """
        expected = {
            'doc': {
                'ys': [1, 2, 7, 3, 2, 9, 4, 2, 5],
                'zs': ['hello', 'world', 'out', 'there'],
                'qs': ['hello', 'out', 'there'],
            }
        }
        self.check(expected, doc)

    def test_example_5(self):
        """Recursive ::call (fibonacci) over ::range_exclusive with a default max."""
        doc = """\
        ::let:
          fib:
            ::when:
              ::get: $n == 0 or $n == 1
            ::then:
              ::get: $n
            ::else:
              ::add:
                - ::call fib:
                    $n:
                      ::get: $n - 1
                - ::call fib:
                    $n:
                      ::get: $n - 2
          fibs:
            ::range_exclusive:
              - 0
              - ::get: $max
                ::else: 10
        result:
          ::each fibs as $n:
            ::call: fib
        """
        expected = {
            'result': [
                0,
                1,
                1,
                2,
                3,
                5,
                8,
                13,
                21,
                34,
            ]
        }
        self.check(expected, doc)

    def test_example_6(self):
        """::any picks the first truthy candidate (conditionals and literals)."""
        doc = """\
        result:
          ::any:
            - ::when: false
              ::then: 1
            - ::when: false
              ::then: 2
            - 7
        result2:
          ::any:
            - []
            - {}
            - false
            - null
            - 1
            - 2
            - 3
        """
        expected = {
            'result': 7,
            'result2': 1,
        }
        self.check(expected, doc)
# Allow running this test module directly with the plain python interpreter.
if __name__ == '__main__':
    unittest.main()
# ---- file boundary (dataset artifact) ----
# <filename>tests/functional/conftest.py  (file-boundary artifact)
import time
import pytest
from scrapli_netconf.driver.async_driver import AsyncNetconfDriver
from scrapli_netconf.driver.sync_driver import NetconfDriver
from ..test_data.devices import DEVICES, PRIVATE_KEY
# Device-type names keyed into DEVICES, grouped by NETCONF base version.
NETCONF_1_0_DEVICE_TYPES = ["cisco_iosxe_1_0", "juniper_junos_1_0"]
NETCONF_1_1_DEVICE_TYPES = ["cisco_iosxe_1_1", "cisco_iosxr_1_1"]
NETCONF_ALL_VERSIONS_DEVICE_TYPES = NETCONF_1_0_DEVICE_TYPES + NETCONF_1_1_DEVICE_TYPES
@pytest.fixture(
    scope="session",
    params=NETCONF_1_0_DEVICE_TYPES,
)
def device_type_1_0(request):
    """Parametrized device-type name for NETCONF 1.0 capable devices."""
    yield request.param
@pytest.fixture(
    scope="session",
    params=NETCONF_1_1_DEVICE_TYPES,
)
def device_type_1_1(request):
    """Parametrized device-type name for NETCONF 1.1 capable devices."""
    yield request.param
@pytest.fixture(
    scope="session",
    params=NETCONF_ALL_VERSIONS_DEVICE_TYPES,
)
def device_type(request):
    """Parametrized device-type name covering all NETCONF versions."""
    yield request.param
@pytest.fixture(scope="class", params=["system", "ssh2", "paramiko"])
def transport(request):
    """Parametrized scrapli sync transport plugin name."""
    yield request.param
@pytest.fixture(scope="session", params=["password"])
def auth_type(request):
    """Authentication style; only "password" is exercised currently."""
    yield request.param
@pytest.fixture(scope="function")
def sync_conn_1_0(device_type_1_0, auth_type, transport):
    """Yield a sync NETCONF 1.0 driver plus its device-type name; close it afterwards."""
    device = dict(DEVICES[device_type_1_0])
    if auth_type == "key":
        # swap password auth for key-based auth
        del device["auth_password"]
        device["auth_private_key"] = PRIVATE_KEY
    conn = NetconfDriver(**device, transport=transport)
    yield conn, device_type_1_0
    if conn.isalive():
        conn.close()
    # the iosxr vm does not handle rapid back-to-back connections well;
    # a short pause between tests appeases it
    time.sleep(1)
@pytest.fixture(scope="function")
async def async_conn_1_0(device_type_1_0, auth_type):
    """Yield an async NETCONF 1.0 driver plus its device-type name; close it afterwards."""
    device = dict(DEVICES[device_type_1_0])
    device["transport"] = "asyncssh"
    if auth_type == "key":
        # swap password auth for key-based auth
        del device["auth_password"]
        device["auth_private_key"] = PRIVATE_KEY
    conn = AsyncNetconfDriver(**device)
    yield conn, device_type_1_0
    if conn.isalive():
        await conn.close()
    # the iosxr vm does not handle rapid back-to-back connections well;
    # a short pause between tests appeases it
    time.sleep(1)
@pytest.fixture(scope="function")
def sync_conn_1_1(device_type_1_1, auth_type, transport):
    """Yield a sync NETCONF 1.1 driver plus its device-type name; close it afterwards."""
    device = dict(DEVICES[device_type_1_1])
    if auth_type == "key":
        # swap password auth for key-based auth
        del device["auth_password"]
        device["auth_private_key"] = PRIVATE_KEY
    conn = NetconfDriver(**device, transport=transport)
    yield conn, device_type_1_1
    if conn.isalive():
        conn.close()
    # the iosxr vm does not handle rapid back-to-back connections well;
    # a short pause between tests appeases it
    time.sleep(1)
@pytest.fixture(scope="function")
async def async_conn_1_1(device_type_1_1, auth_type):
    """Yield an async NETCONF 1.1 driver plus its device-type name; close it afterwards."""
    device = dict(DEVICES[device_type_1_1])
    device["transport"] = "asyncssh"
    if auth_type == "key":
        # swap password auth for key-based auth
        del device["auth_password"]
        device["auth_private_key"] = PRIVATE_KEY
    conn = AsyncNetconfDriver(**device)
    yield conn, device_type_1_1
    if conn.isalive():
        await conn.close()
    # the iosxr vm does not handle rapid back-to-back connections well;
    # a short pause between tests appeases it
    time.sleep(1)
@pytest.fixture(scope="function")
def sync_conn(device_type, auth_type, transport):
    """Yield a sync driver (any NETCONF version) plus its device-type name."""
    device = dict(DEVICES[device_type])
    if auth_type == "key":
        # swap password auth for key-based auth
        del device["auth_password"]
        device["auth_private_key"] = PRIVATE_KEY
    conn = NetconfDriver(**device, transport=transport)
    yield conn, device_type
    if conn.isalive():
        conn.close()
    # the iosxr vm does not handle rapid back-to-back connections well;
    # a short pause between tests appeases it
    time.sleep(1)
@pytest.fixture(scope="function")
async def async_conn(device_type, auth_type):
    """Yield an async driver (any NETCONF version) plus its device-type name."""
    device = dict(DEVICES[device_type])
    device["transport"] = "asyncssh"
    if auth_type == "key":
        # swap password auth for key-based auth
        del device["auth_password"]
        device["auth_private_key"] = PRIVATE_KEY
    conn = AsyncNetconfDriver(**device)
    yield conn, device_type
    if conn.isalive():
        await conn.close()
    # the iosxr vm does not handle rapid back-to-back connections well;
    # a short pause between tests appeases it
    time.sleep(1)
# ---- file boundary (dataset artifact) ----
"""A module for compute scheduling."""
#pylint: disable=too-many-instance-attributes, no-self-use, missing-docstring
import networkx as nx
import matplotlib.pyplot as plt
from ordered_set import OrderedSet
from copy import deepcopy
from .tvm import tensor
from .tvm import make as _make
from .tvm import stmt as _stmt
from .tvm import expr as _expr
from .tvm import api as tvm_api
from .tvm import _api_internal
from .tvm._api_internal import _ExternOp
from .debug import DSLError, APIError
from . import util
from .devices import Device, DevMediaPair
from itertools import count
class Schedule(object):
    """Create a compute schedule.

    This is a wrapper class for :obj:`tvm.schedule._Schedule`.

    Parameters
    ----------
    sch : tvm.schedule._Schedule
        The TVM schedule

    inputs : list of Tensor
        Tensors that are the inputs to the schedule
    """

    stage_ops = []
    last_stages = OrderedSet([])
    _ids = count(0)

    def __init__(self, sch, inputs, name=""):
        self.id = next(self._ids)
        self.sch = sch
        self.inputs = inputs
        # maps a stage name to its (channel, new_stage, device) after .to()
        self.placement = dict()
        if self.id > 0 and name == "":
            self.name = "s{}".format(self.id)
        else:
            self.name = name

    def __getitem__(self, stage):
        # accept either an hcl Stage (exposes ._op) or a plain tensor (.op)
        try:
            return self.sch[stage._op]
        except AttributeError:
            return self.sch[stage.op]

    def dataflow_graph(self, stages=None, level=0, plot=False):
        """Create a dataflow graph for a given schedule.

        Parameters
        ----------
        stages : list of Stage, optional
            The finals stages in the graph. If not specified, draw all the
            stages

        level : int, optional
            The level of stages to draw. If not specified, draw to the
            inner-most stages

        plot : bool, optional
            Whether draw the graph with ``matplotlib`` or not

        Returns
        -------
        networkx.DiGraph
            A directional graph that describes the dataflow
        """
        graph = nx.DiGraph()
        level_count = [0]
        op_map = dict()
        pos = {}
        def gen_graph(stage, y):
            names = []
            for input_stage in stage.input_stages:
                if len(level_count) == y:
                    level_count.append(0)
                names += gen_graph(input_stage, y+1)
            name_with_prefix = stage.name_with_prefix
            # op_map from string to tensor op
            op_map[name_with_prefix] = self.sch[stage._op]
            if len(name_with_prefix.split('.')) <= level or level == 0:
                for name in names:
                    # insert intermediate stage recorded by .to(); the second
                    # branch retries the lookup with the "_top." prefix removed
                    if name in self.placement.keys():
                        channel, new_stage, dev = self.placement[name]
                        op_map[channel.op.name] = channel
                        graph.add_edge(name, channel.op.name)
                        pos[name] = (level_count[y], y)
                        op_map[new_stage.op.name] = new_stage
                        graph.add_edge(channel.op.name, new_stage.op.name)
                        pos[channel.op.name] = (level_count[y], y)
                        graph.add_edge(new_stage.op.name, name_with_prefix)
                        pos[new_stage.op.name] = (level_count[y], y)
                        if plot:
                            print(name_with_prefix, "<==", new_stage.op.name, "<==", \
                                  channel.op.name, "<==", name)
                    elif name.replace("_top.", "") in self.placement.keys():
                        channel, new_stage, dev = self.placement[name.replace("_top.", "")]
                        op_map[channel.op.name] = channel
                        graph.add_edge(name, channel.op.name)
                        pos[name] = (level_count[y], y)
                        op_map[new_stage.op.name] = new_stage
                        graph.add_edge(channel.op.name, new_stage.op.name)
                        pos[channel.op.name] = (level_count[y], y)
                        graph.add_edge(new_stage.op.name, name_with_prefix)
                        pos[new_stage.op.name] = (level_count[y], y)
                        if plot:
                            print(name_with_prefix, "<==", new_stage.op.name, "<==", \
                                  channel.op.name, "<==", name)
                    # add children nodes to graph
                    else:
                        if plot:
                            print(name_with_prefix, " <=== ", name)
                        graph.add_edge(name, name_with_prefix)
                        pos[name] = (level_count[y], y)
                level_count[y] += 1
                return [name_with_prefix]
            return names

        if stages is None:
            stages = Schedule.last_stages
        else:
            if not isinstance(stages, (tuple, list)):
                stages = [stages]

        x = 0
        for stage in stages:
            gen_graph(stage, 1)
            pos[stage.name_with_prefix] = (x, 0)
            x += 1

        if plot: # draw the network
            try:
                from networkx.drawing.nx_agraph import graphviz_layout
            except ImportError:
                raise ImportError("Graphviz and either PyGraphviz or Pydot required")
            pos = graphviz_layout(graph)
            nx.draw(graph, pos, with_labels=True)
            plt.show()
        return graph, op_map

    def subgraph(self, inputs, outputs):
        """Collect the ops between `inputs` and `outputs` (exclusive of outputs)."""
        assert len(inputs) > 0, "empty inputs"
        assert len(outputs) > 0, "empty outputs"
        # check availability
        graph, op_map = self.dataflow_graph()
        inputs = [ _.name for _ in inputs ]
        outputs = [ _.name for _ in outputs ]
        # walk the graph backwards from the outputs towards the inputs
        stack = deepcopy(outputs)
        subgraph = list()
        while len(stack) > 0:
            op = stack.pop()
            if op in subgraph: continue
            if op not in outputs:
                subgraph.insert(0, op)
            if op not in graph.nodes:
                op = "_top." + op
            assert op in graph.nodes, \
                "cannot find node " + op + " in " + str(graph.nodes)
            for _ in graph.predecessors(op):
                # NOTE(review): this guards on `op`, not on the predecessor
                # `_`, and `op` may carry the "_top." prefix while `inputs`
                # holds bare names -- confirm the traversal stops as intended
                if not op in inputs:
                    stack.append(_)
        subgraph = OrderedSet(subgraph)
        return subgraph, op_map

    def duplicate(self, inputs, outputs, factor=2):
        """Extract kernel and duplicate the compute unit"""
        subgraph, op_map = self.subgraph(inputs, outputs)
        # fuse the stages of the subgraph into the last stage
        for index in range(len(subgraph)):
            if index == len(subgraph) - 1: break
            pre_stage = op_map[subgraph[index]]
            post_stage = op_map[subgraph[index+1]]
            axis_num = len(post_stage.op.axis)
            axis = post_stage.op.axis[axis_num-1]
            pre_stage.compute_at(post_stage, axis)
        # split kernel
        post_stage.split(post_stage.op.axis[0], factor)
        return post_stage

    def reuse_at(self, target, parent, axis, name=None):
        """Create a reuse buffer reusing the output of current stage

        This returns a new tensor representing the reuse buffer. A stage
        is also built correspondingly. The new stage will be a sub-stage of
        the parent stage under the specified axis. Thus, the axis must be
        inside the axis list of the parent stage.

        Parameters
        ----------
        target : Tensor
            The tensor whose values will be reused

        parent : Stage
            The stage that reuses the output of the current stage

        axis : IterVar
            The axis that generates the reuse values

        name : string, optional
            The name of the reuse buffer

        Returns
        -------
        Tensor
        """
        try:
            target = target.tensor
        except (AttributeError, ValueError):
            try:
                target = target._op
            except AttributeError:
                pass

        if name is None:
            name = target.name + ".reuse"
        return self.sch.reuse_at(target, parent, axis, name)

    def join(self, srcs, dest=None):
        """Join multiple source tensors into a single destination."""
        assert len(srcs) > 0, "joined tensors should be " + \
            "collected from more than one srcs"
        # create channels and collector stage
        if dest is not None:
            if isinstance(dest, tuple):
                dest, target = dest
                dest = self[dest]
            elif isinstance(dest, Stage):
                target = dest._op
            # (BUGFIX: removed an unreachable duplicate
            #  `elif isinstance(dest, tuple)` branch)
            else: # target tensor
                target = dest.tensor
        else: target = dest
        for src in srcs:
            if isinstance(src, tuple):
                src, tensor = src
                # BUGFIX: the original message was written as `, + "..."`,
                # which raised TypeError (unary + on str) whenever the
                # assertion actually failed
                assert tensor == target, \
                    "inconsistent tensor joining"
            self.sch.join(target, dest, self[src])

    def fork(self, tensor, dests, axis=0):
        """Broadcast (fork) a tensor to multiple destination stages."""
        assert len(dests) > 0, "forked tensor should be " + \
            "broadcast to more than one dest"
        # NOTE(review): the `axis` argument is accepted but not forwarded
        # to self.to() -- confirm whether that is intentional
        # dest as tvm stages
        for dest in dests:
            self.to(tensor, self[dest])

    def to(self, tensors, dst, src=None, axis=0,
           mode=_expr.IO.DMA, depth=1, local_buffer=True, name=None):
        """Stream a list of Tensors to dst devices

        Parameters
        ----------
        tensors : list of Tensor
            The tensors to be moved

        dst : device or stage
            The destination of data movement

        src : device or stage
            The source of data movement

        axis : axis index
            Move axis-th loop body to xcel scope

        mode : data movement type
            The modes of data movement (FIFO, DMA, MMIO)
            For inter-kernel data movemnet, only FIFO is supported

        depth : channel depth
            The streaming channel depth

        local_buffer : boolean
            create local buffer for data on-device
        """
        if mode not in [ _expr.IO.DMA, _expr.IO.FIFO ]:
            raise APIError("Invalid channel type")
        rets = list()
        if not isinstance(tensors, list):
            tensors = [tensors]

        # local name `item` avoids shadowing the module-level `tensor` import
        for item in tensors:
            try:
                if isinstance(item, Stage):
                    target = item._op
                # unpack tuple of src stage and tensor
                elif isinstance(item, tuple):
                    # NOTE(review): `src` set here persists into later loop
                    # iterations -- confirm that carry-over is intended
                    src, target = item
                    # from hcl stage to tvm stage
                    src = self.__getitem__(src)
                else: # target tensor
                    target = item.tensor
            except (AttributeError, ValueError):
                target = item

            # convert hcl stage (best effort; dst may already be a device)
            try: dst = self[dst]
            except Exception: pass

            move_to_device = False
            if src is None:
                # move to device
                if isinstance(dst, Device) or \
                   isinstance(dst, DevMediaPair):
                    if axis == 0:
                        move_to_device = True
                    else: # inner-stage movement
                        assert isinstance(item, Stage)
                        target = self[item]
                else: # inter-stage
                    src = self[item]

            # target can be stage or tensor
            ret = self.sch.to(target, dst, src, axis, mode, depth, local_buffer)

            # record the placement information
            if move_to_device:
                channel, ret = ret
                self.placement[target.name] = \
                    (self.__getitem__(channel), \
                     self.__getitem__(ret), dst)
            rets.append(ret)

        if len(rets) == 1: return rets[0]
        else: return rets

    def partition(self, target, partition_type=_stmt.Partition.Complete, dim=0, factor=0):
        """Partition a Tensor into smaller Tensors or even registers

        Users can specify the partition type, which includes Complete, Block,
        and Cyclic. The default type is Complete, which means we completely
        partition the specified dimension. If Block is specified, the tensor
        is partitioned into N blocks with equal size. The number N is specified
        by the factor. Otherwise, if Cyclic is specified, the elements of the
        tensor is partition in a cyclic manner. For example, if the factor is
        three, the 1st element will be assigned to the 1st partitioned tensor;
        the 2nd element will be assigned to the 2nd one; and so on. Finally, if
        Complete is specified, the factor will be ignored. If `dim` is set to
        0, it means we partition all dimensions.

        Parameters
        ----------
        target : Tensor
            The tensor to be partitioned

        partition_type : {Complete, Block, Cyclic}, optional
            The partition type

        dim : int, optional
            The dimension to be partitioned

        factor : int, optional
            The partition factor
        """
        if partition_type > 2:
            raise APIError("Invalid partition type")
        if dim < 0:
            raise APIError("Invalid dimension")
        if factor < 0:
            raise APIError("Invalid factor")

        try:
            target = target.tensor
        except (AttributeError, ValueError):
            try:
                target = target._op
            except AttributeError:
                pass
        return self.sch.partition(target, partition_type, dim, factor)

    def reshape(self, target, shape):
        """Reshape a Tensor to a specified new shape

        Parameters
        ----------
        target : Tensor
            The tensor to be reshaped

        shape : tuple of int
            The new shape of the tensor
        """
        try:
            target = target.tensor
        except (AttributeError, ValueError):
            try:
                target = target._op
            except AttributeError:
                pass
        _api_internal._ScheduleReshape(self.sch, target, shape)
class Stage(object):
    """Create a stage in the algorithm.

    Stage is needed when an imperative DSL block is not used within any other
    compute APIs. We can further use the created stage to help us schedule
    the imperative components within it. It can also be used to describe a
    higher level of computation hierarchy. For example, we can wrap several
    compute APIs into a single stage.

    Parameters
    ----------
    name : str, optional
        The name of the Stage

    Attributes
    ----------
    stmt_stack : list of list of Stmt
        Store all statements. There are two levels. The outer level is
        for different scopes of statement. The inner level is for
        different statements
    var_dict : dict(str, _Var)
        A dictionary whose key is the name of the variable
        and the value is the variable itself. This enables users to
        access a variable inside a Stage via a Python attribute
    axis_list : list of IterVar
        A list of axes appeared in this Stage
    has_break : bool
        Set to `True` if there is a `break` statement within the stage
    has_return : bool
        Set to `True` if there is a `return` statement within the stage
    ret_dtype : Type
        The returned data type. Only exists for `heterocl.compute`
    for_level : int
        The level of a loop nest where the current statement is.
    for_ID : int
        An index used to label the unnamed axes
    input_stages : set of Stage
        A set of stages that are the input to the Stage
    lhs_tensors : set of Tensor
        The tensors that are updated at the left-hand side
    last_substages : set of Stage
        A set of sub-stages that are last used in the current stage
    name_with_prefix : str
        The full name of the stage. This is used when two stages at different
        levels share the same name

    Examples
    --------
    .. code-block:: python

        A = hcl.placeholder((10,))
        with hcl.Stage():
            A[0] = 5
            with hcl.for_(1, 10) as i:
                A[i] = A[i-1] * 2
    """
    _current = []
    """Store all living `Stage`. The newest is at the end."""
    def __init__(self, name=None, dtype=None, shape=()):
        # Attributes related to a single stage
        self.name = util.get_name("stage", name)
        # Two-level stack: the outer list holds scopes, the inner lists hold
        # the statements collected in each scope.
        self.stmt_stack = [[]]
        self.var_dict = {}
        self.axis_list = []
        self.has_break = False
        self.has_return = False
        self.ret_dtype = None
        self.for_level = 0
        self.for_ID = 0
        self.substages = []
        # Attributes for cross-stage relation
        self.input_stages = set([])
        self.lhs_tensors = set([])
        self.last_substages = set([])
        # Prefix with the enclosing (currently open) stage's full name, if
        # any, so that same-named stages at different levels stay distinct.
        self.name_with_prefix = self.name if Stage.get_len() == 0 \
            else Stage.get_current().name_with_prefix + "." + self.name
        # Attribute for constant tensor
        self.init_values = None
        self.is_const = False
        # Private attributes for building a stage
        self._op = None
        self._hcl_dtype = util.get_dtype(dtype, self.name_with_prefix)
        self._dtype = util.get_tvm_dtype(dtype, self.name_with_prefix)
        self._buf = tvm_api.decl_buffer(shape, self._dtype, self.name)
        self._shape = self._buf.shape
    def __enter__(self):
        """Push this stage onto the stack of living stages."""
        Stage._current.append(self)
        return self
    def __exit__(self, ptype, value, trace):
        """Finalize the stage: build its op and update stage relations."""
        # update input_stages: the union of the last substages and original input stages
        # collected in the stage
        self.input_stages = self.last_substages.union(self.input_stages)
        # create the output operation
        input_ops = [i._op for i in self.input_stages]
        input_bufs = [i._buf for i in self.input_stages]
        output_bufs = [self._buf]
        body = self.pop_stmt()
        Stage._current.pop()
        # Constant tensors carry their initial values into the ExternOp.
        if self.init_values is not None:
            op = _ExternOp(self.name, "", self.axis_list, input_ops,
                           input_bufs, output_bufs, body,
                           self.init_values, self.is_const)
        else:
            op = _ExternOp(self.name, "", self.axis_list, input_ops,
                           input_bufs, output_bufs, body)
        self._op = op.output(0)
        # update last_update stages
        # if this stage is a substage of other stages
        if Stage._current:
            superstage = Stage._current[-1]
            # add attribute statement for later stage insertion
            superstage.emit(
                lambda x: _make.AttrStmt(self._buf, "attach_scope",
                                         _make.StringImm(superstage.name), x))
            # update the input stages of the superstage:
            # input_stages = original input stages + current input stages - last substages
            superstage.input_stages = superstage.input_stages.union(self.input_stages)
            superstage.input_stages.difference_update(superstage.last_substages)
            # update the last substages of the superstage:
            # last_substages = original substages + current stage - inputs of current stage
            superstage.last_substages.add(self)
            superstage.last_substages.difference_update(self.input_stages)
            # update lhs_tensors:
            # lhs_tensors = original tensors + lhs tensors of current stage
            superstage.lhs_tensors.update(self.lhs_tensors)
            # update var_dict
            superstage.var_dict[self.name] = self
            # update prefix
            self.name_with_prefix = superstage.name_with_prefix + "." + self.name
            # update superstage's substages
            superstage.substages.append(self)
        # Otherwise update the list of stages globally
        else:
            Schedule.stage_ops.append(self)
            Schedule.last_stages.add(self)
            Schedule.last_stages -= self.input_stages
    def __repr__(self):
        return self.name
    def __getattr__(self, name):
        """Resolve `name` as a variable or a tensor visible in this stage."""
        try:
            if name in self.var_dict:
                return self.var_dict[name]
            else:
                # return stage and target tensor op
                for tensor in self.lhs_tensors:
                    if tensor.name == name:
                        return (self, tensor._tensor)
                # check tensors in input stages
                for stage in self.input_stages:
                    if stage.name == name:
                        return (self, stage._op)
                # check tensors in input_stage.lhs
                for stage in self.input_stages:
                    lhs = stage.lhs_tensors
                    for tensor in lhs:
                        if tensor.name == name:
                            return (self, tensor._tensor)
                raise ValueError("Member " + name + \
                    " not found in " + str(self.lhs_tensors) + " or " + \
                    str(self.input_stages))
        except KeyError:
            raise ValueError("Uknown member " + name + " of " + self.name)
    def emit(self, stmt):
        """Insert statements to the current stage."""
        # Statements after a `break` would be dead code; reject them.
        if self.has_break:
            raise DSLError("Cannot write statements after break")
        self.stmt_stack[-1].append(stmt)
    def replace_else(self, if_stmt, else_stmt):
        """Add an ELSE or ELIF branch to an existing IF or ELIF branch."""
        assert isinstance(if_stmt, _stmt.IfThenElse), "Wrong if statement"
        # Recurse to the innermost else branch before attaching the new one.
        if isinstance(if_stmt.else_case, _stmt.IfThenElse):
            return _make.IfThenElse(if_stmt.condition, if_stmt.then_case,
                                    self.replace_else(if_stmt.else_case, else_stmt))
        return _make.IfThenElse(if_stmt.condition, if_stmt.then_case, else_stmt)
    def pop_stmt(self):
        """Create a statement from the statements within current stage."""
        stmts = self.stmt_stack.pop()
        # A callable entry wraps the statement that follows it; make sure a
        # trailing callable (or an empty scope) has something to wrap.
        if not stmts or callable(stmts[-1]):
            stmts.append(_make.Evaluate(0))
        stmt = stmts[-1]
        # Fold the remaining statements from back to front into one Stmt.
        for s in reversed(stmts[:-1]):
            if callable(s):
                stmt = s(stmt)
            else:
                assert isinstance(s, _stmt.Stmt)
                stmt = _make.Block(s, stmt)
        return stmt
    @staticmethod
    def get_current():
        """Get the current stage."""
        return Stage._current[-1]
    @staticmethod
    def get_len():
        """Get the level of stages."""
        return len(Stage._current)
    @property
    def axis(self):
        """Get the axes of the stage."""
        return self._op.op.axis
|
<filename>syncless/reactor.py
#! /usr/local/bin/python2.4
"""A Syncless-based implementation of the Twisted main loop.
This Python module was written by reusing the source code of
libevent.reactor v0.3, available from http://launchpad.net/python-libevent
(simple BSD license).
To install the event loop (and you should do this before any connections,
listeners or connectors are added):
import syncless.reactor
syncless.reactor.install()
API Stability: stable
Maintainer of LibEventReactor: U{<NAME> <mailto:<EMAIL>>}
Copyright (c) 2007-2008 Twisted Matrix Laboratories.
"""
__author__ = '<EMAIL> (<NAME>)'
import sys
from zope.interface import implements
from twisted.internet.error import ConnectionFdescWentAway
from twisted.internet.posixbase import PosixReactorBase
from twisted.internet.main import installReactor
from twisted.python import log
from twisted.internet.interfaces import IReactorFDSet
from twisted.python.runtime import platform
from twisted.python.runtime import platformType
# We don't want to import anything from syncless at the top-level (so the
# Syncless event wakeup tasklet won't be created). We only import syncless
# from within the install() function.
class SynclessReactor(PosixReactorBase):
    """
    A reactor that uses Syncless (which uses libevent).

    @ivar _selectables: A dictionary mapping integer file descriptors to
        instances of L{FileDescriptor} which have been registered with the
        reactor. All L{FileDescriptors} which are currently receiving read or
        write readiness notifications will be present as values in this
        dictionary.

    @ivar _reads: A dictionary mapping integer file descriptors to libevent
        event objects. Keys in this dictionary will be registered with
        libevent for read readiness notifications which will
        be dispatched to the corresponding L{FileDescriptor} instances in
        C{_selectables}.

    @ivar _writes: A dictionary mapping integer file descriptors to libevent
        event objects. Keys in this dictionary will be registered with
        libevent for write readiness notifications which will
        be dispatched to the corresponding L{FileDescriptor} instances in
        C{_selectables}.
    """
    implements(IReactorFDSet)
    def __init__(self):
        """Initialize reactor and local fd storage."""
        # These inits really ought to be before
        # L{PosixReactorBase.__init__} call, because it adds the
        # waker in the process
        self._reads = {}
        self._writes = {}
        self._selectables = {}
        self._signal_handlers = []
        # Deferred import: pulling in syncless.coio has the side effect of
        # creating the event wakeup tasklet, so it only happens here.
        from syncless import coio
        self._wakeup_info = coio.wakeup_info()
        self._pending_events = self._wakeup_info.pending_events
        PosixReactorBase.__init__(self)
    def _add(self, xer, mode, mdict):
        """Create the event for reader/writer.

        `mode` is a bitmask: 1 for read readiness, 2 for write readiness.
        """
        fd = xer.fileno()
        if fd not in mdict:
            mdict[fd] = self._wakeup_info.create_event(fd, mode)
            self._selectables[fd] = xer
    def addReader(self, reader):
        """Add a FileDescriptor for notification of data available to read."""
        self._add(reader, 1, self._reads)
    def addWriter(self, writer):
        """Add a FileDescriptor for notification of data available to write."""
        self._add(writer, 2, self._writes)
    def _remove(self, selectable, mdict, other):
        """Remove an event if found.

        `mdict` is the mode dict to remove from; `other` is the opposite
        mode dict, consulted so the selectable is only dropped from
        C{_selectables} once neither mode is registered.
        """
        fd = selectable.fileno()
        if fd == -1:
            # The descriptor is already closed; recover the fd it was
            # registered under by identity search (for-else: bail out if
            # the selectable is not registered at all).
            for fd, fdes in self._selectables.items():
                if selectable is fdes:
                    break
            else:
                return
        if fd in mdict:
            # Call event_del() on the event object.
            mdict.pop(fd).delete()
            if fd not in other:
                del self._selectables[fd]
    def removeReader(self, reader):
        """Remove a selectable for notification of data available to read."""
        return self._remove(reader, self._reads, self._writes)
    def removeWriter(self, writer):
        """Remove a selectable for notification of data available to write."""
        return self._remove(writer, self._writes, self._reads)
    def removeAll(self):
        """Remove all selectables, and return a list of them."""
        # The waker is an internal selectable; exclude it from the result
        # and re-register it afterwards so the reactor can still be woken.
        if self.waker is not None:
            self.removeReader(self.waker)
        result = self._selectables.values()
        events = self._reads.copy()
        events.update(self._writes)
        self._reads.clear()
        self._writes.clear()
        self._selectables.clear()
        for event in events.values():
            event.delete()
        if self.waker is not None:
            self.addReader(self.waker)
        return result
    def getReaders(self):
        return [self._selectables[fd] for fd in self._reads]
    def getWriters(self):
        return [self._selectables[fd] for fd in self._writes]
    def _handleSigchld(self, signum, frame, _threadSupport=platform.supportsThreads()):
        # _threadSupport is evaluated once at definition time on purpose.
        from twisted.internet.process import reapAllProcesses
        if _threadSupport:
            self.callFromThread(reapAllProcesses)
        else:
            self.callLater(0, reapAllProcesses)
    def _handleSignals(self):
        """Install coio-based handlers for INT/TERM/BREAK/CHLD signals."""
        import signal
        from syncless import coio
        self._signal_handlers.append(coio.signal(signal.SIGINT, self.sigInt))
        self._signal_handlers.append(coio.signal(signal.SIGTERM, self.sigTerm))
        # Catch Ctrl-Break in windows
        if hasattr(signal, 'SIGBREAK'):
            self._signal_handlers.append(
                coio.signal(signal.SIGBREAK, self.sigBreak))
        if platformType == 'posix':
            # Install a dummy SIGCHLD handler, to shut up warning. We could
            # install the normal handler, but it would lead to unnecessary reap
            # calls
            signal.signal(signal.SIGCHLD, lambda *args: None)
            self._signal_handlers.append(
                coio.signal(signal.SIGCHLD, self._handleSigchld))
    def _doReadOrWrite(self, fd, mode, selectable):
        """
        C{fd} is available for read or write, make the work and raise errors
        if necessary.
        """
        why = None
        inRead = False
        try:
            if mode & 1:
                why = selectable.doRead()
                inRead = True
            if not why and mode & 2:
                why = selectable.doWrite()
                inRead = False
                # doWrite may have replaced the underlying descriptor.
                if selectable.fileno() != fd:
                    why = ConnectionFdescWentAway('Filedescriptor went away')
                    inRead = False
        except:
            log.err()
            why = sys.exc_info()[1]
        if why:
            self._disconnectSelectable(selectable, why, inRead)
    def _runPendingEvents(self, pending_events):
        # pending_events is a list of (fd, mode) pairs.
        while pending_events:
            fd, mode = pending_events.pop()
            # The fd may have been unregistered by an earlier event in this
            # batch, so re-check membership before dispatching.
            if fd in self._selectables:
                selectable = self._selectables[fd]
                log.callWithLogger(selectable,
                                   self._doReadOrWrite, fd, mode, selectable)
    def doIteration(self, timeout):
        """Call one iteration of the Syncless loop."""
        self._runPendingEvents(self._wakeup_info.tick(timeout))
    def crash(self):
        """Stop the reactor and tear down the installed signal handlers."""
        PosixReactorBase.crash(self)
        for handler in self._signal_handlers:
            handler.delete()
        del self._signal_handlers[:]
def install():
    """Install the Syncless reactor as the running Twisted reactor."""
    # Instantiating the reactor triggers `from syncless import coio', which
    # creates and starts coio.main_loop_tasklet; that tasklet calls
    # event_loop() indefinitely.
    reactor = SynclessReactor()
    installReactor(reactor)
__all__ = ["SynclessReactor", "install"]
|
<filename>tests/integration/test_psycopg2_connector.py
import functools
import json
import psycopg2.errors
import pytest
from procrastinate import psycopg2_connector
@pytest.fixture
def psycopg2_connector_factory(connection_params):
    """Yield a factory that builds opened connectors; close them on teardown."""
    opened = []
    def make_connector(**kwargs):
        # Custom (de)serializers are factory-level options, not DSN params.
        json_dumps = kwargs.pop("json_dumps", None)
        json_loads = kwargs.pop("json_loads", None)
        connection_params.update(kwargs)
        connector = psycopg2_connector.Psycopg2Connector(
            json_dumps=json_dumps, json_loads=json_loads, **connection_params
        )
        # Track before opening so teardown also closes partially-set-up ones.
        opened.append(connector)
        connector.open()
        return connector
    yield make_connector
    for connector in opened:
        connector.close()
def test_connection(psycopg2_connector_factory, connection_params):
    """The connector's connections target the configured database."""
    connector = psycopg2_connector_factory()
    expected_dsn = "dbname=" + connection_params["dbname"]
    with connector._connection() as connection:
        assert connection.dsn == expected_dsn
@pytest.mark.parametrize("exception", [Exception, psycopg2.errors.AdminShutdown])
def test_connection_exception(psycopg2_connector_factory, connection_params, exception):
    """Exceptions raised inside the connection context manager propagate."""
    connector = psycopg2_connector_factory()
    with pytest.raises(exception), connector._connection():
        raise exception
@pytest.mark.parametrize(
    "method_name, expected",
    [
        ("execute_query_one", {"json": {"a": "a", "b": "foo"}}),
        ("execute_query_all", [{"json": {"a": "a", "b": "foo"}}]),
    ],
)
def test_execute_query_json_dumps(
    psycopg2_connector_factory, mocker, method_name, expected
):
    """A custom json_dumps controls how jsonb query arguments serialize."""
    class NotJSONSerializableByDefault:
        pass
    def encode(obj):
        # Only our marker type gets the custom encoding; everything else
        # falls through to the default TypeError behavior.
        if not isinstance(obj, NotJSONSerializableByDefault):
            raise TypeError()
        return "foo"
    query = "SELECT %(arg)s::jsonb as json"
    arg = {"a": "a", "b": NotJSONSerializableByDefault()}
    json_dumps = functools.partial(json.dumps, default=encode)
    connector = psycopg2_connector_factory(json_dumps=json_dumps)
    method = getattr(connector, method_name)
    assert method(query, arg=arg) == expected
def test_json_loads(psycopg2_connector_factory, mocker):
    # sync json_loads is only used for CLI defer.
    custom_loads = mocker.Mock()
    connector = psycopg2_connector_factory(json_loads=custom_loads)
    assert connector.json_loads is custom_loads
def test_execute_query(psycopg2_connector):
    """execute_query runs DDL; *_one/_all read back the table comment."""
    read_comment = "SELECT obj_description('public.procrastinate_jobs'::regclass)"
    psycopg2_connector.execute_query("COMMENT ON TABLE \"procrastinate_jobs\" IS 'foo'")
    assert psycopg2_connector.execute_query_one(read_comment) == {
        "obj_description": "foo"
    }
    assert psycopg2_connector.execute_query_all(read_comment) == [
        {"obj_description": "foo"}
    ]
def test_execute_query_percent(psycopg2_connector):
    """A literal % in an argument-less query is not a placeholder."""
    percent_query = "SELECT '%'"
    psycopg2_connector.execute_query(percent_query)
    assert psycopg2_connector.execute_query_one(percent_query) == {"?column?": "%"}
    assert psycopg2_connector.execute_query_all(percent_query) == [{"?column?": "%"}]
def test_execute_query_arg(psycopg2_connector):
    """%(name)s placeholders are bound from keyword arguments."""
    arg_query = "SELECT %(arg)s"
    psycopg2_connector.execute_query(arg_query, arg=1)
    assert psycopg2_connector.execute_query_one(arg_query, arg=1) == {"?column?": 1}
    assert psycopg2_connector.execute_query_all(arg_query, arg=1) == [{"?column?": 1}]
def test_close(psycopg2_connector):
    """Closing the connector closes its underlying connection pool."""
    underlying_pool = psycopg2_connector._pool
    psycopg2_connector.close()
    assert underlying_pool.closed is True
|
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def add_attachments_to_set(self, attachments: List, attachmentSetId: str = None) -> Dict:
"""
Adds one or more attachments to an attachment set. If an ``attachmentSetId`` is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an ``attachmentSetId`` is specified, the attachments are added to the specified set, if it exists.
An attachment set is a temporary container for attachments that are to be added to a case or case communication. The set is available for one hour after it is created; the ``expiryTime`` returned in the response indicates when the set expires. The maximum number of attachments in a set is 3, and the maximum size of any attachment in the set is 5 MB.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/AddAttachmentsToSet>`_
**Request Syntax**
::
response = client.add_attachments_to_set(
attachmentSetId='string',
attachments=[
{
'fileName': 'string',
'data': b'bytes'
},
]
)
**Response Syntax**
::
{
'attachmentSetId': 'string',
'expiryTime': 'string'
}
**Response Structure**
- *(dict) --*
The ID and expiry time of the attachment set returned by the AddAttachmentsToSet operation.
- **attachmentSetId** *(string) --*
The ID of the attachment set. If an ``attachmentSetId`` was not specified, a new attachment set is created, and the ID of the set is returned in the response. If an ``attachmentSetId`` was specified, the attachments are added to the specified set, if it exists.
- **expiryTime** *(string) --*
The time and date when the attachment set expires.
:type attachmentSetId: string
:param attachmentSetId:
The ID of the attachment set. If an ``attachmentSetId`` is not specified, a new attachment set is created, and the ID of the set is returned in the response. If an ``attachmentSetId`` is specified, the attachments are added to the specified set, if it exists.
:type attachments: list
:param attachments: **[REQUIRED]**
One or more attachments to add to the set. The limit is 3 attachments per set, and the size limit is 5 MB per attachment.
- *(dict) --*
An attachment to a case communication. The attachment consists of the file name and the content of the file.
- **fileName** *(string) --*
The name of the attachment file.
- **data** *(bytes) --*
The content of the attachment file.
:rtype: dict
:returns:
"""
pass
def add_communication_to_case(self, communicationBody: str, caseId: str = None, ccEmailAddresses: List = None, attachmentSetId: str = None) -> Dict:
"""
Adds additional customer communication to an AWS Support case. You use the ``caseId`` value to identify the case to add communication to. You can list a set of email addresses to copy on the communication using the ``ccEmailAddresses`` value. The ``communicationBody`` value contains the text of the communication.
The response indicates the success or failure of the request.
This operation implements a subset of the features of the AWS Support Center.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/AddCommunicationToCase>`_
**Request Syntax**
::
response = client.add_communication_to_case(
caseId='string',
communicationBody='string',
ccEmailAddresses=[
'string',
],
attachmentSetId='string'
)
**Response Syntax**
::
{
'result': True|False
}
**Response Structure**
- *(dict) --*
The result of the AddCommunicationToCase operation.
- **result** *(boolean) --*
True if AddCommunicationToCase succeeds. Otherwise, returns an error.
:type caseId: string
:param caseId:
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
:type communicationBody: string
:param communicationBody: **[REQUIRED]**
The body of an email communication to add to the support case.
:type ccEmailAddresses: list
:param ccEmailAddresses:
The email addresses in the CC line of an email to be added to the support case.
- *(string) --*
:type attachmentSetId: string
:param attachmentSetId:
The ID of a set of one or more attachments for the communication to add to the case. Create the set by calling AddAttachmentsToSet
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_case(self, subject: str, communicationBody: str, serviceCode: str = None, severityCode: str = None, categoryCode: str = None, ccEmailAddresses: List = None, language: str = None, issueType: str = None, attachmentSetId: str = None) -> Dict:
"""
Creates a new case in the AWS Support Center. This operation is modeled on the behavior of the AWS Support Center `Create Case <https://console.aws.amazon.com/support/home#/case/create>`__ page. Its parameters require you to specify the following information:
* **issueType.** The type of issue for the case. You can specify either "customer-service" or "technical." If you do not indicate a value, the default is "technical."
* **serviceCode.** The code for an AWS service. You obtain the ``serviceCode`` by calling DescribeServices .
* **categoryCode.** The category for the service defined for the ``serviceCode`` value. You also obtain the category code for a service by calling DescribeServices . Each AWS service defines its own set of category codes.
* **severityCode.** A value that indicates the urgency of the case, which in turn determines the response time according to your service level agreement with AWS Support. You obtain the SeverityCode by calling DescribeSeverityLevels .
* **subject.** The **Subject** field on the AWS Support Center `Create Case <https://console.aws.amazon.com/support/home#/case/create>`__ page.
* **communicationBody.** The **Description** field on the AWS Support Center `Create Case <https://console.aws.amazon.com/support/home#/case/create>`__ page.
* **attachmentSetId.** The ID of a set of attachments that has been created by using AddAttachmentsToSet .
* **language.** The human language in which AWS Support handles the case. English and Japanese are currently supported.
* **ccEmailAddresses.** The AWS Support Center **CC** field on the `Create Case <https://console.aws.amazon.com/support/home#/case/create>`__ page. You can list email addresses to be copied on any correspondence about the case. The account that opens the case is already identified by passing the AWS Credentials in the HTTP POST method or in a method or function call from one of the programming languages supported by an `AWS SDK <http://aws.amazon.com/tools/>`__ .
.. note::
To add additional communication or attachments to an existing case, use AddCommunicationToCase .
A successful CreateCase request returns an AWS Support case number. Case numbers are used by the DescribeCases operation to retrieve existing AWS Support cases.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/CreateCase>`_
**Request Syntax**
::
response = client.create_case(
subject='string',
serviceCode='string',
severityCode='string',
categoryCode='string',
communicationBody='string',
ccEmailAddresses=[
'string',
],
language='string',
issueType='string',
attachmentSetId='string'
)
**Response Syntax**
::
{
'caseId': 'string'
}
**Response Structure**
- *(dict) --*
The AWS Support case ID returned by a successful completion of the CreateCase operation.
- **caseId** *(string) --*
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
:type subject: string
:param subject: **[REQUIRED]**
The title of the AWS Support case.
:type serviceCode: string
:param serviceCode:
The code for the AWS service returned by the call to DescribeServices .
:type severityCode: string
:param severityCode:
The code for the severity level returned by the call to DescribeSeverityLevels .
.. note::
The availability of severity levels depends on each customer\'s support subscription. In other words, your subscription may not necessarily require the urgent level of response time.
:type categoryCode: string
:param categoryCode:
The category of problem for the AWS Support case.
:type communicationBody: string
:param communicationBody: **[REQUIRED]**
The communication body text when you create an AWS Support case by calling CreateCase .
:type ccEmailAddresses: list
:param ccEmailAddresses:
A list of email addresses that AWS Support copies on case correspondence.
- *(string) --*
:type language: string
:param language:
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
:type issueType: string
:param issueType:
The type of issue for the case. You can specify either \"customer-service\" or \"technical.\" If you do not indicate a value, the default is \"technical.\"
:type attachmentSetId: string
:param attachmentSetId:
The ID of a set of one or more attachments for the case. Create the set by using AddAttachmentsToSet .
:rtype: dict
:returns:
"""
pass
def describe_attachment(self, attachmentId: str) -> Dict:
"""
Returns the attachment that has the specified ID. Attachment IDs are generated by the case management system when you add an attachment to a case or case communication. Attachment IDs are returned in the AttachmentDetails objects that are returned by the DescribeCommunications operation.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeAttachment>`_
**Request Syntax**
::
response = client.describe_attachment(
attachmentId='string'
)
**Response Syntax**
::
{
'attachment': {
'fileName': 'string',
'data': b'bytes'
}
}
**Response Structure**
- *(dict) --*
The content and file name of the attachment returned by the DescribeAttachment operation.
- **attachment** *(dict) --*
The attachment content and file name.
- **fileName** *(string) --*
The name of the attachment file.
- **data** *(bytes) --*
The content of the attachment file.
:type attachmentId: string
:param attachmentId: **[REQUIRED]**
The ID of the attachment to return. Attachment IDs are returned by the DescribeCommunications operation.
:rtype: dict
:returns:
"""
pass
def describe_cases(self, caseIdList: List = None, displayId: str = None, afterTime: str = None, beforeTime: str = None, includeResolvedCases: bool = None, nextToken: str = None, maxResults: int = None, language: str = None, includeCommunications: bool = None) -> Dict:
"""
Returns a list of cases that you specify by passing one or more case IDs. In addition, you can filter the cases by date by setting values for the ``afterTime`` and ``beforeTime`` request parameters. You can set values for the ``includeResolvedCases`` and ``includeCommunications`` request parameters to control how much information is returned.
Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.
The response returns the following in JSON format:
* One or more CaseDetails data types.
* One or more ``nextToken`` values, which specify where to paginate the returned records represented by the ``CaseDetails`` objects.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeCases>`_
**Request Syntax**
::
response = client.describe_cases(
caseIdList=[
'string',
],
displayId='string',
afterTime='string',
beforeTime='string',
includeResolvedCases=True|False,
nextToken='string',
maxResults=123,
language='string',
includeCommunications=True|False
)
**Response Syntax**
::
{
'cases': [
{
'caseId': 'string',
'displayId': 'string',
'subject': 'string',
'status': 'string',
'serviceCode': 'string',
'categoryCode': 'string',
'severityCode': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'recentCommunications': {
'communications': [
{
'caseId': 'string',
'body': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'attachmentSet': [
{
'attachmentId': 'string',
'fileName': 'string'
},
]
},
],
'nextToken': 'string'
},
'ccEmailAddresses': [
'string',
],
'language': 'string'
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
Returns an array of CaseDetails objects and a ``nextToken`` that defines a point for pagination in the result set.
- **cases** *(list) --*
The details for the cases that match the request.
- *(dict) --*
A JSON-formatted object that contains the metadata for a support case. It is contained the response from a DescribeCases request. **CaseDetails** contains the following fields:
* **caseId.** The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47* .
* **categoryCode.** The category of problem for the AWS Support case. Corresponds to the CategoryCode values returned by a call to DescribeServices .
* **displayId.** The identifier for the case on pages in the AWS Support Center.
* **language.** The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ("en") and Japanese ("ja"). Language parameters must be passed explicitly for operations that take them.
* **recentCommunications.** One or more Communication objects. Fields of these objects are ``attachments`` , ``body`` , ``caseId`` , ``submittedBy`` , and ``timeCreated`` .
* **nextToken.** A resumption point for pagination.
* **serviceCode.** The identifier for the AWS service that corresponds to the service code defined in the call to DescribeServices .
* **severityCode.** The severity code assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels .
* **status.** The status of the case in the AWS Support Center.
* **subject.** The subject line of the case.
* **submittedBy.** The email address of the account that submitted the case.
* **timeCreated.** The time the case was created, in ISO-8601 format.
- **caseId** *(string) --*
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
- **displayId** *(string) --*
The ID displayed for the case in the AWS Support Center. This is a numeric string.
- **subject** *(string) --*
The subject line for the case in the AWS Support Center.
- **status** *(string) --*
The status of the case.
- **serviceCode** *(string) --*
The code for the AWS service returned by the call to DescribeServices .
- **categoryCode** *(string) --*
The category of problem for the AWS Support case.
- **severityCode** *(string) --*
The code for the severity level returned by the call to DescribeSeverityLevels .
- **submittedBy** *(string) --*
The email address of the account that submitted the case.
- **timeCreated** *(string) --*
The time that the case was case created in the AWS Support Center.
- **recentCommunications** *(dict) --*
The five most recent communications between you and AWS Support Center, including the IDs of any attachments to the communications. Also includes a ``nextToken`` that you can use to retrieve earlier communications.
- **communications** *(list) --*
The five most recent communications associated with the case.
- *(dict) --*
A communication associated with an AWS Support case. The communication consists of the case ID, the message body, attachment information, the account email address, and the date and time of the communication.
- **caseId** *(string) --*
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
- **body** *(string) --*
The text of the communication between the customer and AWS Support.
- **submittedBy** *(string) --*
The email address of the account that submitted the AWS Support case.
- **timeCreated** *(string) --*
The time the communication was created.
- **attachmentSet** *(list) --*
Information about the attachments to the case communication.
- *(dict) --*
The file name and ID of an attachment to a case communication. You can use the ID to retrieve the attachment with the DescribeAttachment operation.
- **attachmentId** *(string) --*
The ID of the attachment.
- **fileName** *(string) --*
The file name of the attachment.
- **nextToken** *(string) --*
A resumption point for pagination.
- **ccEmailAddresses** *(list) --*
The email addresses that receive copies of communication about the case.
- *(string) --*
- **language** *(string) --*
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English ("en") and Japanese ("ja"). Language parameters must be passed explicitly for operations that take them.
- **nextToken** *(string) --*
A resumption point for pagination.
:type caseIdList: list
:param caseIdList:
A list of ID numbers of the support cases you want returned. The maximum number of cases is 100.
- *(string) --*
:type displayId: string
:param displayId:
The ID displayed for a case in the AWS Support Center user interface.
:type afterTime: string
:param afterTime:
The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type beforeTime: string
:param beforeTime:
The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type includeResolvedCases: boolean
:param includeResolvedCases:
Specifies whether resolved support cases should be included in the DescribeCases results. The default is *false* .
:type nextToken: string
:param nextToken:
A resumption point for pagination.
:type maxResults: integer
:param maxResults:
The maximum number of results to return before paginating.
:type language: string
:param language:
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
:type includeCommunications: boolean
:param includeCommunications:
Specifies whether communications should be included in the DescribeCases results. The default is *true* .
:rtype: dict
:returns:
"""
pass
def describe_communications(self, caseId: str, beforeTime: str = None, afterTime: str = None, nextToken: str = None, maxResults: int = None) -> Dict:
"""
Returns communications (and attachments) for one or more support cases. You can use the ``afterTime`` and ``beforeTime`` parameters to filter by date. You can use the ``caseId`` parameter to restrict the results to a particular case.
Case data is available for 12 months after creation. If a case was created more than 12 months ago, a request for data might cause an error.
You can use the ``maxResults`` and ``nextToken`` parameters to control the pagination of the result set. Set ``maxResults`` to the number of cases you want displayed on each page, and use ``nextToken`` to specify the resumption of pagination.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeCommunications>`_
**Request Syntax**
::
response = client.describe_communications(
caseId='string',
beforeTime='string',
afterTime='string',
nextToken='string',
maxResults=123
)
**Response Syntax**
::
{
'communications': [
{
'caseId': 'string',
'body': 'string',
'submittedBy': 'string',
'timeCreated': 'string',
'attachmentSet': [
{
'attachmentId': 'string',
'fileName': 'string'
},
]
},
],
'nextToken': 'string'
}
**Response Structure**
- *(dict) --*
The communications returned by the DescribeCommunications operation.
- **communications** *(list) --*
The communications for the case.
- *(dict) --*
A communication associated with an AWS Support case. The communication consists of the case ID, the message body, attachment information, the account email address, and the date and time of the communication.
- **caseId** *(string) --*
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
- **body** *(string) --*
The text of the communication between the customer and AWS Support.
- **submittedBy** *(string) --*
The email address of the account that submitted the AWS Support case.
- **timeCreated** *(string) --*
The time the communication was created.
- **attachmentSet** *(list) --*
Information about the attachments to the case communication.
- *(dict) --*
The file name and ID of an attachment to a case communication. You can use the ID to retrieve the attachment with the DescribeAttachment operation.
- **attachmentId** *(string) --*
The ID of the attachment.
- **fileName** *(string) --*
The file name of the attachment.
- **nextToken** *(string) --*
A resumption point for pagination.
:type caseId: string
:param caseId: **[REQUIRED]**
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
:type beforeTime: string
:param beforeTime:
The end date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type afterTime: string
:param afterTime:
The start date for a filtered date search on support case communications. Case communications are available for 12 months after creation.
:type nextToken: string
:param nextToken:
A resumption point for pagination.
:type maxResults: integer
:param maxResults:
The maximum number of results to return before paginating.
:rtype: dict
:returns:
"""
pass
def describe_services(self, serviceCodeList: List = None, language: str = None) -> Dict:
"""
Returns the current list of AWS services and a list of service categories that applies to each one. You then use service names and categories in your CreateCase requests. Each AWS service has its own set of categories.
The service codes and category codes correspond to the values that are displayed in the **Service** and **Category** drop-down lists on the AWS Support Center `Create Case <https://console.aws.amazon.com/support/home#/case/create>`__ page. The values in those fields, however, do not necessarily match the service codes and categories returned by the ``DescribeServices`` request. Always use the service codes and categories obtained programmatically. This practice ensures that you always have the most recent set of service and category codes.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeServices>`_
**Request Syntax**
::
response = client.describe_services(
serviceCodeList=[
'string',
],
language='string'
)
**Response Syntax**
::
{
'services': [
{
'code': 'string',
'name': 'string',
'categories': [
{
'code': 'string',
'name': 'string'
},
]
},
]
}
**Response Structure**
- *(dict) --*
The list of AWS services returned by the DescribeServices operation.
- **services** *(list) --*
A JSON-formatted list of AWS services.
- *(dict) --*
Information about an AWS service returned by the DescribeServices operation.
- **code** *(string) --*
The code for an AWS service returned by the DescribeServices response. The ``name`` element contains the corresponding friendly name.
- **name** *(string) --*
The friendly name for an AWS service. The ``code`` element contains the corresponding code.
- **categories** *(list) --*
A list of categories that describe the type of support issue a case describes. Categories consist of a category name and a category code. Category names and codes are passed to AWS Support when you call CreateCase .
- *(dict) --*
A JSON-formatted name/value pair that represents the category name and category code of the problem, selected from the DescribeServices response for each AWS service.
- **code** *(string) --*
The category code for the support case.
- **name** *(string) --*
The category name for the support case.
:type serviceCodeList: list
:param serviceCodeList:
A JSON-formatted list of service codes available for AWS services.
- *(string) --*
:type language: string
:param language:
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
:rtype: dict
:returns:
"""
pass
def describe_severity_levels(self, language: str = None) -> Dict:
"""
Returns the list of severity levels that you can assign to an AWS Support case. The severity level for a case is also a field in the CaseDetails data type included in any CreateCase request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeSeverityLevels>`_
**Request Syntax**
::
response = client.describe_severity_levels(
language='string'
)
**Response Syntax**
::
{
'severityLevels': [
{
'code': 'string',
'name': 'string'
},
]
}
**Response Structure**
- *(dict) --*
The list of severity levels returned by the DescribeSeverityLevels operation.
- **severityLevels** *(list) --*
The available severity levels for the support case. Available severity levels are defined by your service level agreement with AWS.
- *(dict) --*
A code and name pair that represent a severity level that can be applied to a support case.
- **code** *(string) --*
One of four values: "low," "medium," "high," and "urgent". These values correspond to response times returned to the caller in ``severityLevel.name`` .
- **name** *(string) --*
The name of the severity level that corresponds to the severity level code.
:type language: string
:param language:
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
:rtype: dict
:returns:
"""
pass
def describe_trusted_advisor_check_refresh_statuses(self, checkIds: List) -> Dict:
"""
Returns the refresh status of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks .
.. note::
Some checks are refreshed automatically, and their refresh statuses cannot be retrieved by using this operation. Use of the ``DescribeTrustedAdvisorCheckRefreshStatuses`` operation for these checks causes an ``InvalidParameterValue`` error.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorCheckRefreshStatuses>`_
**Request Syntax**
::
response = client.describe_trusted_advisor_check_refresh_statuses(
checkIds=[
'string',
]
)
**Response Syntax**
::
{
'statuses': [
{
'checkId': 'string',
'status': 'string',
'millisUntilNextRefreshable': 123
},
]
}
**Response Structure**
- *(dict) --*
The statuses of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckRefreshStatuses operation.
- **statuses** *(list) --*
The refresh status of the specified Trusted Advisor checks.
- *(dict) --*
The refresh status of a Trusted Advisor check.
- **checkId** *(string) --*
The unique identifier for the Trusted Advisor check.
- **status** *(string) --*
The status of the Trusted Advisor check for which a refresh has been requested: "none", "enqueued", "processing", "success", or "abandoned".
- **millisUntilNextRefreshable** *(integer) --*
The amount of time, in milliseconds, until the Trusted Advisor check is eligible for refresh.
:type checkIds: list
:param checkIds: **[REQUIRED]**
The IDs of the Trusted Advisor checks to get the status of. **Note:** Specifying the check ID of a check that is automatically refreshed causes an ``InvalidParameterValue`` error.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def describe_trusted_advisor_check_result(self, checkId: str, language: str = None) -> Dict:
"""
Returns the results of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks .
The response contains a TrustedAdvisorCheckResult object, which contains these three objects:
* TrustedAdvisorCategorySpecificSummary
* TrustedAdvisorResourceDetail
* TrustedAdvisorResourcesSummary
In addition, the response contains these fields:
* **status.** The alert status of the check: "ok" (green), "warning" (yellow), "error" (red), or "not_available".
* **timestamp.** The time of the last refresh of the check.
* **checkId.** The unique identifier for the check.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorCheckResult>`_
**Request Syntax**
::
response = client.describe_trusted_advisor_check_result(
checkId='string',
language='string'
)
**Response Syntax**
::
{
'result': {
'checkId': 'string',
'timestamp': 'string',
'status': 'string',
'resourcesSummary': {
'resourcesProcessed': 123,
'resourcesFlagged': 123,
'resourcesIgnored': 123,
'resourcesSuppressed': 123
},
'categorySpecificSummary': {
'costOptimizing': {
'estimatedMonthlySavings': 123.0,
'estimatedPercentMonthlySavings': 123.0
}
},
'flaggedResources': [
{
'status': 'string',
'region': 'string',
'resourceId': 'string',
'isSuppressed': True|False,
'metadata': [
'string',
]
},
]
}
}
**Response Structure**
- *(dict) --*
The result of the Trusted Advisor check returned by the DescribeTrustedAdvisorCheckResult operation.
- **result** *(dict) --*
The detailed results of the Trusted Advisor check.
- **checkId** *(string) --*
The unique identifier for the Trusted Advisor check.
- **timestamp** *(string) --*
The time of the last refresh of the check.
- **status** *(string) --*
The alert status of the check: "ok" (green), "warning" (yellow), "error" (red), or "not_available".
- **resourcesSummary** *(dict) --*
Details about AWS resources that were analyzed in a call to Trusted Advisor DescribeTrustedAdvisorCheckSummaries .
- **resourcesProcessed** *(integer) --*
The number of AWS resources that were analyzed by the Trusted Advisor check.
- **resourcesFlagged** *(integer) --*
The number of AWS resources that were flagged (listed) by the Trusted Advisor check.
- **resourcesIgnored** *(integer) --*
The number of AWS resources ignored by Trusted Advisor because information was unavailable.
- **resourcesSuppressed** *(integer) --*
The number of AWS resources ignored by Trusted Advisor because they were marked as suppressed by the user.
- **categorySpecificSummary** *(dict) --*
Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.
- **costOptimizing** *(dict) --*
The summary information about cost savings for a Trusted Advisor check that is in the Cost Optimizing category.
- **estimatedMonthlySavings** *(float) --*
The estimated monthly savings that might be realized if the recommended actions are taken.
- **estimatedPercentMonthlySavings** *(float) --*
The estimated percentage of savings that might be realized if the recommended actions are taken.
- **flaggedResources** *(list) --*
The details about each resource listed in the check result.
- *(dict) --*
Contains information about a resource identified by a Trusted Advisor check.
- **status** *(string) --*
The status code for the resource identified in the Trusted Advisor check.
- **region** *(string) --*
The AWS region in which the identified resource is located.
- **resourceId** *(string) --*
The unique identifier for the identified resource.
- **isSuppressed** *(boolean) --*
Specifies whether the AWS resource was ignored by Trusted Advisor because it was marked as suppressed by the user.
- **metadata** *(list) --*
Additional information about the identified resource. The exact metadata and its order can be obtained by inspecting the TrustedAdvisorCheckDescription object returned by the call to DescribeTrustedAdvisorChecks . **Metadata** contains all the data that is shown in the Excel download, even in those cases where the UI shows just summary data.
- *(string) --*
:type checkId: string
:param checkId: **[REQUIRED]**
The unique identifier for the Trusted Advisor check.
:type language: string
:param language:
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
:rtype: dict
:returns:
"""
pass
def describe_trusted_advisor_check_summaries(self, checkIds: List) -> Dict:
"""
Returns the summaries of the results of the Trusted Advisor checks that have the specified check IDs. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks .
The response contains an array of TrustedAdvisorCheckSummary objects.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorCheckSummaries>`_
**Request Syntax**
::
response = client.describe_trusted_advisor_check_summaries(
checkIds=[
'string',
]
)
**Response Syntax**
::
{
'summaries': [
{
'checkId': 'string',
'timestamp': 'string',
'status': 'string',
'hasFlaggedResources': True|False,
'resourcesSummary': {
'resourcesProcessed': 123,
'resourcesFlagged': 123,
'resourcesIgnored': 123,
'resourcesSuppressed': 123
},
'categorySpecificSummary': {
'costOptimizing': {
'estimatedMonthlySavings': 123.0,
'estimatedPercentMonthlySavings': 123.0
}
}
},
]
}
**Response Structure**
- *(dict) --*
The summaries of the Trusted Advisor checks returned by the DescribeTrustedAdvisorCheckSummaries operation.
- **summaries** *(list) --*
The summary information for the requested Trusted Advisor checks.
- *(dict) --*
A summary of a Trusted Advisor check result, including the alert status, last refresh, and number of resources examined.
- **checkId** *(string) --*
The unique identifier for the Trusted Advisor check.
- **timestamp** *(string) --*
The time of the last refresh of the check.
- **status** *(string) --*
The alert status of the check: "ok" (green), "warning" (yellow), "error" (red), or "not_available".
- **hasFlaggedResources** *(boolean) --*
Specifies whether the Trusted Advisor check has flagged resources.
- **resourcesSummary** *(dict) --*
Details about AWS resources that were analyzed in a call to Trusted Advisor DescribeTrustedAdvisorCheckSummaries .
- **resourcesProcessed** *(integer) --*
The number of AWS resources that were analyzed by the Trusted Advisor check.
- **resourcesFlagged** *(integer) --*
The number of AWS resources that were flagged (listed) by the Trusted Advisor check.
- **resourcesIgnored** *(integer) --*
The number of AWS resources ignored by Trusted Advisor because information was unavailable.
- **resourcesSuppressed** *(integer) --*
The number of AWS resources ignored by Trusted Advisor because they were marked as suppressed by the user.
- **categorySpecificSummary** *(dict) --*
Summary information that relates to the category of the check. Cost Optimizing is the only category that is currently supported.
- **costOptimizing** *(dict) --*
The summary information about cost savings for a Trusted Advisor check that is in the Cost Optimizing category.
- **estimatedMonthlySavings** *(float) --*
The estimated monthly savings that might be realized if the recommended actions are taken.
- **estimatedPercentMonthlySavings** *(float) --*
The estimated percentage of savings that might be realized if the recommended actions are taken.
:type checkIds: list
:param checkIds: **[REQUIRED]**
The IDs of the Trusted Advisor checks.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def describe_trusted_advisor_checks(self, language: str) -> Dict:
"""
Returns information about all available Trusted Advisor checks, including name, ID, category, description, and metadata. You must specify a language code; English ("en") and Japanese ("ja") are currently supported. The response contains a TrustedAdvisorCheckDescription for each check.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/DescribeTrustedAdvisorChecks>`_
**Request Syntax**
::
response = client.describe_trusted_advisor_checks(
language='string'
)
**Response Syntax**
::
{
'checks': [
{
'id': 'string',
'name': 'string',
'description': 'string',
'category': 'string',
'metadata': [
'string',
]
},
]
}
**Response Structure**
- *(dict) --*
Information about the Trusted Advisor checks returned by the DescribeTrustedAdvisorChecks operation.
- **checks** *(list) --*
Information about all available Trusted Advisor checks.
- *(dict) --*
The description and metadata for a Trusted Advisor check.
- **id** *(string) --*
The unique identifier for the Trusted Advisor check.
- **name** *(string) --*
The display name for the Trusted Advisor check.
- **description** *(string) --*
The description of the Trusted Advisor check, which includes the alert criteria and recommended actions (contains HTML markup).
- **category** *(string) --*
The category of the Trusted Advisor check.
- **metadata** *(list) --*
The column headings for the data returned by the Trusted Advisor check. The order of the headings corresponds to the order of the data in the **Metadata** element of the TrustedAdvisorResourceDetail for the check. **Metadata** contains all the data that is shown in the Excel download, even in those cases where the UI shows just summary data.
- *(string) --*
:type language: string
:param language: **[REQUIRED]**
The ISO 639-1 code for the language in which AWS provides support. AWS Support currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be passed explicitly for operations that take them.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def refresh_trusted_advisor_check(self, checkId: str) -> Dict:
"""
Requests a refresh of the Trusted Advisor check that has the specified check ID. Check IDs can be obtained by calling DescribeTrustedAdvisorChecks .
.. note::
Some checks are refreshed automatically, and they cannot be refreshed by using this operation. Use of the ``RefreshTrustedAdvisorCheck`` operation for these checks causes an ``InvalidParameterValue`` error.
The response contains a TrustedAdvisorCheckRefreshStatus object, which contains these fields:
* **status.** The refresh status of the check: "none", "enqueued", "processing", "success", or "abandoned".
* **millisUntilNextRefreshable.** The amount of time, in milliseconds, until the check is eligible for refresh.
* **checkId.** The unique identifier for the check.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/RefreshTrustedAdvisorCheck>`_
**Request Syntax**
::
response = client.refresh_trusted_advisor_check(
checkId='string'
)
**Response Syntax**
::
{
'status': {
'checkId': 'string',
'status': 'string',
'millisUntilNextRefreshable': 123
}
}
**Response Structure**
- *(dict) --*
The current refresh status of a Trusted Advisor check.
- **status** *(dict) --*
The current refresh status for a check, including the amount of time until the check is eligible for refresh.
- **checkId** *(string) --*
The unique identifier for the Trusted Advisor check.
- **status** *(string) --*
The status of the Trusted Advisor check for which a refresh has been requested: "none", "enqueued", "processing", "success", or "abandoned".
- **millisUntilNextRefreshable** *(integer) --*
The amount of time, in milliseconds, until the Trusted Advisor check is eligible for refresh.
:type checkId: string
:param checkId: **[REQUIRED]**
The unique identifier for the Trusted Advisor check to refresh. **Note:** Specifying the check ID of a check that is automatically refreshed causes an ``InvalidParameterValue`` error.
:rtype: dict
:returns:
"""
pass
def resolve_case(self, caseId: str = None) -> Dict:
"""
Takes a ``caseId`` and returns the initial state of the case along with the state of the case after the call to ResolveCase completed.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/support-2013-04-15/ResolveCase>`_
**Request Syntax**
::
response = client.resolve_case(
caseId='string'
)
**Response Syntax**
::
{
'initialCaseStatus': 'string',
'finalCaseStatus': 'string'
}
**Response Structure**
- *(dict) --*
The status of the case returned by the ResolveCase operation.
- **initialCaseStatus** *(string) --*
The status of the case when the ResolveCase request was sent.
- **finalCaseStatus** *(string) --*
The status of the case after the ResolveCase request was processed.
:type caseId: string
:param caseId:
The AWS Support case ID requested or returned in the call. The case ID is an alphanumeric string formatted as shown in this example: case-*12345678910-2013-c4c1d2bf33c5cf47*
:rtype: dict
:returns:
"""
pass
|
import io
from nose.tools import istest, assert_equal
import funk
import sdmx
@istest
def dataset_key_family_is_retrieved_from_dsd():
"""The DSD referenced by the dataset's keyFamilyURI is fetched (via the
mocked HTTP layer set up in _reader) and used to resolve the key family's
name and dimension labels."""
dataset_file = io.BytesIO(
b"""<message:CompactData xmlns="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message" xmlns:common="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/common" xmlns:compact="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/compact" xmlns:oecd="http://oecd.stat.org/Data" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message http://www.sdmx.org/docs/2_0/SDMXMessage.xsd http://oecd.stat.org/Data http://stats.oecd.org/RestSDMX/sdmx.ashx/GetSchema/MON2012TSE_O" xmlns:message="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message">
<oecd:DataSet keyFamilyURI="http://stats.oecd.org/RestSDMX/sdmx.ashx/GetKeyFamily/MON2012TSE_O/OECD/?resolveRef=true" xmlns:oecd="http://oecd.stat.org/Data">
</oecd:DataSet>
</message:CompactData>""")
dataset_reader = _reader(dataset_file)
dataset, = dataset_reader.datasets()
# The expected name and dimensions come from the test DSD in _dsd_fileobj().
assert_equal("2012 A) OECD: Estimate of support to agriculture", dataset.key_family().name("en"))
assert_equal(["Country", "Indicator"], dataset.key_family().describe_dimensions("en"))
@istest
def key_family_from_passed_dsd_is_used_if_key_family_uri_is_missing():
"""When the DataSet element carries no keyFamilyURI attribute, the DSD
supplied explicitly via the dsd_fileobj keyword is used instead of fetching
one over HTTP."""
dataset_file = io.BytesIO(
b"""<message:CompactData xmlns="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message" xmlns:common="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/common" xmlns:compact="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/compact" xmlns:oecd="http://oecd.stat.org/Data" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message http://www.sdmx.org/docs/2_0/SDMXMessage.xsd http://oecd.stat.org/Data http://stats.oecd.org/RestSDMX/sdmx.ashx/GetSchema/MON2012TSE_O" xmlns:message="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message">
<oecd:DataSet>
</oecd:DataSet>
</message:CompactData>""")
# dsd_fileobj provides the DSD directly; the DataSet above has no keyFamilyURI.
dataset_reader = _reader(dataset_file, dsd_fileobj=_dsd_fileobj())
dataset, = dataset_reader.datasets()
assert_equal("2012 A) OECD: Estimate of support to agriculture", dataset.key_family().name("en"))
@istest
def series_key_is_read_using_dsd_concepts_and_code_lists():
"""Series attributes (COUNTRY, INDIC) are translated into human-readable
concept names and code descriptions taken from the DSD's Concepts and
CodeLists sections."""
dataset_file = io.BytesIO(
b"""<message:CompactData xmlns="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message" xmlns:common="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/common" xmlns:compact="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/compact" xmlns:oecd="http://oecd.stat.org/Data" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message http://www.sdmx.org/docs/2_0/SDMXMessage.xsd http://oecd.stat.org/Data http://stats.oecd.org/RestSDMX/sdmx.ashx/GetSchema/MON2012TSE_O" xmlns:message="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message">
<oecd:DataSet keyFamilyURI="http://stats.oecd.org/RestSDMX/sdmx.ashx/GetKeyFamily/MON2012TSE_O/OECD/?resolveRef=true" xmlns:oecd="http://oecd.stat.org/Data">
<oecd:Series COUNTRY="OECD-E" INDIC="TO-VP" TIME_FORMAT="P1Y">
</oecd:Series>
</oecd:DataSet>
</message:CompactData>""")
dataset_reader = _reader(dataset_file)
dataset, = dataset_reader.datasets()
series, = dataset.series()
# COUNTRY=OECD-E and INDIC=TO-VP map to the descriptions defined in the test DSD.
assert_equal(
[("Country", ["OECD(EUR million)"]), ("Indicator", ["Total value of production (at farm gate)"])],
list(series.describe_key(lang="en").items()),
)
@istest
def observations_have_time_and_value():
"""Each Obs element yields an observation exposing its TIME and OBS_VALUE
attributes as strings — note the values are not converted to numbers."""
dataset_file = io.BytesIO(
b"""<message:CompactData xmlns="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message" xmlns:common="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/common" xmlns:compact="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/compact" xmlns:oecd="http://oecd.stat.org/Data" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message http://www.sdmx.org/docs/2_0/SDMXMessage.xsd http://oecd.stat.org/Data http://stats.oecd.org/RestSDMX/sdmx.ashx/GetSchema/MON2012TSE_O" xmlns:message="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message">
<oecd:DataSet keyFamilyURI="http://stats.oecd.org/RestSDMX/sdmx.ashx/GetKeyFamily/MON2012TSE_O/OECD/?resolveRef=true" xmlns:oecd="http://oecd.stat.org/Data">
<oecd:Series COUNTRY="OECD-E" INDIC="TO-VP" TIME_FORMAT="P1Y">
<oecd:Obs TIME="1986" OBS_VALUE="538954.220075479" />
<oecd:Obs TIME="1987" OBS_VALUE="598184.668422966" />
</oecd:Series>
</oecd:DataSet>
</message:CompactData>""")
dataset_reader = _reader(dataset_file)
dataset, = dataset_reader.datasets()
series, = dataset.series()
# Observations are yielded in document order.
first_obs, second_obs = series.observations()
assert_equal("1986", first_obs.time)
assert_equal("538954.220075479", first_obs.value)
assert_equal("1987", second_obs.time)
assert_equal("598184.668422966", second_obs.value)
def _reader(dataset_file, **kwargs):
"""Build a compact-data message reader over dataset_file whose HTTP layer
is mocked: a GET of the test key-family URI returns a response that streams
the test DSD in 16 KiB chunks.

Extra keyword arguments (e.g. dsd_fileobj) are passed straight through to
sdmx.compact_data_message_reader.
"""
context = funk.Context()
requests = context.mock()
response = context.mock()
# The URI below matches the keyFamilyURI used in the test datasets.
funk.allows(requests).get("http://stats.oecd.org/RestSDMX/sdmx.ashx/GetKeyFamily/MON2012TSE_O/OECD/?resolveRef=true").returns(response)
# iter_content(16 * 1024) mirrors how the reader consumes the response body.
funk.allows(response).iter_content(16 * 1024).returns(_dsd_chunks())
return sdmx.compact_data_message_reader(fileobj=dataset_file, requests=requests, **kwargs)
def _dsd_fileobj():
return io.BytesIO(b"""<?xml version="1.0" encoding="UTF-8"?>
<Structure xmlns="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/message" xmlns:structure="http://www.SDMX.org/resources/SDMXML/schemas/v2_0/structure">
<CodeLists>
<structure:CodeList id="CL_MON2012TSE_O_COUNTRY" agencyID="OECD">
<structure:Code value="OECD-E">
<structure:Description xml:lang="en">OECD(EUR million)</structure:Description>
</structure:Code>
</structure:CodeList>
<structure:CodeList id="CL_MON2012TSE_O_INDIC" agencyID="OECD">
<structure:Code value="TO-VP">
<structure:Description xml:lang="en">Total value of production (at farm gate)</structure:Description>
</structure:Code>
<structure:Code value="TO-VP1P" parentCode="TO-VP">
<structure:Description xml:lang="en">of which: share of MPS commodities, percentage</structure:Description>
</structure:Code>
</structure:CodeList>
</CodeLists>
<Concepts>
<structure:Concept id="COUNTRY">
<structure:Name xml:lang="en">Country</structure:Name>
</structure:Concept>
<structure:Concept id="INDIC">
<structure:Name xml:lang="en">Indicator</structure:Name>
</structure:Concept>
</Concepts>
<KeyFamilies>
<structure:KeyFamily id="MON2012TSE_O" agencyID="OECD">
<structure:Name xml:lang="en">2012 A) OECD: Estimate of support to agriculture</structure:Name>
<structure:Components>
<structure:Dimension conceptRef="COUNTRY" codelist="CL_MON2012TSE_O_COUNTRY"/>
<structure:Dimension conceptRef="INDIC" codelist="CL_MON2012TSE_O_INDIC"/>
<structure:TimeDimension conceptRef="TIME" />
<structure:PrimaryMeasure conceptRef="OBS_VALUE"><TextFormat textType="Double" /></structure:PrimaryMeasure>
</structure:Components>
</structure:KeyFamily>
</KeyFamilies>
</Structure>""")
def _dsd_chunks():
fileobj = _dsd_fileobj()
buf = []
while True:
buf = fileobj.read(16 * 1024)
if buf:
yield buf
else:
return
|
<filename>model.py
"""model.py
Neural network to train on the labeled data.
"""
import sys
import torch
from torch import nn
class Net(nn.Module):
    """Small CNN classifier over single-channel 2-D inputs.

    The feature extractor reduces the input to a 512-d vector.  The pooling
    arithmetic only closes for an input of shape (N, 1, 96, 512) -- the
    original inline comments said "512 x 96", so confirm the orientation
    against the data pipeline (TODO).

    With ``auxiliary=True`` (default) the forward pass returns two auxiliary
    2-way head outputs alongside the final 4-way head; otherwise only the
    final head output is returned.
    """

    def __init__(self, batch_size=32, auxiliary=True):
        super(Net, self).__init__()
        # kept for backward compatibility; forward() now infers the batch
        # dimension from the input tensor itself
        self.batch_size = batch_size
        self.auxiliary = auxiliary

        # Feature extractor ("CNN 2"): five conv blocks, 1 x 96 x 512 input
        self.features = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),            # 8 x 48 x 256
            nn.Conv2d(8, 16, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),            # 16 x 24 x 128
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=4, stride=4),            # 32 x 6 x 32
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=(2, 4), stride=(2, 4)),  # 64 x 3 x 8
            nn.Conv2d(64, 512, kernel_size=3, stride=1, padding=1),
            nn.MaxPool2d(kernel_size=(3, 8), stride=(3, 8)),  # 512 x 1 x 1
        )

        if self.auxiliary:
            # auxiliary binary heads, built only when requested
            self.classifier_1 = nn.Sequential(
                nn.Linear(512, 512),
                nn.Dropout(),
                nn.ReLU(inplace=True),
                nn.Linear(512, 2),
            )
            self.classifier_2 = nn.Sequential(
                nn.Linear(512, 512),
                nn.Dropout(),
                nn.ReLU(inplace=True),
                nn.Linear(512, 4 - 2),  # 2-way head, mirrors classifier_1
            )

        # final 4-way head; always present, since forward() uses it in both
        # the auxiliary and non-auxiliary paths
        self.classifier_final = nn.Sequential(
            nn.Linear(512, 512),
            nn.Dropout(),
            nn.ReLU(inplace=True),
            nn.Linear(512, 512),
            nn.Dropout(),
            nn.ReLU(inplace=True),
            nn.Linear(512, 4),
        )

    def set_batch_size(self, batch_size):
        """Retained for API compatibility; forward() no longer depends on it."""
        self.batch_size = batch_size

    def forward(self, x):
        """Run the network; returns (aux1, aux2, final) or just final.

        :param x: float tensor of shape (N, 1, 96, 512).
        """
        # BUG FIX: flatten using the actual batch dimension instead of the
        # stored self.batch_size, which mis-reshaped (or crashed) whenever
        # the last batch of an epoch was smaller than batch_size.
        x = self.features(x).view(x.size(0), -1)
        if self.auxiliary:
            return self.classifier_1(x), self.classifier_2(x), self.classifier_final(x)
        return self.classifier_final(x)

    def _update_progress(self, progress, avg_loss, val_loss):
        """Render a one-line text progress bar with train/validation loss.

        :param progress: completion fraction in [0, 1]; coerced via float().
        :param avg_loss: average training loss to display.
        :param val_loss: validation loss to display.
        """
        length = 20
        status = ""
        try:
            progress = float(progress)
        # BUG FIX: float() raises ValueError for non-numeric strings; the
        # original caught only TypeError and let ValueError propagate.
        except (TypeError, ValueError):
            progress = 0
            status = "Error: progress must be numeric\r\n"
        if progress < 0:
            progress = 0
            status = "Error: progress must be >= 0\r\n"
        if progress >= 1:
            progress = 1
            status = "Fin\n"
        block = int(round(length * progress))
        text = \
            "\rPercent: [{}] {:3.0f}% " \
            "TL:{:1.5f} VL:{:1.5f} {}".format(
                "#" * block + "-" * (length - block),
                round(progress * 100, 2),
                avg_loss, val_loss, status
            )
        sys.stdout.write(text)
        sys.stdout.flush()
|
import marshal
import sys
import os
class Cache:
    """Two-level key/value store persisted as marshal files under root/k1/k2."""

    def __init__(self, root_dir):
        self.root = root_dir
        # serialization hooks; marshal is fast but handles builtin types only
        self.dump = marshal.dump
        self.load = marshal.load

    # ---[ key1, key2, value interface ]----------------------------------------
    def get(self, k1, k2, default=None):
        """Return the stored value for (k1, k2), or *default* if missing/unreadable."""
        path = _path(self.root, k1, k2)
        try:
            with open(path, 'rb') as f:
                return self.load(f)
        # BUG FIX: narrowed from a bare `except:` -- catch I/O and
        # deserialization failures only, so KeyboardInterrupt/SystemExit
        # still propagate.  Missing-file reads stay a best-effort default.
        except (OSError, EOFError, ValueError, TypeError):
            return default

    def set(self, k1, k2, value):
        """Store *value* under (k1, k2); a value of None deletes the entry."""
        path = _path(self.root, k1, k2)
        if value is not None:
            # BUG FIX: use a context manager -- the original leaked the file
            # handle on this path (open() without close()).
            with open(path, 'wb') as f:
                self.dump(value, f)
        elif os.path.exists(path):
            os.remove(path)

    def keys(self, k1=None):
        """List second-level keys under *k1*, or the top-level keys when k1 is falsy."""
        if k1:
            return os.listdir(_path(self.root, k1, ''))
        return os.listdir(self.root)

    # --------------------------------------------------------------------------
    def top_keys(self, k1, k2_from, k2_to, inclusive=True):
        """Keys in [k2_from, k2_to] reduced to their shortest (prefix) forms."""
        in_range = _limit_range(self.keys(k1), k2_from, k2_to, inclusive)
        return _limit_top(in_range)

    def top(self, k1, k2_from, k2_to, inclusive=True):
        """Return (values, keys) for the top keys of the given range."""
        keys = self.top_keys(k1, k2_from, k2_to, inclusive)
        return [self.get(k1, k) for k in keys], keys

    def invalid(self, k1, k2, inclusive=False):
        """Keys under *k1* that are proper prefixes of *k2* (plus k2 itself if inclusive)."""
        out = []
        for k in self.keys(k1):
            if k2.startswith(k) and (k2 != k or inclusive):
                out.append(k)
        return out

    def hint_optimize(self):
        pass  # TODO: placeholder for a future optimization hint
# HELPERS: KEY-VALUE
def _path(root, k1, k2):
"join path and create directory root/k1 if not exists"
dir = os.path.join(root,k1)
if not os.path.exists(dir):
os.makedirs(dir)
path = os.path.join(dir,k2)
return path
# HELPERS: RANGE
def _limit_range(all, k_from, k_to=None, inclusive=True):
""
if k_to:
if inclusive:
out = [k for k in all if k>=k_from and (k<k_to or k.startswith(k_to))]
else:
out = [k for k in all if k>=k_from and k<k_to]
else:
# TODO k_from is not string (list / tuple / set)
out = [k for k in all if k.startswith(k_from)]
out.sort()
return out
def _limit_top(k_list):
""
if not k_list: return []
curr = k_list[0]
out = [curr]
for k in k_list[1:]:
if k.startswith(curr):
pass
else:
out += [k]
curr = k
return out
def _agg(all, top):
    # Not implemented yet -- presumably meant to aggregate the values of the
    # keys in `all` under their covering prefix keys in `top` (TODO confirm
    # intended semantics before implementing).
    pass
# ---[ PROCESSING ]-------------------------------------------------------------
# ------------------------------------------------------------------------------
if __name__=="__main__":
    # Smoke-test / demo: exercises the Cache API against ./test_dir.
    c = Cache('test_dir')
    # storing None deletes the entry (see Cache.set)
    c.set('k1','k2',None)
    c.set('k1','k21',[1])
    c.set('k1','k22',[2])
    c.set('k1','k23',[3])
    c.set('k1','k3',[3,4,5])
    c.set('k1','k34',[3,4])
    c.set('k1','k35',[3,5])
    c.set('k1','k4',[4,5,6])
    c.set('k1','k',[1,2,3,4,5,6])
    v = c.get('k1','k3')
    print(v)
    all = c.keys('k1')
    print('list_all:k1',all)
    print('list_all',c.keys())
    # exercise the range + top-key reduction helpers directly
    r = _limit_range(all,'k1','k4',inclusive=True)
    print('range',r)
    print('top',_limit_top(r))
    print('top',c.top('k1','k2','k4'))
    # keys that would be invalidated by key 'k35' (result is discarded here)
    c.invalid('k1','k35',True)
|
import os
import pickle
import shutil

import numpy as np
def _analyze_query_point_assignment(
query_data_dict: dict,
init_Rdata_dict: dict,
init_Edata_dict: dict,
num_R: int,
query_point_assignment_array: np.ndarray,
root: str,
n_points_to_copy=50,
):
"""
Analyzes and visualizes qDCA results.
:param query_data_dict: raw query data.
:param init_Rdata_dict: raw R data.
:param init_Edata_dict: raw E data.
:param num_R: total number of R points.
:param query_point_assignment_array: query point assignments results.
:param root: root directory of the experiment.
:param n_points_to_copy: number of images to save.
:return: accuracy of qDCA assignments; list of (R, query) points with same label;
list of (R, query) points with different label
"""
true_query_data_labels = query_data_dict["labels"]
assigned_R = query_point_assignment_array[
query_point_assignment_array[:, 1] < num_R, 1
]
assigned_E = query_point_assignment_array[
query_point_assignment_array[:, 1] >= num_R, 1
]
assigned_R_labels = init_Rdata_dict["labels"][assigned_R]
assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R]
assigned_query_data_labels = np.empty(
shape=query_point_assignment_array.shape[0]
).astype(np.int32)
assigned_query_data_labels[
query_point_assignment_array[:, 1] < num_R
] = assigned_R_labels
assigned_query_data_labels[
query_point_assignment_array[:, 1] >= num_R
] = assigned_E_labels
accuracy = (
true_query_data_labels == assigned_query_data_labels
).sum() / assigned_query_data_labels.shape[0]
same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0]
wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0]
correct_pairs = []
for i in query_point_assignment_array[same_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
correct_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
wrong_pairs = []
for i in query_point_assignment_array[wrong_label_idx]:
query_idx, init_idx = i
if init_idx < num_R:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Rdata_dict["paths"].astype(object)[init_idx],
query_data_dict["labels"][query_idx],
init_Rdata_dict["labels"][init_idx],
]
)
else:
wrong_pairs.append(
[
query_data_dict["paths"].astype(object)[query_idx],
init_Edata_dict["paths"].astype(object)[init_idx - num_R],
query_data_dict["labels"][query_idx],
init_Edata_dict["labels"][init_idx - num_R],
]
)
with open(
os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb"
) as f:
pickle.dump(
{
"accuracy": accuracy,
"same_label_idx": same_label_idx,
"wrong_label_idx": wrong_label_idx,
"correct_pairs": correct_pairs,
"wrong_pairs": wrong_pairs,
"query_point_assignment_array": query_point_assignment_array,
},
f,
)
same_label_image_path = os.path.join(root, "visualization", "same_label_images")
wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images")
if not os.path.exists(wrong_label_image_path):
os.mkdir(wrong_label_image_path)
if not os.path.exists(same_label_image_path):
os.mkdir(same_label_image_path)
for i in range(n_points_to_copy):
query_image_path, init_image_path, query_label, init_label = correct_pairs[i]
path_to_copy = os.path.join(
same_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(init_image_path, path_to_copy))
path_to_copy2 = os.path.join(
same_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(query_label), str(init_label)
),
)
os.system("cp {0} {1}".format(query_image_path, path_to_copy2))
(
w_query_image_path,
w_init_image_path,
w_query_label,
w_init_label,
) = wrong_pairs[i]
path_to_copy_w = os.path.join(
wrong_label_image_path,
"i{0}_init_image_querylabel{1}_initlabel{2}.png".format(
str(i), str(w_query_label), str(w_init_label)
),
)
os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w))
path_to_copy_w2 = os.path.join(
wrong_label_image_path,
"i{0}_query_image_querylabel{1}_initlabel{2}.png".format(
i, w_query_label, w_init_label
),
)
os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2))
return accuracy, correct_pairs, wrong_pairs
def _generate_query_sets(version: str, N: int = 5000):
    """
    Generates query sets for qDCA experiment in Section 4.3.

    Loads precomputed VGG16 features for the R and E sets, randomly samples N
    points from each as the "init" subsets, and writes three pickles under
    representations/vgg16/<version>/: the sampled R features, the sampled E
    features, and the remaining (query) points of both sets concatenated.

    :param version: either version1 (dogs vs kitchen utils) or version2 (random).
    :param N: number of points to sample for R used in DCA.
    """
    with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f:
        Rdata_v1 = pickle.load(f)

    with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f:
        Edata_v1 = pickle.load(f)

    # sample N init points per set without replacement; the complement
    # becomes the query set
    init_Ridxs = np.random.choice(
        np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False
    )
    query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs)

    init_Eidxs = np.random.choice(
        np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False
    )
    query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs)

    with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f:
        pickle.dump(
            {
                "feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs],
                "feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs],
                "labels": Rdata_v1["labels"][init_Ridxs],
                "paths": np.array(Rdata_v1["paths"])[init_Ridxs],
                "init_Ridx": init_Ridxs,
                "query_Ridx": query_Ridxs,
            },
            f,
        )

    with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f:
        pickle.dump(
            {
                "feat_lin1": Edata_v1["feat_lin1"][init_Eidxs],
                "feat_lin2": Edata_v1["feat_lin2"][init_Eidxs],
                "labels": Edata_v1["labels"][init_Eidxs],
                "paths": np.array(Edata_v1["paths"])[init_Eidxs],
                "init_Eidx": init_Eidxs,
                "query_Eidx": query_Eidxs,
            },
            f,
        )

    # query set: leftover R points followed by leftover E points
    with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f:
        pickle.dump(
            {
                "feat_lin1": np.concatenate(
                    [
                        Rdata_v1["feat_lin1"][query_Ridxs],
                        Edata_v1["feat_lin1"][query_Eidxs],
                    ]
                ),
                "feat_lin2": np.concatenate(
                    [
                        Rdata_v1["feat_lin2"][query_Ridxs],
                        Edata_v1["feat_lin2"][query_Eidxs],
                    ]
                ),
                "labels": np.concatenate(
                    [Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]]
                ),
                "paths": np.concatenate(
                    [
                        np.array(Rdata_v1["paths"])[query_Ridxs],
                        np.array(Edata_v1["paths"])[query_Eidxs],
                    ]
                ),
                "init_Eidxs": init_Eidxs,
                "query_Eidxs": query_Eidxs,
                "init_Ridxs": init_Ridxs,
                "query_Ridxs": query_Ridxs,
            },
            f,
        )
|
<filename>boa/worker.py
"""
worker.py
Implements the actual reverse engineering used by both the
"""
import os
import io
import re
import sys
import typing as t
import hashlib
import contextlib
import logging
import coloredlogs
import lief
import ssdeep
import requests
from boa.unfreeze import get_installer
from boa.unpack import get_packer
# module-level logger; coloredlogs installs a colorized INFO-level handler
logger = logging.getLogger(__name__)
coloredlogs.install(level="INFO")
@contextlib.contextmanager
def stdout_redirected(to=os.devnull):
    """ Silences shared libraries """
    # Redirect at the OS file-descriptor level (not just sys.stdout), so
    # output written by native code (e.g. LIEF) is silenced as well.
    fd = sys.stdout.fileno()

    def _redirect_stdout(to):
        sys.stdout.close()  # + implicit flush()
        os.dup2(to.fileno(), fd)  # fd writes to 'to' file
        sys.stdout = os.fdopen(fd, "w")  # Python writes to fd

    # keep a duplicate of the original fd so stdout can be restored afterwards
    with os.fdopen(os.dup(fd), "w") as old_stdout:
        with open(to, "w") as file:
            _redirect_stdout(to=file)
        try:
            yield  # allow code to be run with the redirected stdout
        finally:
            _redirect_stdout(to=old_stdout)
class WorkerException(Exception):
    """Raised when the worker cannot parse or process the target executable."""
    pass
class BoaWorker:
    """Drives detection, unpacking and decompilation for one target executable."""

    def __init__(self, filepath: str, out_dir: str, cli=False):
        self.filepath = filepath
        self.out_dir = out_dir

        # if set, will not attempt to pingback any endpoints
        self.cli = cli

        # wrap LIEF over to parse executable, silencing its native stdout output
        with stdout_redirected():
            self.binary = lief.parse(self.filepath)

        if isinstance(self.binary, lief.PE.Binary):
            self.format = "PE"
        elif isinstance(self.binary, lief.ELF.Binary):
            self.format = "ELF"
        elif isinstance(self.binary, lief.MachO.Binary):
            raise WorkerException("Mach-O's are not supported yet.")
        else:
            raise WorkerException("Unknown filetype")

        # regex search for python dependency
        self.pyver: t.Optional[float] = self._parse_pyver()

    def _parse_pyver(self) -> t.Optional[float]:
        """ Generically searches for python dependency (DLL/SO) in executable format """
        # scan the .data section contents for a pythonNN library reference
        section = self.binary.get_section(".data")
        data: bytes = bytearray(section.content)

        # FIX: capture the version digits directly with a group instead of
        # the original's fragile `split("python")...strip(".dll")` dance
        matches = re.search(r"python(\d+)", str(data))
        if matches is None:
            return None

        # "38" -> 3.8 (dot inserted after the major digit).  NOTE(review):
        # "310" maps to float("3.10") == 3.1, same as the original -- a
        # float cannot distinguish 3.1 from 3.10.
        digits = list(matches.group(1))
        digits.insert(1, ".")
        return float("".join(digits))

    def run_detect(self) -> t.Dict[str, str]:
        """ Given a blob of data, run initial detection to gather metadata """
        with open(self.filepath, "rb") as fd:
            data = fd.read()

        # generate table of hashes useful for analyst
        # (the "Similiarity" misspelling is kept: it is a user-visible key)
        hashes: t.Dict[str, str] = {}
        hashes["MD5"] = hashlib.md5(data).hexdigest()
        hashes["SHA256"] = hashlib.sha256(data).hexdigest()
        hashes["Similiarity Hash (ssdeep)"] = ssdeep.hash(data)

        # VT checks are optional, and only occur if $VT_API is set
        vt_api: t.Optional[str] = os.environ.get("VT_API")
        if vt_api:
            # BUG FIX: the original referenced an undefined name `binary`
            # (NameError) and sent the key "apiKey"; the VT v2 API expects
            # the parameter to be named "apikey".
            params = {"apikey": vt_api}
            files = {"file": data}
            resp = requests.post(
                "https://www.virustotal.com/vtapi/v2/file/scan",
                files=files,
                params=params,
            )
            print(resp.json())
        return hashes

    def run_unpack(self) -> int:
        """ Implements functionality for detecting any unpackers and extrapolating resources """
        # instantiate unfreezer; returns a nonzero status on failure
        unfreezer = get_installer(self.filepath)
        if unfreezer is None:
            logger.error(
                "Unable to detect the installer used to freeze the executable."
            )
            return 1

        # given the output dir, run the unpacking routine
        with unfreezer:
            logger.info(f"Detected installer: {unfreezer}")
            version = unfreezer.parse_version()
            if version is not None:
                logger.info(f"Installer Version: {version}")
            logger.info("Unfreezing resources from the given executable")
            sources = unfreezer.thaw(self.out_dir)
            logger.info(f"Found {len(sources)} relevant bytecode files for decompilation.")
            logger.info(f"Done unpacking all resources to `{self.out_dir}`")
        return 0

    def run_decompile(self) -> int:
        # TODO: decompilation is not implemented yet; reports success for now
        return 0
|
<reponame>jbrockmendel/sm2
import os
import numpy as np
# directory of this file; used to locate the CSV fixture files loaded below
cur_dir = os.path.dirname(os.path.abspath(__file__))
class ARLagResults(object):
    """
    Results are from R vars::VARselect for sunspot data.
    Commands run were

    var_select <- VARselect(SUNACTIVITY, lag.max=16, type=c("const"))
    """
    def __init__(self, type="const"):
        # order of results is AIC, HQ, SC, FPE
        # NOTE(review): `type` shadows the builtin but is part of the public
        # signature; renaming would break keyword callers.
        if type == "const":
            # TODO: Dont use name `type`
            ic = [6.311751824815273, 6.321813007357017, 6.336872456958734,
                  551.009492543133547, 5.647615009344886, 5.662706783157502,
                  5.685295957560077, 283.614444209634655, 5.634199640773091,
                  5.654322005856580, 5.684440905060013, 279.835333966272003,
                  5.639415797766900, 5.664568754121261, 5.702217378125553,
                  281.299267441683185, 5.646102475432464, 5.676286023057697,
                  5.721464371862848, 283.187210932784524, 5.628416873122441,
                  5.663631012018546, 5.716339085624555, 278.223839284844701,
                  5.584204185137150, 5.624448915304128, 5.684686713710994,
                  266.191975554941564, 5.541163244029505, 5.586438565467356,
                  5.654206088675081, 254.979353737235556, 5.483155367013447,
                  5.533461279722170, 5.608758527730753, 240.611088468544949,
                  5.489939895595428, 5.545276399575022, 5.628103372384465,
                  242.251199397394288, 5.496713895370946, 5.557080990621412,
                  5.647437688231713, 243.900349905069504, 5.503539311586831,
                  5.568936998108170, 5.666823420519329, 245.573823561989144,
                  5.510365149977393, 5.580793427769605, 5.686209574981622,
                  247.259396991133599, 5.513740912139918, 5.589199781203001,
                  5.702145653215877, 248.099655693709479, 5.515627471325321,
                  5.596116931659277, 5.716592528473011, 248.572915484827206,
                  5.515935627515806, 5.601455679120634, 5.729461000735226,
                  248.654927915301300]
            # reshape column-major (Fortran order) into 4 criteria x lags,
            # matching R's layout
            self.ic = np.asarray(ic).reshape(4, -1, order='F')
class ARResultsOLS(object):
    """
    Results of fitting an AR(9) model to the sunspot data.
    Results were taken from Stata using the var command.
    """
    def __init__(self, constant=True):
        # number of usable observations after losing 9 lags
        self.avobs = 300.
        if constant:
            # intercept first, then the 9 AR coefficients
            self.params = [6.7430535917332, 1.1649421971129, -.40535742259304,
                           -.16653934246587, .14980629416032, -.09462417064796,
                           .00491001240749, .0504665930841, -.08635349190816,
                           .25349103194757]
            # These are returned by stata VAR, using the (V)AR scale/sigma
            # we return the true OLS bse by default; the stata residuals
            # can be achieved by np.sqrt(np.diag(res1.cov_params()))
            self.bse_stata = [2.413485601, .0560359041, .0874490762,
                              .0900894414, .0899348339, .0900100797,
                              .0898385666, .0896997939, .0869773089,
                              .0559505756]
            # The below are from gretl's ARIMA command with
            # conditional maximum likelihood
            self.bse_gretl = [2.45474, 0.0569939, 0.0889440,
                              0.0916295, 0.0914723, 0.0915488,
                              0.0913744, 0.0912332, 0.0884642,
                              0.0569071]
            self.rmse = 15.1279294937327
            self.fpe = 236.4827257929261
            self.llf = -1235.559128419549
            # NOTE: we use a different definition of these ic than Stata
            # but our order selection results agree with R VARselect
            # close to Stata for Lutkepohl but we penalize the ic for
            # the trend terms
            # self.bic = 8.427186938618863
            # self.aic = 8.30372752279699
            # self.hqic = 8.353136159250697

            # NOTE: predictions were taken from gretl, but agree with Stata
            # test predict
            # TODO: remove one of the files
            filename = os.path.join(cur_dir, "AROLSConstantPredict.csv")
            predictresults = np.loadtxt(filename)
            # column 0: in-sample fitted values; column 1: forecasts
            fv = predictresults[:300, 0]
            pv = predictresults[300:, 1]
            del predictresults
            # cases - in sample predict
            # n = -1, start = 0 (fitted values)
            self.FVOLSnneg1start0 = fv
            # n=-1, start=9
            self.FVOLSnneg1start9 = fv
            # n=-1, start=100
            self.FVOLSnneg1start100 = fv[100 - 9:]
            # n = 200, start = 0
            self.FVOLSn200start0 = fv[:192]
            # n = 200, start = 200
            self.FVOLSn200start200 = np.hstack((fv[200 - 9:], pv[:101 - 9]))
            # n = 200, start = -109 use above
            self.FVOLSn200startneg109 = self.FVOLSn200start200
            # n = 100, start = 325, post-sample forecasting
            self.FVOLSn100start325 = np.hstack((fv[-1], pv))
            # n = 301, start = 9
            self.FVOLSn301start9 = np.hstack((fv, pv[:2]))
            # n = 301, start = 0
            self.FVOLSdefault = fv
            # n = 4, start = 312
            self.FVOLSn4start312 = np.hstack((fv[-1], pv[:8]))
            # n = 15, start = 312
            self.FVOLSn15start312 = np.hstack((fv[-1], pv[:19]))
        elif not constant:
            # NOTE(review): `elif not constant` is equivalent to plain `else`
            self.params = [1.19582389902985, -0.40591818219637,
                           -0.15813796884843, 0.16620079925202,
                           -0.08570200254617, 0.01876298948686,
                           0.06130211910707, -0.08461507700047,
                           0.27995084653313]
            self.bse_stata = [.055645055, .088579237, .0912031179,
                              .0909032462, .0911161784, .0908611473,
                              .0907743174, .0880993504, .0558560278]
            self.bse_gretl = [0.0564990, 0.0899386, 0.0926027,
                              0.0922983, 0.0925145, 0.0922555,
                              0.0921674, 0.0894513, 0.0567132]
            self.rmse = 15.29712618677774
            self.sigma = 226.9820074869752
            self.llf = -1239.41217278661
            # See note above; TODO: What note does this refer to?
            #self.bic = 8.433861292817106
            #self.hqic = 8.367215591385756
            #self.aic = 8.322747818577421
            self.fpe = 241.0221316614273
            filename = os.path.join(cur_dir, "AROLSNoConstantPredict.csv")
            predictresults = np.loadtxt(filename)
            # column 0: in-sample fitted values; column 1: forecasts
            fv = predictresults[:300, 0]
            pv = predictresults[300:, 1]
            # cases - in sample predict
            # n = -1, start = 0 (fitted values)
            self.FVOLSnneg1start0 = fv
            # n=-1, start=9
            self.FVOLSnneg1start9 = fv
            # n=-1, start=100
            self.FVOLSnneg1start100 = fv[100 - 9:]
            # n = 200, start = 0
            self.FVOLSn200start0 = fv[:192]
            # n = 200, start = 200
            self.FVOLSn200start200 = np.hstack((fv[200 - 9:], pv[:101 - 9]))
            # n = 200, start = -109 use above
            self.FVOLSn200startneg109 = self.FVOLSn200start200
            # n = 100, start = 325, post-sample forecasting
            self.FVOLSn100start325 = np.hstack((fv[-1], pv))
            # n = 301, start = 9
            self.FVOLSn301start9 = np.hstack((fv, pv[:2]))
            # n = 301, start = 0
            self.FVOLSdefault = fv
            # n = 4, start = 312
            self.FVOLSn4start312 = np.hstack((fv[-1], pv[:8]))
            # n = 15, start = 312
            self.FVOLSn15start312 = np.hstack((fv[-1], pv[:19]))
class ARResultsMLE(object):
    """
    Results of fitting an AR(9) model to the sunspot data using exact MLE.
    Results were taken from gretl.
    """
    def __init__(self, constant=True):
        # number of usable observations
        self.avobs = 300
        if constant:
            # NOTE: Stata's estimated parameters differ from gretl
            filename = os.path.join(cur_dir, "ARMLEConstantPredict.csv")
            filename2 = os.path.join(cur_dir,
                                     "results_ar_forecast_mle_dynamic.csv")
            predictresults = np.loadtxt(filename, delimiter=",")
            # column 1 holds the predicted values
            pv = predictresults[:, 1]
            dynamicpv = np.genfromtxt(filename2, delimiter=",", skip_header=1)

            # cases - in sample predict
            # start = 0 (fitted values)
            self.FVMLEdefault = pv[:309]
            # start=9
            self.FVMLEstart9end308 = pv[9:309]
            # start=100, end=309
            self.FVMLEstart100end308 = pv[100:309]
            # start = 0, end
            self.FVMLEstart0end200 = pv[:201]
            # n = 200, start = 200
            self.FVMLEstart200end334 = pv[200:]
            # start = 309, end=334 post-sample forecasting
            self.FVMLEstart308end334 = pv[308:]
            # end = 310, start = 9
            self.FVMLEstart9end309 = pv[9:310]
            # end = 301, start = 0
            self.FVMLEstart0end301 = pv[:302]
            # end = 312, start = 4
            self.FVMLEstart4end312 = pv[4:313]
            # end = 7, start = 2
            self.FVMLEstart2end7 = pv[2:8]
            # dynamic forecasts, one column per scenario
            self.fcdyn = dynamicpv[:, 0]
            self.fcdyn2 = dynamicpv[:, 1]
            self.fcdyn3 = dynamicpv[:, 2]
            self.fcdyn4 = dynamicpv[:, 3]
|
<reponame>marcinn/midicontrol
def cmd(cmd, stop=False):
    """Build an event handler that always returns *cmd*.

    With stop=True the handler calls ``event.stop()`` before returning.
    """
    def handler(event):
        if stop:
            event.stop()
        return cmd
    return handler
class CommandsController(object):
    """Throttles command handlers: for each event code it remembers the time
    of the last accepted event, and drops repeats arriving within the
    configured threshold."""

    def __init__(self):
        # event.code -> time of the last event that was allowed through
        self._times = {}

    def command_decorator(self, threshold=None):
        """Decorator factory; *threshold* is the minimum spacing (in the
        event's own time units) between two accepted events of one code.
        A falsy threshold disables throttling entirely."""
        def decorator(fn):
            def wrapped(event):
                if threshold:
                    last = self._times.get(event.code)
                    elapsed = event.time - last if last else None
                    if elapsed is not None and elapsed <= threshold:
                        # too soon after the previous accepted event: drop it
                        return []
                result = fn(event)
                self._times[event.code] = event.time
                return result or []
            return wrapped
        return decorator
# module-wide controller instance; `command` is the decorator factory used
# by the handler definitions below
_ctrl = CommandsController()
command = _ctrl.command_decorator
def knob(x):
    """Wrap handler *x* as a knob command, throttled with a 0.25 threshold."""
    throttled = command(threshold=0.25)
    return throttled(x)


def slider(x):
    """Wrap handler *x* as a slider command, throttled with a 0.05 threshold."""
    throttled = command(threshold=0.05)
    return throttled(x)
def zoom_in(event):
    """Repeat the zoom-in command once per 5 units of delta (at least once)."""
    repeats = int(event.delta / 5) + 1
    return ZOOM_IN * repeats


def zoom_out(event):
    """Repeat the zoom-out command once per 5 units of delta (at least once)."""
    repeats = int(event.delta / 5) + 1
    return ZOOM_OUT * repeats
def do_scrub(cmds):
    """Return a handler emitting *cmds* unless the 'scrub' modifier is active."""
    def scrub(event):
        if 'scrub' in event.modifiers:
            return None
        return cmds
    return scrub
@command(threshold=0.05)
def scrub_left(ev):
    """Return a scrub handler for the Ctrl+J (scrub-backward) sequence."""
    keys = ['keydown Control_L', 'str j', 'keyup Control_L']
    return do_scrub(keys)


@command(threshold=0.05)
def scrub_right(ev):
    """Return a scrub handler for the Ctrl+L (scrub-forward) sequence."""
    keys = ['keydown Control_L', 'str l', 'keyup Control_L']
    return do_scrub(keys)
@command(threshold=0.1)
def frames_left(event):
    """Nudge backward; large knob deltas jump 10 frames instead of 1."""
    magnitude = abs(event.delta)
    if magnitude < 5:
        return REVERSE_NUDGE  # * magnitude
    return NUDGE_10_BACKWARD  # * (int(magnitude-10/4)+1)


@command(threshold=0.1)
def frames_right(event):
    """Nudge forward; large knob deltas jump 10 frames instead of 1."""
    magnitude = abs(event.delta)
    if magnitude < 5:
        return FORWARD_NUDGE  # * magnitude
    return NUDGE_10_FORWARD  # * (int(magnitude-10/4)+1)
def addmodifier(ctrl, x):
    """Build a command that enables modifier *x* on controller *ctrl*."""
    @command()
    def handler(event):
        ctrl.enable_modifier(x)
        return []
    return handler


def remmodifier(ctrl, x):
    """Build a command that disables modifier *x* on *ctrl* when it is active."""
    @command()
    def handler(event):
        if x in event.modifiers:
            ctrl.disable_modifier(x)
        return []
    return handler
def ifmod(mod, enabled, disabled=None):
    """Handler returning *enabled* while *mod* is active, else *disabled* (or [])."""
    def wrapped(event):
        active = mod in event.modifiers
        return enabled if active else (disabled or [])
    return wrapped


def ifnotmod(mod, enabled, disabled=None):
    """Inverse of ifmod: *enabled* while *mod* is NOT active."""
    def wrapped(event):
        active = mod in event.modifiers
        return (disabled or []) if active else enabled
    return wrapped
# Command sequences sent to the editor.  The 'key'/'str'/'keydown'/'keyup'
# prefixes look like xdotool-style keyboard-automation commands -- TODO
# confirm against the dispatcher that consumes these values.

# --- navigation / marking
LEFT = 'key a'
RIGHT = 'key s'
MARK_IN = 'key i'
MARK_OUT = 'key o'
MARK_CLIP = 'str ]'

# --- bin navigation
BIN_PREV = ['keydown Alt_L', 'str q', 'keyup Alt_L']
BIN_PREV_IN_GROUP = [
    'keydown Alt_L', 'keydown Shift_L',
    'str q', 'keyup Alt_L', 'keyup Shift_L']
BIN_NEXT = ['keydown Alt_L', 'str w', 'keyup Alt_L']
BIN_NEXT_IN_GROUP = [
    'keydown Alt_L', 'keydown Shift_L',
    'str w', 'keyup Alt_L', 'keyup Shift_L']
TILE_TO_VIEWER = ['keydown Control_L', 'key Return', 'keyup Control_L']

# --- transport
REVERSE_NUDGE = ['key Left']
FORWARD_NUDGE = ['key Right']
NUDGE_10_BACKWARD = ['key m']
NUDGE_10_FORWARD = ['str /']
FULLSCREEN_TOGGLE = ['key F12']
PLAY = ['keydown Control_L', 'key p', 'keyup Control_L']
STOP = ['key k']
TOGGLE_PLAY = ['key Space']
GOTO_MARK_IN = [
    'keydown Control_L', 'key i', 'keyup Control_L']
GOTO_MARK_OUT = [
    'keydown Control_L', 'key o', 'keyup Control_L']
SWITCH = ['key Tab']
JOIN = ['key Escape']
UNMARK = ['key p']
JOG = ['keydown Control_L', 'key k', 'keyup Control_L']
PREVIEW = ['keydown Alt_L', 'key l', 'keyup Alt_L']
HOME = ['key h']
END = ['key colon']

# --- editing
INSERT = ['key v']
REPLACE = ['key b']
EMPTY_CUT = ['key c']
ZOOM_IN = ['str =']
ZOOM_OUT = ['str -']

# --- track toggles
DISABLE_ALL_TRACKS = ['str ~']
TOGGLE_V1 = ['key 1']
TOGGLE_V2 = ['key 2']
TOGGLE_A1 = ['key 3']
TOGGLE_A2 = ['key 4']
TOGGLE_A3 = ['key 5']
TOGGLE_A4 = ['key 6']
TOGGLE_A5 = ['key 7']
TOGGLE_A6 = ['key 8']
TOGGLE_A7 = ['key 9']

# --- destructive edits / undo
DELETE = ['key x']
REMOVE = ['key z']
CLOSE_ALL_GAPS = ['keydown Alt_L', 'str X', 'keyup Alt_L']
UNDO = ['keydown Control_L', 'key z', 'keyup Control_L']
REDO = ['keydown Control_L', 'key y', 'keyup Control_L']
|
import os
import dipy.reconst.dti as dti
import numpy as np
from dipy.core.gradients import gradient_table, generate_bvecs
from dipy.io.image import save_nifti
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
from dipy.reconst.csdeconv import auto_response
from dataset.mnist.database import MnistDatabase, KmnistDatabase
from dataset.mnist.dwi.diff_nd import successive_differences
from dataset.mnist.dwi.tube import tubify
from fwk.config import Config
from util.logging import get_logger, set_logger
class MnistProcessor:
    """Runs the per-image synthetic-DWI pipeline for an MNIST-style dataset.

    Reads dataset/logging settings from the global Config, sets up the
    processing directory and logger, and delegates the actual volume
    generation to MnistDiffusionImageModel.
    """

    def __init__(self):
        mnist_dataset = Config.config['DATABASE']['mnist_dataset']
        dataset_base_path = Config.config['DATABASE']['dataset_base_path']
        processing_dir = os.path.join(dataset_base_path, mnist_dataset, 'processing')
        self.processing_folder = os.path.expanduser(processing_dir)
        os.makedirs(self.processing_folder, exist_ok=True)

        # BUG FIX: the original tested os.path.isdir() on the log *file*
        # path (always False), so the mkdir ran unconditionally on every
        # start; test/create the log directory itself instead.
        log_dir = os.path.join(self.processing_folder, 'log')
        os.makedirs(log_dir, exist_ok=True)
        log_furl = os.path.join(log_dir, 'preprocessing.log')
        set_logger('MnistProcessor', Config.config['LOGGING']['processing_level'], log_furl)
        self.logger = get_logger('MnistProcessor')

        # registry of supported source databases, keyed by config name
        self.database_class_names = {
            'mnist': MnistDatabase,
            'kmnist': KmnistDatabase
        }
        self.database = self.database_class_names[mnist_dataset]()
        self.depth = int(Config.config['DATABASE']['image_depth'])

    def _path_for_idx(self, image_idx, regime):
        """Output directory for one image: <processing>/<regime>/<idx>."""
        return os.path.join(self.processing_folder, regime, str(image_idx))

    def process_labels(self, image_idx, regime):
        """Generate and save only the label volume for one image."""
        self.logger.info('--- processing image {}'.format(image_idx))
        image_2d, label = self.database.get_image(image_idx, regime=regime)
        image = MnistDiffusionImageModel(image_2d, depth=self.depth)
        self.logger.info('saving image')
        image.save_label(self._path_for_idx(image_idx, regime), label)

    def process_image(self, image_idx, regime):
        """Run the full pipeline for one image: volume, labels, gradients,
        simulated diffusion volumes, then DTI and ODF fits."""
        out_dir = self._path_for_idx(image_idx, regime)
        self.logger.info('--- processing image {}'.format(image_idx))
        self.logger.info('generating image')
        image_2d, label = self.database.get_image(image_idx, regime=regime)
        image = MnistDiffusionImageModel(image_2d, depth=self.depth)
        self.logger.info('saving image')
        image.save_image(out_dir)
        image.save_label(out_dir, label)
        self.logger.info('generating bvals and bvecs')
        image.set_b0_image(same=True, weight=3000)
        image.generate_bvals_bvecs(n_bvals=200)
        self.logger.info('saving bvals and bvecs')
        image.save_bvals_bvecs(out_dir)
        self.logger.info('generating volumes')
        image.generate_diffusion_volumes()
        self.logger.info('saving volumes')
        image.save_diffusion_volumes(out_dir)
        self.logger.info('fitting dti')
        image.fit_dti()
        self.logger.info('saving dti')
        image.save_dti(out_dir)
        self.logger.info('fitting odf')
        image.fit_odf()
        self.logger.info('saving odf')
        image.save_odf(out_dir)
class MnistDiffusionImageModel:
def __init__(self, image2d, depth=8, radius=10, bval0=100, intensity_threshold=1.e-3):
self.image = tubify(image2d, depth)
mask = np.zeros_like(self.image)
mask[self.image > intensity_threshold] = 1
self.mask = mask.astype(np.int)
self.affine = np.eye(4)
self.intensity_threshold = intensity_threshold
self.b0_image = None
self.bvals = None
self.bvecs = None
self.volumes = None
self.dti = None
self.v1 = None
self.evals = None
self.odf = None
self.radius = radius
self.bval0 = bval0
def _diffs_at_direction_tensor(self, bval, bvec, radius=3):
diffs = successive_differences(self.image, bvec, radius, normalize=True) + \
successive_differences(self.image, -bvec, radius, normalize=True)
return diffs
@staticmethod
def adc_fun(x):
return 1 / (1 + np.exp(-x))
def generate_diffusion_volumes(self):
diffusion_volumes = []
for i, bvec in enumerate(self.bvecs.tolist()):
bval = self.bvals[i]
volume = self._diffs_at_direction_tensor(bval, np.array(bvec), self.radius)
lam_min = 2.e-4
lam_max = 5.e-3
adc = lam_min + volume * (lam_max - lam_min) / 1
diffusion_volume = self.b0_image * np.exp(-bval * adc)
diffusion_volumes.append(diffusion_volume)
self.volumes = np.array(np.transpose(diffusion_volumes, (1, 2, 3, 0)))
def set_b0_image(self, same=False, weight=1):
if same is True:
b0_image = self.image
else:
b0_image = np.ones_like(self.image)
self.b0_image = weight * b0_image
def reweight_image(self, weight):
self.image = weight * self.image
def make_mask(self, threshold=1e-3):
mask = np.zeros_like(self.image)
mask[self.image > threshold] = 1
self.mask = mask.astype(np.int)
def generate_bvals_bvecs(self, n_bvals=64, n_b0=8):
bvals = self.bval0 * np.ones(n_bvals)
bvecs = generate_bvecs(n_bvals, 1)
if self.b0_image is not None:
bvecs = np.concatenate((np.zeros((n_b0, 3)), bvecs), axis=0)
bvals = np.concatenate((np.zeros(n_b0), bvals))
self.bvals = bvals
self.bvecs = bvecs
def save_bvals_bvecs(self, rel_path):
path = os.path.expanduser(rel_path)
fmt_bvals = '%d'
delimiter = ' '
url_bvals = os.path.join(path, 'bvals')
np.savetxt(url_bvals, np.expand_dims(self.bvals, axis=0), fmt=fmt_bvals, delimiter=delimiter)
fmt_bvecs = '%2.6f'
delimiter = ' '
url_bvecs = os.path.join(path, 'bvecs')
np.savetxt(url_bvecs, self.bvecs.T, fmt=fmt_bvecs, delimiter=delimiter)
def save_image(self, relative_path):
name = 'image.nii.gz'
path = os.path.expanduser(relative_path)
url = os.path.join(path, name)
if not os.path.isdir(path):
os.makedirs(path)
save_nifti(url, self.image, self.affine)
name = 'mask.nii.gz'
path = os.path.expanduser(relative_path)
url = os.path.join(path, name)
if not os.path.isdir(path):
os.makedirs(path)
self.make_mask()
save_nifti(url, self.mask, self.affine)
def save_label(self, relative_path, label):
    """Save ``label`` as ``label.nii.gz`` under ``relative_path`` (created if absent)."""
    out_dir = os.path.expanduser(relative_path)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_nifti(os.path.join(out_dir, 'label.nii.gz'), label, self.affine)
def save_diffusion_volumes(self, relative_path):
    """Save the simulated 4D stack as ``diffusion_volumes.nii.gz``."""
    out_dir = os.path.expanduser(relative_path)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_nifti(os.path.join(out_dir, 'diffusion_volumes.nii.gz'),
               self.volumes, self.affine)
def fit_dti(self):
    """Fit a diffusion tensor model (OLS) to the simulated volumes.

    Stores:
      self.dti   -- lower-triangular tensor coefficients reordered to the
                    FSL/FDT convention Dxx, Dxy, Dxz, Dyy, Dyz, Dzz
      self.v1    -- principal eigenvector per voxel
      self.evals -- tensor eigenvalues per voxel
    """
    gtab = gradient_table(self.bvals, self.bvecs)
    tensor_model = dti.TensorModel(gtab, fit_method='OLS')
    tensor_fit = tensor_model.fit(self.volumes, mask=self.mask)
    dti_coeffs = dti.lower_triangular(tensor_fit.quadratic_form)
    # reorder coefficients to FDT convention Dxx, Dxy, Dxz, Dyy, Dyz, Dzz
    # https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FDT/UserGuide
    # (dipy's lower_triangular order interleaves Dyy/Dxz, hence the swap
    # of columns 2 and 3 -- confirm against the installed dipy version.)
    self.dti = dti_coeffs[:, :, :, (0, 1, 3, 2, 4, 5)]
    self.v1 = tensor_fit.evecs[:, :, :, :, 0]
    self.evals = tensor_fit.evals
def save_dti(self, relative_path):
    """Save the DTI coefficients and principal eigenvectors as NIfTI files."""
    out_dir = os.path.expanduser(relative_path)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_nifti(os.path.join(out_dir, 'dti.nii.gz'), self.dti, self.affine)
    save_nifti(os.path.join(out_dir, 'v1.nii.gz'), self.v1, self.affine)
def fit_odf(self):
    """Fit a constrained spherical deconvolution (CSD) model.

    Estimates the single-fiber response function from an ROI at the
    volume center, fits the CSD model to the simulated volumes, and
    stores the spherical-harmonic coefficients in ``self.odf``.
    """
    gtab = gradient_table(self.bvals, self.bvecs)
    # fa_thr=0.7 keeps only strongly anisotropic voxels for the response.
    response, ratio = auto_response(gtab, self.volumes, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(self.volumes)
    self.odf = csd_fit.shm_coeff
def save_odf(self, relative_path):
    """Save the CSD spherical-harmonic coefficients as ``odf.nii.gz``."""
    out_dir = os.path.expanduser(relative_path)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    save_nifti(os.path.join(out_dir, 'odf.nii.gz'), self.odf, self.affine)
|
#-*- encoding: utf-8
import mmap
import os, os.path
import time, math
import sys
class Superbloque:
    """Superblock of the FiUnamFS image.

    Reads the filesystem metadata stored as ASCII text at fixed offsets
    of cluster 0 and aborts when the magic name does not match.
    """
    def __init__(self):
        # Map the image read-only just long enough to pull the fields.
        with open('fiunamfs.img', 'r+b') as f:
            fs_mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
            self.fs_nombre = fs_mmap[0:8].decode('ascii')  # magic, e.g. "FiUnamFS"
            self.version = fs_mmap[10:13].decode('ascii')  # e.g. "0.7"
            self.volumen_tag = fs_mmap[20:35].decode('ascii')  # volume label
            self.cluster_size = int(fs_mmap[40:45].decode('ascii'))  # e.g. 2048
            self.num_cluster_dir = int(fs_mmap[47:49].decode('ascii'))  # directory clusters, e.g. 4
            self.num_cluster_total = int(fs_mmap[52:60].decode('ascii'))  # total clusters, e.g. 720
            fs_mmap.close()
        # Refuse to operate on anything that is not a FiUnamFS image.
        if self.fs_nombre != 'FiUnamFS':
            print('[-] Error: Sistema de Archivos Incorreto')
            exit(-1)
# Directory-entry class (64-byte fixed-width records, ASCII fields).
class ENT_DIR:
    """One FiUnamFS directory entry parsed from its raw 64-byte record."""
    # Name marking a slot that holds no file.
    entrada_sin_usar = 'Xx.xXx.xXx.xXx.'
    # On-disk size of one directory record, in bytes.
    entrada_size = 64

    def __init__(self, entrada):
        def campo(inicio, fin):
            # Decode one fixed-width ASCII field of the raw record.
            return entrada[inicio:fin].decode('ascii')
        self.nombre_archivo = campo(0, 15).strip()
        self.archivo_size = campo(16, 24)
        self.cluster_inicial = campo(25, 30)
        self.creacion_archivo = campo(31, 45)
        self.modificacion_archivo = campo(46, 60)
        # Slot index inside the directory; filled in later by the caller.
        self.num_entrada = -1
class FSUnamFI:
    """FiUnamFS driver: list, copy in/out, delete and defragment files.

    All operations work directly on an mmap of the disk image.
    """
    # NOTE(review): these are class attributes, so the image file, its mmap
    # and the superblock are opened once at import time and shared by every
    # instance of this class.
    f = open('fiunamfs.img','a+b')
    fs_mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_WRITE)
    sb = Superbloque()
    entrada_size = 64

    def obtener_entradas(self):
        """Return every in-use directory entry (unused slots skipped)."""
        entradas = []
        # The directory occupies 4 clusters: 2048 * 4 = 8192 bytes,
        # i.e. 8192 / 64 = 128 entries of 64 bytes each.
        for num_entrada in range(128):
            p_entrada = self.sb.cluster_size + num_entrada * ENT_DIR.entrada_size
            entrada = ENT_DIR(self.fs_mmap[p_entrada:p_entrada + ENT_DIR.entrada_size])
            if entrada.nombre_archivo != ENT_DIR.entrada_sin_usar:
                entrada.num_entrada = num_entrada
                entradas.append(entrada)
        return entradas

    def listar(self):
        """Print a table with every file stored in the image."""
        entradas = self.obtener_entradas()
        print('{:15} {:10} {:20} {:20} {:10}'.format("Nombre", "Tamaño", "Creación", "Modificación", "Clúster"))
        for entrada in entradas:
            print('{:15} {:10} {:20} {:20} {:10}'.format(entrada.nombre_archivo, entrada.archivo_size, self.convertir_fecha(entrada.creacion_archivo), self.convertir_fecha(entrada.modificacion_archivo), entrada.cluster_inicial))

    # Render an on-disk YYYYMMDDhhmmss timestamp in a user-friendly form.
    def convertir_fecha(self, fecha):
        """Format a YYYYMMDDhhmmss string as dd/mm/yyyy hh:mm:ss."""
        anio = fecha[:4]
        mes = fecha[4:6]
        dia = fecha[6:8]
        hora = fecha[8:10]
        min = fecha[10:12]
        seg = fecha[12:14]
        return dia + '/' + mes + '/' + anio + ' ' + hora + ':' + min + ':' + seg

    # Lookup is fundamental: it tells us whether an entry exists and where.
    def buscar_entrada(self, nombre_buscar):
        """Return the directory entry named ``nombre_buscar``, or None."""
        for num_entrada in range(128):
            p_entrada = self.sb.cluster_size + num_entrada * ENT_DIR.entrada_size
            entrada = ENT_DIR(self.fs_mmap[p_entrada:p_entrada + ENT_DIR.entrada_size])
            if nombre_buscar == entrada.nombre_archivo:
                entrada.num_entrada = num_entrada
                return entrada
        return None

    def copiar_a_pc(self, archivo, ruta):
        """Copy ``archivo`` out of the image into host directory ``ruta``."""
        entrada = self.buscar_entrada(archivo)
        if entrada != None and os.path.exists(ruta):
            # Byte offset of the file's first cluster inside the image.
            cluster = int(entrada.cluster_inicial) * self.sb.cluster_size
            with open(ruta + '/' + archivo, 'w+b') as nuevo_archivo:
                nuevo_archivo.write(self.fs_mmap[cluster: cluster+int(entrada.archivo_size)])
            print('[+] El archivo se copió correctamente')
        else:
            print('[-] Archivo o ruta no encontrado')

    def copiar_a_fs(self, archivo):
        """Copy a host file into the image; refuses duplicates."""
        if os.path.isfile(archivo):
            if self.buscar_entrada(archivo) != None:
                print('[-] El archivo ya existe, cambie el nombre o borre el archivo')
            else:
                self.crear_entrada(archivo)
        else:
            print('[-] No se ha encontrado el archivo')

    # When an external file is copied in we must generate its metadata.
    def crear_entrada(self, archivo):
        """Build and store the 64-byte directory entry plus the file data."""
        nombre = archivo[:]
        # NOTE(review): the file is opened in text mode, but mmap works on the
        # raw descriptor; confirm binary files round-trip correctly.
        with open(archivo) as f:
            f_mmap = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_COPY)
            size = len(f_mmap)
            contenido = f_mmap.read()
        fecha_creacion = time.strftime("%Y%m%d%H%M%S")
        fecha_modificacion = time.strftime("%Y%m%d%H%M%S")
        entradas = self.obtener_entradas()
        if entradas:
            # Place the new entry right after the last used slot.
            p_entrada_nueva = self.sb.cluster_size + (entradas[-1].num_entrada + 1) * 64
        else:
            p_entrada_nueva = self.sb.cluster_size
        cluster_inicial = self.calcular_cluster(entradas)
        # Fixed-width, zero-padded ASCII fields separated by NUL bytes.
        entrada_nueva = ' '.encode('ascii') * (15 - len(nombre))
        entrada_nueva += nombre.encode('ascii')
        entrada_nueva += bytes(1) + ('0' * (8-len(str(size))) + str(size)).encode('ascii')
        entrada_nueva += bytes(1) + ('0' * (5-len(str(cluster_inicial))) + str(cluster_inicial)).encode('ascii')
        entrada_nueva += bytes(1) + fecha_creacion.encode('ascii')
        entrada_nueva += bytes(1) + fecha_modificacion.encode('ascii')
        entrada_nueva += bytes(4)
        self.fs_mmap[p_entrada_nueva:p_entrada_nueva + ENT_DIR.entrada_size] = entrada_nueva
        if(self.cargar_contenido(contenido, cluster_inicial)):
            print('[+] Se ha guardado el archivo correctamente')
        else:
            # Not enough space: roll back the entry written above.
            print('[-] Error al agregar contenido, espacio no suficiente')
            self.eliminar_archivo(archivo)

    def cargar_contenido(self, contenido, cluster_inicial):
        """Write the file body at ``cluster_inicial``; False when it won't fit."""
        clusters = math.ceil(len(contenido)/self.sb.cluster_size)
        if (cluster_inicial + clusters) < self.sb.num_cluster_total:
            clusters *= self.sb.cluster_size
            cluster_inicial *= self.sb.cluster_size
            # The tail of the last cluster is padded with ASCII '0' characters.
            self.fs_mmap[cluster_inicial:cluster_inicial + clusters] = contenido + ('0' * (clusters - len(contenido))).encode('ascii')
            return True
        else:
            return False

    def calcular_cluster(self, entradas):
        """Return the first free cluster (right after the last stored file)."""
        if entradas:
            cluster_inicial = int(entradas[-1].cluster_inicial)
            archivo_size = int(entradas[-1].archivo_size)
        else:
            # Clusters 0-4 hold the superblock and the directory.
            cluster_inicial = 5
            archivo_size = 0
        return math.ceil((cluster_inicial * self.sb.cluster_size + archivo_size) / self.sb.cluster_size)

    def eliminar_archivo(self, archivo):
        """Delete a file by overwriting its name with the unused-slot marker."""
        entrada = self.buscar_entrada(archivo)
        if entrada != None:
            p_entrada = self.sb.cluster_size + ENT_DIR.entrada_size * entrada.num_entrada
            # Only the name field is cleared; the data clusters are untouched.
            self.fs_mmap[p_entrada:p_entrada+15] = bytes(ENT_DIR.entrada_sin_usar.encode('ascii'))
            print('[+] El archivo se ha eliminado correctamente')
        else:
            print('[-] El archivo no existe, vuelva a intentarlo')

    def desfragmentar(self):
        """Compact the files toward the start of the data area."""
        entradas = self.obtener_entradas()
        tam_cluster = self.sb.cluster_size
        clusters = {}
        # First usable data cluster: 0-4 hold the superblock and directory.
        clus_init = 5
        # Dictionary of the form clusters{start_cluster: [file_size, slot]}.
        # NOTE(review): .strip('0') also removes TRAILING zeros (e.g.
        # '00000100' -> '1'); lstrip('0') was probably intended.
        for entrada in entradas:
            tmp = [int(entrada.archivo_size.strip('0')), entrada.num_entrada]
            clusters[int(entrada.cluster_inicial.strip('0'))] = tmp
        print(clusters)
        # For each remaining file (lowest start cluster first), slide it
        # down to the current compaction point when there is a gap.
        for cluster in range(len(clusters)):
            clus_min = min(clusters)
            if(clus_min > clus_init):
                inicio = tam_cluster * clus_min
                # Number of clusters the file occupies.
                fin = math.ceil(clusters.get(clus_min)[0]/tam_cluster)
                # Read the file...
                archivo = self.fs_mmap[inicio : inicio + (fin*tam_cluster)]
                # ...and relocate it at the compaction point.
                self.fs_mmap[clus_init * tam_cluster : (clus_init * tam_cluster) + (fin * tam_cluster)] = archivo
            # Drop the file from the pending map.
            tmp = clusters.get(clus_min)
            del clusters [clus_min]
            # Update the entry's starting-cluster field in the directory
            # (offset 25-30 of the 64-byte record; directory starts at 2048).
            i_metadatos = 2048 + (tmp[1] * 64)
            self.fs_mmap[i_metadatos + 25 : i_metadatos + 30] = str(clus_init).zfill(5).encode('ascii')
            # Advance the compaction point past this file.
            num_clusters_arch = math.ceil(tmp[0] / tam_cluster)
            clus_init += num_clusters_arch
def main():
    """CLI entry point: dispatch FiUnamFS operations based on sys.argv."""
    fs = FSUnamFI()
    # Commands taking no extra argument vs. one/two extra arguments.
    sin_parametros = {'ls' : fs.listar, 'dfg' : fs.desfragmentar}
    con_parametros = {'cp_out' : fs.copiar_a_pc, 'cp_in' : fs.copiar_a_fs, 'rm' : fs.eliminar_archivo}
    argumentos = len(sys.argv)
    if argumentos == 2:
        orden = sys.argv[1]
        if orden in sin_parametros:
            sin_parametros[orden]()
        else:
            print ("Instruccion no válida, intentalo de nuevo :(")
    elif argumentos == 3:
        orden = sys.argv[1]
        if orden in con_parametros:
            if orden in ("rm", "cp_in") and len(sys.argv) == 3:
                con_parametros[orden](sys.argv[2])
            else:
                # cp_out needs a destination path as well.
                print("Los parámetros no coinciden :(")
        else:
            print ("Instruccion no válida, intentalo de nuevo :(")
    elif argumentos == 4:
        orden = sys.argv[1]
        if orden in con_parametros and orden == "cp_out":
            fs.copiar_a_pc(sys.argv[2], sys.argv[3])
        else:
            print ("Instrucción no válida, intentalo de nuevo :(")
    else:
        print("Faltan o sobran argumentos D: Consulta la documentación para introducir los datos correctamente (:")


if __name__ == '__main__':
    main()
|
<reponame>yunus-ceyhan/kivyx<filename>kivyx/bottomsheet.py
"""
<MainApp>:
XButton:
text: "open bottom sheet"
pos_hint: {"center_x": .5, "center_y": .5}
on_release: bottom_sheet.open_menu()
XBottomSheet:
id: bottom_sheet
XBottomSheetContent:
orientation: "vertical"
XIconListItem:
icon: "android"
text: "android"
XIconListItem:
icon: "android"
text: "android"
XIconListItem:
icon: "android"
text: "android"
XIconListItem:
icon: "android"
text: "android"
"""
from kivy.lang import Builder
from kivyx.theming import Theming
from kivy.properties import ColorProperty, ListProperty, DictProperty, NumericProperty, BooleanProperty, StringProperty
from kivyx.floatlayout import XFloatLayout
from kivy.animation import Animation
from kivy.metrics import dp
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
Builder.load_string("""
#:import ScrollEffect kivy.effects.scroll.ScrollEffect
<XBottomSheet>:
id: bs
size_hint: 1,1
pos_hint: root.main_pos
on_press: root.close()
BoxLayout:
orientation: "vertical"
pos_hint: bs.pos_hint
XCard:
id: scr
size_hint_y: None
height: root.scroll_height
radius: root.radius
bg_color: root.back_color
ScrollView:
id: sc
bar_width: 0
effect_cls: ScrollEffect
BoxLayout:
id: bx
orientation: "vertical"
size_hint_y: None
height: max(self.minimum_height,scr.height)
<XBottomSheetContent>:
size_hint_y: None
height: self.minimum_height
pos_hint: {'center_x': .5, 'center_y': .5}
""")
class XBottomSheetContent(BoxLayout):
    """Container the user fills with widgets; XBottomSheet.add_widget
    routes instances of this class into its internal scroll box."""
    pass
class XBottomSheet(Theming,ButtonBehavior,XFloatLayout):
    """Modal bottom sheet: slides up from the screen bottom, dims the
    backdrop, and collapses when the backdrop is pressed.

    Properties:
      scroll_height -- animated height of the visible card.
      main_pos      -- pos_hint; parked off-screen while closed.
      expandable    -- when True the sheet grows to fit its content.
      radius        -- corner radii of the card (top corners rounded).
      status        -- 'opened' or 'closed'.
      back_color    -- card background; defaults to the theme card color.
    """
    scroll_height = NumericProperty(0)
    main_pos = DictProperty({"center_x": .5, "center_y": -2})
    expandable = BooleanProperty(False)
    radius = ListProperty([dp(10),dp(10),0,0])
    status = StringProperty('closed')
    back_color = ColorProperty()

    def __init__(self, **kwargs):
        super(XBottomSheet, self).__init__(**kwargs)
        # Default the sheet background to the active theme's card color.
        self.back_color = self.card_color

    def add_widget(self,widget,*args):
        # User content goes inside the internal scroll box; the kv-defined
        # skeleton widgets are added normally.
        if isinstance(widget, XBottomSheetContent):
            self.ids.bx.add_widget(widget)
        else:
            super(XBottomSheet, self).add_widget(widget)

    def open(self,*args):
        """Slide the sheet into view and dim the backdrop."""
        if self.scroll_height < 1:
            self.main_pos = {"center_x": .5, "center_y": .5}
            # Expandable sheets fit their content; otherwise cap at 112dp.
            box_height = self.ids.bx.height + dp(10) if self.expandable else min(dp(112),self.ids.bx.height)
            anim = Animation(scroll_height = box_height,bg_color = [0,0,0,.3], duration = 0.1)
            anim.start(self)
            self.status = 'opened'

    def close(self,*args):
        """Collapse the sheet and clear the backdrop dim."""
        try:
            self.main_pos = {"center_x": .5, "center_y": -2}
            anim = Animation(scroll_height = 0,bg_color = [0,0,0,0] , duration = 0.2)
            anim.start(self)
            self.status = 'closed'
        except Exception:
            # Was a bare `except: pass`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed so only runtime errors (e.g. the
            # widget tree not being built yet) are ignored.
            pass
|
<reponame>remigermain/chernobyl-disaster.org-backend
from django.test import tag
from lib.test import BaseTest
from django.urls import reverse
from django.core import mail
from django.conf import settings
from django.contrib.auth import get_user_model
import json
@tag('auth', 'authentication')
class AuthTest(BaseTest):
    """End-to-end tests for registration, login, password reset/change and
    the user-details endpoint.

    NOTE(review): the original file had password values scrubbed to the bare
    token `<PASSWORD>`, which is a syntax error; they are restored here as
    references to ``self.password`` so the module is importable again and
    registration/login stay consistent with each other.
    """
    def setUp(self):
        super().setUp()
        # Email verification is disabled by default; individual tests turn
        # it on when they need the verification flow.
        self.set_email_none()
        self.username = 'username'
        self.email = '<EMAIL>'
        self.password = '<PASSWORD>'

    def set_email_mandatory(self):
        settings.ACCOUNT_EMAIL_VERIFICATION = 'mandatory'

    def set_email_none(self):
        settings.ACCOUNT_EMAIL_VERIFICATION = 'none'

    def test_register_valid(self):
        """Register a fresh user; returns the payload for reuse."""
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password1': self.password,
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 201)
        return data

    @tag('email', 'register')
    def test_register_valid_with_email(self):
        """With mandatory verification, registering sends one email."""
        self.set_email_mandatory()
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password1': self.password,
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 201)
        self.assertEqual(len(mail.outbox), 1)

    def test_register_same_username(self):
        self.test_register_valid()
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password1': self.password,
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_same_email(self):
        self.test_register_valid()
        data = {
            'username': 'username25',
            'email': '<EMAIL>',
            'password1': self.password,
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_empty(self):
        data = {}
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_witout_username(self):
        data = {
            'email': '<EMAIL>',
            'password1': self.password,
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_witout_email(self):
        data = {
            'username': 'username2',
            'password1': self.password,
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_witout_password(self):
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password2': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_witout_password2(self):
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password1': self.password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_register_no_same_password(self):
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password1': '<PASSWORD>[[',
            'password2': '5f5fwefFWFE[[',
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_login(self):
        """A registered user can log in and receives a token key."""
        self.test_register_valid()
        data = {
            'username': 'username2',
            'password': self.password,
        }
        response = self.factory.post(reverse("rest_login"), data=data)
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertIsNotNone(content['key'])

    def test_login_with_email_verification(self):
        """Login is refused while the email is still unverified."""
        self.test_register_valid_with_email()
        self.set_email_mandatory()
        data = {
            'username': 'username2',
            'password': self.password,
        }
        response = self.factory.post(reverse("rest_login"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_login_wrong(self):
        data = {
            'username': 'usernam',
            'password': self.password,
        }
        response = self.factory.post(reverse("rest_login"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_login_wrong2(self):
        data = {
            'username': 'username2',
            'password': self.password,
        }
        response = self.factory.post(reverse("rest_login"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_login_wrong3(self):
        data = {
            'username': 'username2',
        }
        response = self.factory.post(reverse("rest_login"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_login_wrong4(self):
        data = {
            'password': self.password,
        }
        response = self.factory.post(reverse("rest_login"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_reset_password(self):
        data = {
            'email': '<EMAIL>'
        }
        response = self.factory.post(reverse("rest_password_reset"), data=data)
        self.assertEqual(response.status_code, 200)

    def test_reset_password_with_email_dont_exits(self):
        """Unknown email: endpoint still returns 200 but sends nothing."""
        self.set_email_mandatory()
        data = {
            'email': '<EMAIL>'
        }
        response = self.factory.post(reverse("rest_password_reset"), data=data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 0)

    @tag('email', 'password')
    def test_reset_password_with_email_exits(self):
        dict_instance = self.test_register_valid()
        self.set_email_mandatory()
        data = {
            'email': dict_instance['email']
        }
        response = self.factory.post(reverse("rest_password_reset"), data=data)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(mail.outbox), 1)

    def test_reset_password_wrong_email(self):
        data = {
            'email': '<EMAIL>'
        }
        response = self.factory.post(reverse("rest_password_reset"), data=data)
        self.assertEqual(response.status_code, 200)

    def test_reset_password_empty(self):
        data = {}
        response = self.factory.post(reverse("rest_password_change"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_reset_password_old_password(self):
        data = {
            "old_password": self.password,
            "new_password1": "<PASSWORD>[[",
            "new_password2": "<PASSWORD>[["
        }
        response = self.factory.post(reverse("rest_password_change"), data=data)
        self.assertEqual(response.status_code, 200)

    def test_reset_password_wrong_old_password(self):
        data = {
            "old_password": self.password + "dd",
            "new_password1": "<PASSWORD>[[",
            "new_password2": "<PASSWORD>[["
        }
        response = self.factory.post(reverse("rest_password_change"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_reset_password_wrong_no_old_password(self):
        data = {
            "new_password1": "<PASSWORD>[[",
            "new_password2": "<PASSWORD>[["
        }
        response = self.factory.post(reverse("rest_password_change"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_reset_password_wrong_no_same_password(self):
        data = {
            "old_password": self.password,
            "new_password1": "<PASSWORD>[",
            "new_password2": "<PASSWORD>[["
        }
        response = self.factory.post(reverse("rest_password_change"), data=data)
        self.assertEqual(response.status_code, 400)

    def test_get_user(self):
        """User details include the staff/admin scope flags."""
        response = self.factory.get(reverse("rest_user_details"))
        self.assertEqual(response.status_code, 200)
        content = json.loads(response.content)
        self.assertDictEqual(
            content['scope'],
            {
                'staff': self.user.is_staff,
                'admin': self.user.is_superuser,
            }
        )

    def test_settings_change(self):
        data = {
            'show_helpers': True,
            'show_admin': False
        }
        response = self.factory.patch(reverse("rest_user_details"), data)
        self.assertEqual(response.status_code, 200)
        self.user.refresh_from_db()
        self.assertTrue(self.user.show_helpers)
        self.assertFalse(self.user.show_admin)

    def test_settings_change2(self):
        data = {
            'show_helpers': False,
            'show_admin': True
        }
        response = self.factory.patch(reverse("rest_user_details"), data)
        self.assertEqual(response.status_code, 200)
        self.user.refresh_from_db()
        self.assertFalse(self.user.show_helpers)
        self.assertTrue(self.user.show_admin)
@tag('auth', 'authentication')
class AuthTestDelete(BaseTest):
    """Account-deletion flow: register, delete, verify the row is gone."""

    def test_register_valid(self):
        # NOTE(review): the password literal was scrubbed upstream to the
        # bare token `<PASSWORD>` (a syntax error); restored as a quoted
        # placeholder used consistently for both password fields.
        password = '<PASSWORD>'
        data = {
            'username': 'username2',
            'email': '<EMAIL>',
            'password1': password,
            'password2': password,
        }
        response = self.factory.post(reverse("rest_register"), data=data)
        self.assertEqual(response.status_code, 201)
        return data

    def test_account_delete(self):
        self.test_register_valid()
        user = self.user
        response = self.factory.post(reverse("account_delete"))
        self.assertEqual(response.status_code, 200)
        # The user row must no longer exist after deletion.
        self.assertFalse(get_user_model().objects.filter(id=user.id).exists())
|
from app.main.service.user_team_service import check_user_in_team, check_user_is_owner_or_editor, check_user_is_owner
from flask import request
from flask_restplus import Resource, reqparse
from ..util.decorator import token_required, admin_token_required
from ..util.dto import PortfolioDto
from ..service.portfolio_service import save_new_portfolio, get_all_portfolios, get_a_portfolio, update_portfolio, delete_a_portfolio
from ..service.team_portfolio_service import save_new_team_portfolio
from ..service.team_service import get_all_teams, get_a_team, get_personal_team_id, get_teams_from_portfolio, get_personal_team_id
from ..service.auth_helper import Auth
import datetime
# Namespace and request/response model re-exported from the portfolio DTO.
api = PortfolioDto.api
_portfolio = PortfolioDto.portfolio
@api.route('/')
class PortfolioList(Resource):
    """Collection endpoint: list portfolios and create new ones."""

    @api.doc('list_of_portfolios for a portfolio_model')
    @api.param('property_id', 'property id to search portfolios for')
    @api.param('team_id', 'team_id to search portfolios for')
    @api.param('is_deleted', 'if the portfolio is deleted or not')
    @api.param('is_active', 'if the portfolio is active or not')
    @api.marshal_list_with(_portfolio, envelope='data')
    @token_required
    def get(self):
        """List all portfolios"""
        # Optional filters arrive as query-string parameters.
        parser = reqparse.RequestParser()
        parser.add_argument("property_id", type=int)
        parser.add_argument("team_id", type=int)
        parser.add_argument("is_deleted", type=bool)
        parser.add_argument("is_active", type=bool)
        args = parser.parse_args()
        logined, status = Auth.get_logged_in_user(request)
        token=logined.get('data')
        if not token:
            # Authentication failed: propagate the auth payload and status.
            return logined, status
        if token["admin"]==False:
            # Non-admins are always scoped to their personal team.
            args["team_id"]=get_personal_team_id(token["user_id"]).id
        #non admins will see the portfolios they have created if they search for portfolios without specifying team id
        return get_all_portfolios(args['property_id'], args['team_id'], args["is_deleted"], args["is_active"])

    @api.response(201, 'portfolio successfully created.')
    @api.doc('create a new portfolio')
    @api.expect(_portfolio, validate=True)
    @token_required
    def post(self):
        """Creates a new portfolio """
        data = request.json
        logined, status = Auth.get_logged_in_user(request)
        token=logined.get('data')
        if not token:
            return logined, status
        # Stamp the payload with the acting user and the action timestamp.
        login_user={"login_user_id": token['user_id']}
        action_time={"action_time": datetime.datetime.utcnow()}
        data.update(login_user)
        data.update(action_time)
        # Attach the creator's personal team so ownership can be recorded.
        personal_team=get_personal_team_id(token['user_id'])
        personal_team_id={"personal_team_id": personal_team.id}
        data.update(personal_team_id)
        return save_new_portfolio(data=data)
        #This also creates the entry in TeamPortfolio as to which team member is the owner of the portfolio.
@api.route('/<portfolio_id>')
@api.param('portfolio_id', 'The portfolio identifier')
@api.response(404, 'portfolio not found.')
class Portfolio(Resource):
    """Item endpoint: fetch, update and delete a single portfolio.

    NOTE(review): the original final line carried a stray `|` concatenation
    artifact (a syntax error), removed here; the logic is unchanged.
    """

    @api.doc('get a portfolio')
    @api.marshal_with(_portfolio)
    @token_required
    def get(self, portfolio_id):
        """get a portfolio given its identifier"""
        p_id=int(portfolio_id)
        portfolio = get_a_portfolio(p_id)
        logined, status = Auth.get_logged_in_user(request)
        token=logined.get('data')
        if not token:
            return logined, status
        usr_id=int(token['user_id'])
        allowed_teams=get_teams_from_portfolio(p_id)
        # VIEW permission: the user must belong to at least one of the
        # portfolio's teams, unless they are an admin.
        flag=False
        for team in allowed_teams:
            team_id=int(team.id)
            if check_user_in_team(usr_id, team_id)==True:
                flag=True
        if flag==False and token['admin']==False:
            response_object = {
                'status': 'fail',
                'message': 'You cannot search for this information.'
            }
            return response_object, 401
        if not portfolio:
            api.abort(404)
        else:
            return portfolio

    @api.response(201, 'portfolio successfully created.')
    @api.doc('update a portfolio')
    @api.expect(_portfolio, validate=True)
    @token_required
    def put(self, portfolio_id):
        """Update a portfolio"""
        data = request.json
        logined, status = Auth.get_logged_in_user(request)
        token=logined.get('data')
        if not token:
            return logined, status
        login_user={"login_user_id": token['user_id']}
        action_time={"action_time": datetime.datetime.utcnow()}
        allowed_teams=get_teams_from_portfolio(portfolio_id)
        # EDIT permission: must be an Owner or Editor of one of the teams.
        flag=False
        for team in allowed_teams:
            if team.role=="Owner" or team.role=="Editor":
                if check_user_is_owner_or_editor(token['user_id'],team.team_id)==True:
                    flag=True
        if flag==False and token['admin']==False:
            response_object = {
                'status': 'fail',
                'message': 'You cannot add this information.'
            }
            return response_object, 401
        data.update(login_user)
        data.update(action_time)
        return update_portfolio(portfolio_id, data)

    @api.response(201, 'portfolio successfully deleted.')
    @api.doc('delete a portfolio')
    @token_required
    def delete(self, portfolio_id):
        """Delete a portfolio """
        logined, status = Auth.get_logged_in_user(request)
        token=logined.get('data')
        if not token:
            return logined, status
        login_user={"login_user_id": token['user_id']}
        action_time={"action_time": datetime.datetime.utcnow()}
        allowed_teams=get_teams_from_portfolio(portfolio_id)
        # DELETE permission: same Owner/Editor rule as updates.
        flag=False
        for team in allowed_teams:
            if team.role=="Owner" or team.role=="Editor":
                if check_user_is_owner_or_editor(token['user_id'],team.team_id)==True:
                    flag=True
        if flag==False and token['admin']==False:
            response_object = {
                'status': 'fail',
                'message': 'You cannot delete this information.'
            }
            return response_object, 401
        data=dict()
        data.update(login_user)
        data.update(action_time)
        return delete_a_portfolio(portfolio_id, data)
<filename>celer/tests/test_homotopy.py
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
import pytest
from itertools import product
from scipy import sparse
from sklearn.utils.estimator_checks import check_estimator
from sklearn.linear_model import (LassoCV as sklearn_LassoCV,
Lasso as sklearn_Lasso, lasso_path)
from celer import celer_path, celer
from celer.dropin_sklearn import Lasso, LassoCV
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
                  n_targets=1, sparse_X=False):
    """Build samples and observation for linear regression problem.

    Draws a weight vector/matrix with only the first
    ``n_informative_features`` rows nonzero, then generates train and
    test designs (dense Gaussian or 50%-density sparse) and the
    corresponding noiseless targets.  Uses a fixed seed (0) so every
    call is reproducible.
    """
    rng = np.random.RandomState(0)
    shape = (n_features, n_targets) if n_targets > 1 else (n_features,)
    w = rng.randn(*shape)
    w[n_informative_features:] = 0.0
    if sparse_X:
        X = sparse.random(n_samples, n_features, density=0.5, format='csc',
                          random_state=rng)
        X_test = sparse.random(n_samples, n_features, density=0.5,
                               format='csc', random_state=rng)
    else:
        X = rng.randn(n_samples, n_features)
        X_test = rng.randn(n_samples, n_features)
    y = X.dot(w)
    y_test = X_test.dot(w)
    return X, y, X_test, y_test
@pytest.mark.parametrize("sparse_X", [False, True])
def test_celer_path(sparse_X):
    """Test Lasso path convergence."""
    X, y, _, _ = build_dataset(n_samples=30, n_features=50, sparse_X=sparse_X)
    # Geometric grid of 10 alphas spanning two decades below alpha_max.
    alpha_max = np.max(np.abs(X.T.dot(y))) / X.shape[0]
    grid = alpha_max * np.logspace(0, -2, 10)
    tol = 1e-6
    alphas, coefs, gaps, thetas = celer_path(
        X, y, alphas=grid, tol=tol, return_thetas=True, verbose=False,
        verbose_inner=False)
    # Every duality gap along the path must fall below the tolerance.
    np.testing.assert_array_less(gaps, tol)
@pytest.mark.parametrize("sparse_X, prune", [(False, 0), (False, 1)])
def test_celer_path_vs_lasso_path(sparse_X, prune):
    """Test that celer_path matches sklearn lasso_path."""
    X, y, _, _ = build_dataset(n_samples=30, n_features=50, sparse_X=sparse_X)
    shared = dict(eps=1e-2, n_alphas=10, tol=1e-14)
    alphas1, coefs1, gaps1 = celer_path(
        X, y, return_thetas=False, verbose=1, prune=prune, **shared)
    alphas2, coefs2, gaps2 = lasso_path(X, y, verbose=False, **shared)
    # Both solvers must agree on the alpha grid and the coefficient paths.
    np.testing.assert_allclose(alphas1, alphas2)
    np.testing.assert_allclose(coefs1, coefs2, rtol=1e-05, atol=1e-6)
@pytest.mark.parametrize("sparse_X, fit_intercept", product([False, True],
                                                            [False, True]))
def test_dropin_LassoCV(sparse_X, fit_intercept):
    """Test that our LassoCV behaves like sklearn's LassoCV."""
    X, y, _, _ = build_dataset(n_samples=30, n_features=50, sparse_X=sparse_X)
    # Identical hyper-parameters for both implementations.
    params = dict(eps=1e-1, n_alphas=100, tol=1e-10, cv=2,
                  fit_intercept=fit_intercept)
    clf = LassoCV(**params)
    clf.fit(X, y)
    clf2 = sklearn_LassoCV(**params)
    clf2.fit(X, y)
    # CV error surface, selected alpha and coefficients must all agree.
    np.testing.assert_allclose(clf.mse_path_, clf2.mse_path_,
                               rtol=1e-04)
    np.testing.assert_allclose(clf.alpha_, clf2.alpha_,
                               rtol=1e-05)
    np.testing.assert_allclose(clf.coef_, clf2.coef_,
                               rtol=1e-05)
    # Run the sklearn estimator-API compliance suite as well.
    check_estimator(LassoCV)
@pytest.mark.parametrize("sparse_X, fit_intercept", product([False, True],
                                                            [False, True]))
def test_dropin_lasso(sparse_X, fit_intercept):
    """Test that our Lasso class behaves as sklearn's Lasso."""
    X, y, _, _ = build_dataset(n_samples=20, n_features=30, sparse_X=sparse_X)
    alpha_max = np.linalg.norm(X.T.dot(y), ord=np.inf) / X.shape[0]
    alpha = alpha_max / 2.
    # NOTE(review): the `normalize` estimator parameter was deprecated in
    # scikit-learn 1.0 and removed in 1.2; this test will fail on modern
    # sklearn -- confirm the pinned version before upgrading.
    params = dict(alpha=alpha, fit_intercept=fit_intercept, tol=1e-10,
                  normalize=True)
    clf = Lasso(**params)
    clf.fit(X, y)
    clf2 = sklearn_Lasso(**params)
    clf2.fit(X, y)
    np.testing.assert_allclose(clf.coef_, clf2.coef_, rtol=1e-5)
    if fit_intercept:
        np.testing.assert_allclose(clf.intercept_, clf2.intercept_)
    # Run the sklearn estimator-API compliance suite as well.
    check_estimator(Lasso)
@pytest.mark.parametrize("sparse_X", [True, False])
def test_celer_single_alpha(sparse_X):
    """Single-alpha solve: gap below tol and output shapes consistent."""
    X, y, _, _ = build_dataset(n_samples=20, n_features=100, sparse_X=sparse_X)
    n_samples, n_features = X.shape
    alpha_max = np.linalg.norm(X.T.dot(y), ord=np.inf) / n_samples
    tol = 1e-6
    w, theta, gaps, times = celer(X, y, alpha_max / 10., tol=tol)
    # The final duality gap certifies convergence.
    np.testing.assert_array_less(gaps[-1], tol)
    np.testing.assert_equal(w.shape[0], n_features)
    np.testing.assert_equal(theta.shape[0], n_samples)
@pytest.mark.parametrize("sparse_X", [True, False])
def test_zero_column(sparse_X):
    """The solver must tolerate all-zero feature columns."""
    X, y, _, _ = build_dataset(n_samples=60, n_features=50, sparse_X=sparse_X)
    n_zero_columns = 20
    # Zero out the first 20 columns (in-place for both storage formats).
    if sparse_X:
        X.data[:X.indptr[n_zero_columns]].fill(0.)
    else:
        X[:, :n_zero_columns].fill(0.)
    n_samples, n_features = X.shape
    alpha_max = np.linalg.norm(X.T.dot(y), ord=np.inf) / n_samples
    tol = 1e-6
    w, theta, gaps, times = celer(X, y, alpha_max / 10., tol=tol, p0=50,
                                  prune=0, verbose=1, verbose_inner=1)
    np.testing.assert_array_less(gaps[-1], tol)
    np.testing.assert_equal(w.shape[0], n_features)
    np.testing.assert_equal(theta.shape[0], n_samples)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
DP-Model test.
"""
import pytest
import numpy as np
from mindspore import nn
from mindspore import context
import mindspore.dataset as ds
from mindarmour.privacy.diff_privacy import DPModel
from mindarmour.privacy.diff_privacy import NoiseMechanismsFactory
from mindarmour.privacy.diff_privacy import ClipMechanismsFactory
from mindarmour.privacy.diff_privacy import DPOptimizerClassFactory
from tests.ut.python.utils.mock_net import Net
def dataset_generator():
    """Mock training data: yield 128 batches of 32 random 1x32x32 images
    (float32) with random labels in [0, 10) (int32)."""
    batch_size = 32
    batches = 128
    total = batches * batch_size
    data = np.random.random((total, 1, 32, 32)).astype(np.float32)
    label = np.random.randint(0, 10, total).astype(np.int32)
    for start in range(0, total, batch_size):
        yield data[start:start + batch_size], label[start:start + batch_size]
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_dp_model_with_pynative_mode():
    """Smoke-train DPModel in PyNative mode: Gaussian noise comes from the
    optimizer factory, so DPModel itself takes noise_mech=None."""
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
    clip_norm = 1.0
    noise_multiplier = 0.01
    micro_batches = 2
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    opt_factory = DPOptimizerClassFactory(micro_batches=micro_batches)
    opt_factory.set_mechanisms('Gaussian',
                               norm_bound=clip_norm,
                               initial_noise_multiplier=noise_multiplier)
    optimizer = opt_factory.create('Momentum')(net.trainable_params(),
                                               learning_rate=0.1, momentum=0.9)
    clip_mech = ClipMechanismsFactory().create('Gaussian',
                                               decay_policy='Linear',
                                               learning_rate=0.01,
                                               target_unclipped_quantile=0.9,
                                               fraction_stddev=0.01)
    dp_model = DPModel(micro_batches=micro_batches,
                       norm_bound=clip_norm,
                       clip_mech=clip_mech,
                       noise_mech=None,
                       network=net,
                       loss_fn=loss_fn,
                       optimizer=optimizer,
                       metrics=None)
    train_ds = ds.GeneratorDataset(dataset_generator, ['data', 'label'])
    dp_model.train(1, train_ds, dataset_sink_mode=False)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_dp_model_with_graph_mode():
    """Smoke-train DPModel in graph mode with Gaussian noise and clipping."""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    clip_norm = 1.0
    noise_multiplier = 0.01
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    noise_mech = NoiseMechanismsFactory().create('Gaussian',
                                                 norm_bound=clip_norm,
                                                 initial_noise_multiplier=noise_multiplier)
    clip_mech = ClipMechanismsFactory().create('Gaussian',
                                               decay_policy='Linear',
                                               learning_rate=0.01,
                                               target_unclipped_quantile=0.9,
                                               fraction_stddev=0.01)
    optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1,
                            momentum=0.9)
    dp_model = DPModel(micro_batches=2,
                       clip_mech=clip_mech,
                       norm_bound=clip_norm,
                       noise_mech=noise_mech,
                       network=net,
                       loss_fn=loss_fn,
                       optimizer=optimizer,
                       metrics=None)
    train_ds = ds.GeneratorDataset(dataset_generator, ['data', 'label'])
    dp_model.train(1, train_ds, dataset_sink_mode=False)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_card
@pytest.mark.component_mindarmour
def test_dp_model_with_graph_mode_ada_gaussian():
    """Smoke-train DPModel in graph mode with adaptive (decaying) Gaussian
    noise and the clipping mechanism deliberately disabled."""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    clip_norm = 1.0
    noise_multiplier = 0.01
    decay_rate = 0.8
    net = Net()
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
    noise_mech = NoiseMechanismsFactory().create('AdaGaussian',
                                                 norm_bound=clip_norm,
                                                 initial_noise_multiplier=noise_multiplier,
                                                 noise_decay_rate=decay_rate,
                                                 decay_policy='Exp')
    optimizer = nn.Momentum(net.trainable_params(), learning_rate=0.1,
                            momentum=0.9)
    dp_model = DPModel(micro_batches=2,
                       clip_mech=None,
                       norm_bound=clip_norm,
                       noise_mech=noise_mech,
                       network=net,
                       loss_fn=loss_fn,
                       optimizer=optimizer,
                       metrics=None)
    train_ds = ds.GeneratorDataset(dataset_generator, ['data', 'label'])
    dp_model.train(1, train_ds, dataset_sink_mode=False)
|
<gh_stars>0
import os
import random
import re
from pathlib import Path
import intervals as I
from pytorch_pretrained_bert import BertTokenizer
from collections import defaultdict
from config import Config
entity_label2abbr = {'自然人主体': 'NP',
'非自然人主体': 'NNP',
'机动车': 'MV',
'非机动车': 'NMV',
'责任认定': 'DUT',
'一般人身损害': 'GI',
'伤残': 'DIS',
'死亡': 'DEA',
'人身损害赔偿项目': 'PIC',
'财产损失赔偿项目': 'PLC',
'保险类别': 'INS',
'抗辩事由': 'DEF',
'违反道路交通信号灯': 'VTL',
'饮酒后驾驶': 'DAD',
'醉酒驾驶': 'DD',
'超速': 'SPE',
'违法变更车道': 'ICL',
'未取得驾驶资格': 'UD',
'超载': 'OVE',
'不避让行人': 'NAP',
'行人未走人行横道或过街设施': 'NCF',
'其他违法行为': 'OLA'
}
# filters = ['抗辩事由', '违反道路交通信号灯', '饮酒后驾驶', '醉酒驾驶', '超速', '违法变更车道', '未取得驾驶资格', '超载', '不避让行人', '行人未走人行横道或过街设施', '其他违法行为']
filters = ['抗辩事由', '其他违法行为']
class PrepareNer(object):
    """Turn brat-style annotation pairs (.txt + .ann) into BERT-tokenized
    sentences with parallel BIO tag sequences for NER training."""
    def __init__(self, max_len=Config.max_sequence_len):
        # do_lower_case=True: text is lower-cased during tokenization, so
        # entity mentions are matched case-insensitively.
        self.tokenizer = BertTokenizer.from_pretrained(Config.bert_pretrained_dir, do_lower_case=True)
        self.max_len = max_len  # maximum clause length accepted by get_fact()
    def get_annoteted_data(self, txt_file, ann_file, samples_statistics):
        """Get a piece of data from a annotated file.

        Returns (sents, tags): parallel lists of token lists and BIO tag
        lists. ``samples_statistics`` (label -> count) is updated in place.
        """
        # Get annotated entity dict: surface form -> entity label
        entities = {}
        with open(ann_file, 'r', encoding="utf-8") as reader_ann:
            for line in reader_ann:
                line = line.strip()
                if line.startswith('T'):  # brat entity lines start with T<id>
                    splits = line.split('\t')
                    label_pos = splits[1]
                    ent_label = label_pos.split(' ')[0]
                    ent = splits[2]
                    # Skip cross-line annotations: their position field holds
                    # several ranges, e.g. "T49 其他违法行为 1320 1372;1374 1417"
                    if ';' in label_pos:
                        continue
                    samples_statistics[ent_label] += 1
                    # keep first label seen for a surface form; drop filtered types
                    if ent not in entities and ent_label not in filters:
                        entities[ent] = ent_label
        # Tokenize each entity; sort longest-first so longer mentions claim
        # their token span before shorter overlapping ones (see interval check).
        entity_items = []
        for ent, ent_label in entities.items():
            ent_tokens = self.tokenizer.tokenize(ent)
            entity_items.append((ent_tokens, ent_label))
        entity_items = sorted(entity_items, key=lambda x: len(x[0]), reverse=True)
        # The length of the segmentation fragment for each long sentence, no more than 2
        sents = self.get_fact(txt_file)
        sents = [self.tokenizer.tokenize(sent.strip()) for sent in sents if len(sent.strip()) > 5]
        tags = [['O'] * len(sent) for sent in sents]
        for i in range(len(sents)):
            assert len(sents[i]) == len(tags[i])
        for i, sent_tokens in enumerate(sents):
            intervals = []  # token spans already tagged in this sentence
            for ent_tokens, ent_label in entity_items:
                ent_tag = entity_label2abbr[ent_label]
                positions = self.find_sub_list(sent_tokens, ent_tokens)
                for pos in positions:
                    interval = I.closed(pos[0], pos[-1])
                    overlap = self.is_overlap(intervals, interval)
                    # only tag a span that does not overlap an already
                    # claimed (longer or earlier) entity span
                    if not overlap:
                        tags[i][pos[0]] = 'B-' + ent_tag
                        if len(pos) > 1:
                            for p in pos[1:]:
                                tags[i][p] = 'I-' + ent_tag
                        intervals.append(interval)
        return sents, tags
    def get_fact(self, txt_file):
        """Read *txt_file* and split it into clauses no longer than max_len:
        first on '。', then (for still-long clauses) on semicolons and commas.
        """
        with open(txt_file, 'r', encoding="utf-8") as reader_txt:
            txt_text = reader_txt.read()
        facts = txt_text.split('\n')  # fact list
        new_facts = []  # The length of the segmentation fragment for each long sentence, no more than 2
        for i, fact in enumerate(facts):
            if len(fact) <= 5:  # drop very short lines (noise)
                continue
            clauses_period = self.split_text(fact, '。')
            clauses_period = list(filter(lambda x: len(x) > 5, clauses_period))
            for clause in clauses_period:
                if len(clause) <= self.max_len:
                    new_facts.append(clause)
                else:
                    clauses_semi = self.split_text(clause, ';;')
                    clauses_semi = list(filter(lambda x: len(x) > 5, clauses_semi))
                    len_semi = len(clauses_semi)
                    if len_semi == 1:
                        clauses_comma = self.split_text(clause, ',,')
                        clauses_comma = list(filter(lambda x: len(x) > 5, clauses_comma))
                        len_comma = len(clauses_comma)
                        if len_comma <= 2:
                            new_facts.extend(clauses_comma)
                        else: # len_comma >= 3
                            mid = len_comma // 2
                            # NOTE(review): the two extend() calls sit inside the
                            # odd-count branch, so even-count clause lists are
                            # silently dropped here; also `[0] < [-1]` compares
                            # clause *strings* lexicographically (len(...) looks
                            # intended), and extending [:mid] then [mid:] re-adds
                            # everything, making `mid` moot — confirm intent.
                            if len_comma % 2 == 1: # The number of elements is odd
                                if clauses_comma[0] < clauses_comma[-1]:
                                    mid = mid + 1
                                new_facts.extend(clauses_comma[:mid])
                                new_facts.extend(clauses_comma[mid:])
                    elif len_semi == 2:
                        new_facts.extend(clauses_semi)
                    else:
                        mid = len_semi // 2
                        # NOTE(review): same pattern as the comma branch above —
                        # even-count lists fall through unadded; confirm.
                        if len_semi % 2 == 1:
                            if clauses_semi[0] < clauses_semi[-1]:
                                mid = mid + 1
                            new_facts.extend(clauses_semi[:mid])
                            new_facts.extend(clauses_semi[mid:])
        return new_facts
    def split_text(self, text, punc):
        """Split *text* on any character in *punc*, keeping each separator
        attached to the end of the clause it terminates."""
        # (*x) - Retain the punctuation separator
        splits = re.split('([{}])'.format(punc), text)
        # Put the separator after the sentence if available
        clauses = [''.join(split).strip() for split in zip(splits[0::2], splits[1::2])]
        # NOTE(review): endswith(punc) tests the whole multi-char string, not
        # "any char of punc" — endswith(tuple(punc)) looks intended; the stray
        # empty tail this produces is filtered later by the len > 5 checks.
        if not text.endswith(punc):
            clauses.append(splits[-1].strip())
        return clauses
    def find_sub_list(self, all_list, sub_list):
        """Return a list of index lists, one per occurrence of *sub_list*
        as a contiguous slice of *all_list*."""
        match_indices = []
        all_len, sub_len = len(all_list), len(sub_list)
        # candidate starts: positions matching the first element
        starts = [i for i, ele in enumerate(all_list) if ele == sub_list[0]]
        for start in starts:
            end = start + sub_len
            if end <= all_len and all_list[start: end] == sub_list:
                match_indices.append(list(range(start, end)))
        return match_indices
    def is_overlap(self, intervals, interval):
        """True if *interval* overlaps any interval already in *intervals*."""
        flag = False
        for i in intervals:
            if interval.overlaps(i):
                flag = True
                break
        return flag
def prepare_data():
    """Read every annotated case, shuffle, split 80/10/10 and write the
    train/valid/test files; return per-label sample counts as a dict."""
    preparer = PrepareNer()
    # per-entity-label annotation counts, filled while reading .ann files
    stats = defaultdict(int)
    dataset = []
    for file_ann in Path(Config.annotation_data_dir).rglob("*.ann"):
        file_txt = str(file_ann.with_suffix('.txt'))
        dataset.append(preparer.get_annoteted_data(file_txt, file_ann, stats))
    all_case_num = len(dataset)
    train_count = int(all_case_num * 0.8)
    valid_count = int(all_case_num * 0.1)
    test_count = all_case_num - train_count - valid_count
    # shuffle case indices, then carve the 80/10/10 split out of the order
    order = list(range(all_case_num))
    random.shuffle(order)
    split_a = train_count
    split_b = train_count + valid_count
    train_dataset = [dataset[idx] for idx in order[:split_a]]
    valid_dataset = [dataset[idx] for idx in order[split_a:split_b]]
    test_dataset = [dataset[idx] for idx in order[split_b:]]
    train_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'train'), train_dataset)
    valid_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'valid'), valid_dataset)
    test_samples_count = write_to_file(os.path.join(Config.ner_data_dir, 'test'), test_dataset)
    print('\nall cases num: {}'.format(all_case_num))
    print("train cases: {}, samples: {}".format(train_count, train_samples_count))
    print("valid cases: {}, samples: {}".format(valid_count, valid_samples_count))
    print("test cases: {}, samples: {}".format(test_count, test_samples_count))
    return dict(stats)
def write_to_file(data_dir, dataset):
    """Write one dataset split to ``sentences.txt`` and ``tags.txt``.

    Each case in *dataset* is a ``(sents, tags)`` pair of parallel lists;
    every sentence becomes one space-joined line in ``sentences.txt`` with
    the matching tag sequence on the same line number of ``tags.txt``.

    :param data_dir: output directory (created if missing)
    :param dataset: iterable of ``(sents, tags)`` pairs
    :return: number of sentence/tag line pairs written
    """
    os.makedirs(data_dir, exist_ok=True)
    sentences_file = os.path.join(data_dir, 'sentences.txt')
    tags_file = os.path.join(data_dir, 'tags.txt')
    print("write to: {}, sentences.txt, tags.txt".format(data_dir))
    samples_count = 0
    # Bug fix: encoding is now explicit. The sentences contain Chinese text,
    # and the platform default encoding (e.g. GBK/cp1252 on Windows) would
    # raise UnicodeEncodeError or corrupt the output; the read side of the
    # pipeline (build_tags, model training) expects UTF-8.
    with open(sentences_file, 'w', encoding='utf-8') as writer_sent, \
            open(tags_file, 'w', encoding='utf-8') as writer_tag:
        for sents, tags in dataset:
            for sent, tag_seq in zip(sents, tags):
                writer_sent.write(' '.join(sent) + '\n')
                writer_tag.write(' '.join(tag_seq) + '\n')
                samples_count += 1
    return samples_count
def build_tags():
    """Collect the distinct NER tags used across all splits and write them,
    one per line, to ``<ner_data_dir>/tags.txt``.

    :return: the set of tag strings found
    """
    tags = set()
    data_types = ['train', 'valid', 'test']
    for data_type in data_types:
        data_path = os.path.join(Config.ner_data_dir, data_type, 'tags.txt')
        # Bug fix: read with explicit utf-8 to match the encoding the write
        # side uses; the platform default could differ (e.g. on Windows).
        with open(data_path, 'r', encoding='utf-8') as reader:
            for line in reader:
                line = line.strip()
                # Bug fix: skip blank lines — ''.split(' ') yields [''], which
                # used to inject an empty-string "tag" into the output file.
                if line:
                    tags.update(line.split(' '))
    tags_data_path = os.path.join(Config.ner_data_dir, 'tags.txt')
    with open(tags_data_path, "w", encoding="utf-8") as f:
        f.write("\n".join(tags))
    return tags
def draw_histogram(samples_statistics):
    """Show a bar chart of per-label sample counts, largest first."""
    import matplotlib.pyplot as plt
    # font that can render the Chinese entity labels on the x axis
    plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']
    ranked = sorted(samples_statistics.items(), key=lambda kv: kv[1], reverse=True)
    print('sorted samples_statistics: {}'.format(ranked))
    labels = [label for label, _ in ranked]
    counts = [count for _, count in ranked]
    plt.bar(labels, counts)
    plt.xticks(rotation=300)
    plt.xlabel('Label')
    plt.ylabel('Number')
    axes = plt.gca()
    axes.yaxis.grid(linestyle='--')
    # annotate each bar with its exact count
    for label, count in zip(labels, counts):
        plt.text(label, count + 0.05, '%.0f' % count, ha='center', va='bottom')
    plt.show()
def create_ner_data():
    """Run the full pipeline: split and write the data, then build the tag set."""
    stats = prepare_data()
    print('\nsamples_statistics: {}'.format(stats))
    build_tags()
|
"""support to generate SQLObject-based fixtures."""
from fixture.style import camel_to_under
from fixture import SQLObjectFixture
from fixture.command.generate import (
DataHandler, FixtureSet, register_handler, code_str,
UnsupportedHandler, MisconfiguredHandler, NoData )
try:
import sqlobject
except ImportError:
sqlobject = None
class SQLObjectHandler(DataHandler):
    """Generate fixture code from rows of a SQLObject class.

    Connects via the --dsn option and turns each selected row into a
    SQLObjectFixtureSet for the code generator.
    """
    loadable_fxt_class = SQLObjectFixture
    def __init__(self, *a,**kw):
        """Open the DB connection from --dsn; --dsn is mandatory and --env
        is not supported for sqlobject."""
        DataHandler.__init__(self, *a,**kw)
        from sqlobject import sqlhub, connectionForURI
        if self.options.dsn:
            self.connection = connectionForURI(self.options.dsn)
        else:
            raise MisconfiguredHandler(
                "--dsn option is required by %s" % self.__class__)
        if len(self.options.env):
            raise NotImplementedError(
                "sqlobject is not using --env; perhaps we just need to import "
                "the envs so that findClass knows about its objects?")
    def add_fixture_set(self, fset):
        """Register an import for the fixture set's SQLObject class in the
        generated-code template."""
        from sqlobject.classregistry import findClass
        so_class = fset.obj_id()
        kls = findClass(so_class)
        # this maybe isn't very flexible ...
        self.template.add_import("from %s import %s" % (
            kls.__module__, so_class))
    def find(self, idval):
        # record set becomes the single row with this primary-key value
        self.rs = [self.obj.get(idval)]
    def findall(self, query):
        """gets record set for query."""
        self.rs = self.obj.select(query, connection=self.connection)
        if not self.rs.count():
            raise NoData("no data for query \"%s\" on object %s" % (
                query, self.obj))
    def fxt_type(self):
        # name of the fixture base class used in generated code
        return 'SOFixture'
    @staticmethod
    def recognizes(object_path, obj=None):
        """returns True if obj is a SQLObject class.

        NOTE(review): falls through (returning None, i.e. falsy) when obj
        is a DeclarativeMeta but one of the excluded base names — appears
        intentional, but there is no explicit ``return False``.
        """
        if not sqlobject:
            raise UnsupportedHandler("sqlobject module not found")
        if obj is None:
            return False
        from sqlobject.declarative import DeclarativeMeta
        if type(obj) is DeclarativeMeta and obj.__name__ not in (
            'SQLObject', 'sqlmeta', 'ManyToMany', 'OneToMany'):
            return True
    def sets(self):
        """yields FixtureSet for each row in SQLObject."""
        for row in self.rs:
            yield SQLObjectFixtureSet(row, self.obj, connection=self.connection)
# Make the handler discoverable by the fixture-generation command machinery.
register_handler(SQLObjectHandler)
class SQLObjectFixtureSet(FixtureSet):
    """a fixture set for a SQLObject row.

    Captures one row's column values into ``self.data_dict`` (db column
    name -> value); foreign-key columns recurse into nested fixture sets.
    """
    def __init__(self, data, model, connection=None):
        """data: a SQLObject row instance; model: its SQLObject class;
        connection: optional DB connection used for foreign-key lookups."""
        FixtureSet.__init__(self, data)
        self.connection = connection
        self.model = model
        self.meta = model.sqlmeta
        self.foreign_key_class = {}  # column name -> foreign SQLObject class name
        self.primary_key = None
        self.understand_columns()
        # NOTE: primary keys are not included in columnList
        # so we need to find it ...
        cols = [self.meta.style.idForTable(self.meta.table)]
        cols.extend([self.attr_to_db_col(c) for c in self.meta.columnList])
        # even though self.meta.idName tells us the real col name, when
        # accessing object properties sqlobject wants you to say object.id,
        # for which it proxies the real id column name
        vals = [getattr(self.data, 'id')]
        vals.extend([self.get_col_value(c.name) for c in self.meta.columnList])
        self.data_dict = dict(zip(cols, vals))
    def attr_to_db_col(self, col):
        """Return the database column name for a SQLObject column object."""
        if col.dbName is not None:
            return col.dbName
        else:
            return self.meta.style.pythonAttrToDBColumn(col.name)
    def get_col_value(self, colname):
        """transform column name into a value or a
        new set if it's a foreign key (recursion).
        """
        from sqlobject.classregistry import findClass
        value = getattr(self.data, colname)
        if value is None:
            # this means that we are in a NULL foreign key
            # which could be perfectly legal.
            return None
        # Bug fix: dict.has_key() was removed in Python 3; the `in` operator
        # is equivalent and also works on Python 2.
        if colname in self.foreign_key_class:
            model = findClass(self.foreign_key_class[colname])
            rs = model.get(value, connection=self.connection)
            return SQLObjectFixtureSet(rs, model, connection=self.connection)
        else:
            return value
    def get_id_attr(self):
        """Return the name of the primary-key attribute for this table."""
        meta = self.meta
        id_attr = meta.style.idForTable(meta.table)
        return id_attr
    def mk_var_name(self):
        """returns a variable name for the instance of the fixture class.
        """
        fxt_cls_name = self.obj_id()
        return "_".join([camel_to_under(n) for n in fxt_cls_name.split('_')])
    def set_id(self):
        """returns id of this set (the primary key value)."""
        return getattr(self.data, 'id')  # id is a magic property in sqlobject, see __init__
    def understand_columns(self):
        """get an understanding of what columns are what, foreign keys, etc."""
        from sqlobject.col import SOForeignKey
        for name, col in self.meta.columns.items():
            if isinstance(col, SOForeignKey):
                self.foreign_key_class[col.name] = col.foreignKey
#### I don't know if this is necessary anymore...
# if sqlobject:
# # OUCH!
# # prepare for sqlobject monkey patch :( ...
# # this is so that foreign key lookups work right when
# # there are multiple schemas having the same table
# # (perfectly legal, but sqlobject was only finding the primary key
# # from the first schema)
# import re
# def columnsFromSchema(self, tableName, soClass):
#
# keyQuery = """
# SELECT pg_catalog.pg_get_constraintdef(oid) as condef
# FROM pg_catalog.pg_constraint r
# WHERE r.conrelid = %s::regclass AND r.contype = 'f'"""
#
# colQuery = """
# SELECT a.attname,
# pg_catalog.format_type(a.atttypid, a.atttypmod), a.attnotnull,
# (SELECT substring(d.adsrc for 128) FROM pg_catalog.pg_attrdef d
# WHERE d.adrelid=a.attrelid AND d.adnum = a.attnum)
# FROM pg_catalog.pg_attribute a
# WHERE a.attrelid =%s::regclass
# AND a.attnum > 0 AND NOT a.attisdropped
# ORDER BY a.attnum"""
#
# # kumar: add limit 1 to get primary key for
# # first rel in schema search path
# primaryKeyQuery = """
# SELECT pg_index.indisprimary,
# pg_catalog.pg_get_indexdef(pg_index.indexrelid)
# FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
# pg_catalog.pg_index AS pg_index
# WHERE c.relname = %s
# AND c.oid = pg_index.indrelid
# AND pg_index.indexrelid = c2.oid
# AND pg_index.indisprimary
# LIMIT 1
# """
#
# keyData = self.queryAll(keyQuery % self.sqlrepr(tableName))
# keyRE = re.compile(r"\((.+)\) REFERENCES (.+)\(")
# keymap = {}
#
# for (condef,) in keyData:
# match = keyRE.search(condef)
# if match:
# field, reftable = match.groups()
# keymap[field] = reftable.capitalize()
#
# primaryData = self.queryAll(primaryKeyQuery % self.sqlrepr(tableName))
# primaryRE = re.compile(r'CREATE .*? USING .* \((.+?)\)')
# primaryKey = None
# for isPrimary, indexDef in primaryData:
# match = primaryRE.search(indexDef)
# assert match, "Unparseable contraint definition: %r" % indexDef
# assert primaryKey is None, "Already found primary key (%r), then found: %r" % (primaryKey, indexDef)
# primaryKey = match.group(1)
# assert primaryKey, "No primary key found in table %r" % tableName
# if primaryKey.startswith('"'):
# assert primaryKey.endswith('"')
# primaryKey = primaryKey[1:-1]
#
# colData = self.queryAll(colQuery % self.sqlrepr(tableName))
# results = []
# if self.unicodeCols:
# client_encoding = self.queryOne("SHOW client_encoding")[0]
# for field, t, notnull, defaultstr in colData:
# if field == primaryKey:
# continue
# colClass, kw = self.guessClass(t)
# if self.unicodeCols and colClass == col.StringCol:
# colClass = col.UnicodeCol
# kw['dbEncoding'] = client_encoding
# kw['name'] = soClass.sqlmeta.style.dbColumnToPythonAttr(field)
# kw['dbName'] = field
# kw['notNone'] = notnull
# if defaultstr is not None:
# kw['default'] = self.defaultFromSchema(colClass, defaultstr)
# elif not notnull:
# kw['default'] = None
# if keymap.has_key(field):
# kw['foreignKey'] = keymap[field]
# results.append(colClass(**kw))
# return results
# from sqlobject.postgres import pgconnection
# from warnings import warn
# warn("monkey patching %s for multiple schema support" % (
# pgconnection.PostgresConnection.columnsFromSchema))
# pgconnection.PostgresConnection.columnsFromSchema = columnsFromSchema |
from itertools import product
from collections import OrderedDict
# comparable set
_set_ID = 0
class xset(set):
def __init__(self, iterable=None):
if iterable is not None:
self.data = OrderedDict({e: None for e in iterable})
else:
self.data = OrderedDict()
global _set_ID
self._id = _set_ID
_set_ID += 1
def add(self, item):
self.data[item] = None
def update(self, items):
self.data.update({e:None for e in items})
def discard(self, item):
if item in self.data:
del self.data[item]
def remove(self, item):
del self.data[item]
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
def __contains__(self, item):
return item in self.data
def pop(self):
return self.data.popitem()[0]
def clear(self):
return self.data.clear()
def copy(self): # Copies have new IDs (can't be compared to originals)
return xset(iter(self))
def __eq__(self, other):
return self._id == other._id
def __hash__(self):
return self._id
def __repr__(self):
# return '[{}]'.format(self._id) + '{' + ', '.join(map(repr, iter(self))) + '}'
return '{'+', '.join(map(repr,iter(self)))+'}'
def __str__(self):
return '{'+', '.join(map(repr,iter(self)))+'}'
def intersection(self, *others):
new = self.copy()
for x, other in product(self, others):
if x not in other:
new.remove(x)
return new
def union(self, *others):
new = self.copy()
for other in others:
for x in other:
new.add(x)
return new
def __isub__(self, other):
for x in other:
self.discard(x)
return self
def __imul__(self, other):
for x in self.copy():
if x not in other:
self.discard(x)
return self
def difference(self, *others):
new = self.copy()
for other in others:
for x in other:
new.discard(x)
return new
class xset_wrapper(set):
    """Variant of xset backed by the builtin ``set`` storage, but with the
    same identity-based equality/hash scheme (unique per-instance id)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        global _set_ID
        self._id = _set_ID
        _set_ID += 1

    def copy(self):  # Copies have new IDs (can't be compared to originals)
        # NOTE(review): returns an `xset`, not an `xset_wrapper`, so the
        # results of intersection/union/difference below switch type.
        # Looks like a copy-paste from xset — confirm whether intended.
        return xset(iter(self))

    def __eq__(self, other):
        return self._id == other._id

    def __hash__(self):
        return self._id

    def __repr__(self):
        return '{' + ', '.join(map(repr, iter(self))) + '}'

    def __str__(self):
        return '{' + ', '.join(map(repr, iter(self))) + '}'

    def intersection(self, *others):
        """Return elements contained in self and in every other."""
        new = self.copy()
        for x, other in product(self, others):
            if x not in other:
                # Bug fix: discard(), not remove() — with multiple `others`
                # an element can be dropped twice; remove() raised KeyError
                # on the second drop.
                new.discard(x)
        return new

    def union(self, *others):
        """Return the elements of self together with those of all others."""
        new = self.copy()
        for other in others:
            for x in other:
                new.add(x)
        return new

    def __isub__(self, other):
        # in-place difference
        for x in other:
            self.discard(x)
        return self

    def __imul__(self, other):
        # in-place intersection (iterate a copy: we mutate while looping)
        for x in self.copy():
            if x not in other:
                self.discard(x)
        return self

    def difference(self, *others):
        """Return elements of self not contained in any other."""
        new = self.copy()
        for other in others:
            for x in other:
                new.discard(x)
        return new
|
<reponame>sivikt/14m2-alg
# Draws Hilbert Curve
# author <NAME> <<EMAIL>>, 2014 Public Domain
import sys
from tkinter import *
class TkDraw(object):
    """Minimal pen-style drawing surface on a Tk canvas.

    Tracks a current pen position (PenX/PenY) and Color; LineTo draws a
    segment from the current position and leaves the pen at the endpoint.
    NOTE: constructing an instance creates the Tk root window as a side
    effect.
    """
    def __init__(self):
        self._color = 'black'  # current pen color
        self._x_coor = 0  # current pen x (canvas pixels)
        self._y_coor = 0  # current pen y (canvas pixels)
        self._width = 1024  # canvas width in pixels
        self._height = 800  # canvas height in pixels
        master = Tk()
        self._canvas = Canvas(master,
                              width=self._width, height=self._height,
                              bg='white')
        self._canvas.pack()
    def LineTo(self, x=0, y=0):
        """Draw a segment from the current pen position to (x, y) in the
        current color, then move the pen to (x, y)."""
        self._canvas.create_line(self._x_coor, self._y_coor, x, y,
                                 fill=self._color, smooth=1)
        self._x_coor = x
        self._y_coor = y
    @property
    def Color(self):
        """Current pen color (any Tk color string)."""
        return self._color
    @Color.setter
    def Color(self, value):
        self._color = value
    @property
    def PenX(self):
        """Current pen x coordinate."""
        return self._x_coor
    @PenX.setter
    def PenX(self, value):
        self._x_coor = value
    @property
    def PenY(self):
        """Current pen y coordinate."""
        return self._y_coor
    @PenY.setter
    def PenY(self, value):
        self._y_coor = value
    @property
    def Width(self):
        """Canvas width in pixels (read-only)."""
        return self._width
    @property
    def Height(self):
        """Canvas height in pixels (read-only)."""
        return self._height
class HilbertCurve(object):
    """Draw Hilbert curves on a TkDraw-like pen object.

    The curve is built from four mutually recursive orientations
    (_Top/_Down/_Right/_Left); each recursion level halves the cell width.
    """
    def __init__(self, graphics):
        # graphics: object with LineTo / PenX / PenY / Color / Width / Height
        self._g = graphics
    def _Colorize(self):
        """Alternate the pen color between black and red per curve order."""
        if self._g.Color == 'black':
            self._g.Color = 'red'
        else:
            self._g.Color = 'black'
    def _StartFrom(self, posX, posY):
        """Move the pen (without drawing) to the start position."""
        self._g.PenX = posX
        self._g.PenY = posY
    def _StepHor(self, width):
        """Draw a horizontal segment of signed length `width`."""
        self._g.LineTo(self._g.PenX + width, self._g.PenY)
    def _StepVer(self, width):
        """Draw a vertical segment of signed length `width`."""
        self._g.LineTo(self._g.PenX, self._g.PenY + width)
    ''' |_|
    '''
    def _Top(self, order, width):
        # cup-shaped motif opening upward; recurses into Right/Top/Left
        if (order > 0):
            order -= 1
            self._Right(order, width)
            self._StepVer(width)
            self._Top(order, width)
            self._StepHor(-width)
            self._Top(order, width)
            self._StepVer(-width)
            self._Left(order, width)
    ''' _
       | |
    '''
    def _Down(self, order, width):
        # cap-shaped motif opening downward; mirror of _Top
        if (order > 0):
            order -= 1
            self._Left(order, width)
            self._StepVer(-width)
            self._Down(order, width)
            self._StepHor(width)
            self._Down(order, width)
            self._StepVer(width)
            self._Right(order, width)
    '''  _
        |_
    '''
    def _Right(self, order, width):
        # motif opening to the right; used as the entry orientation by Recur
        if (order > 0):
            order -= 1
            self._Top(order, width)
            self._StepHor(-width)
            self._Right(order, width)
            self._StepVer(width)
            self._Right(order, width)
            self._StepHor(width)
            self._Down(order, width)
    ''' _
        _|
    '''
    def _Left(self, order, width):
        # motif opening to the left; mirror of _Right
        if (order > 0):
            order -= 1
            self._Down(order, width)
            self._StepHor(width)
            self._Left(order, width)
            self._StepVer(-width)
            self._Left(order, width)
            self._StepHor(-width)
            self._Top(order, width)
    def Recur(self, order=0):
        """Draw curves of order 1..order, roughly centered on the canvas,
        halving the cell width and re-anchoring the start each iteration."""
        posX = self._g.Width / 2
        posY = self._g.Height / 2
        width = posX + (posX / 2)
        for i in range(1, order+1):
            width /= 2
            posX += width / 2
            posY -= width / 2
            self._Colorize()
            self._StartFrom(posX, posY)
            print("Hilbert order = %d, width=%d" % (i, width))
            self._Right(i, width)
def main():
    """Read the curve order from argv[1], draw the Hilbert curve and enter
    the Tk event loop (blocks until the window is closed)."""
    order = int(sys.argv[1])
    g = TkDraw()
    hc = HilbertCurve(g)
    hc.Recur(order=order)
    mainloop()


# Bug fix: guard the entry point — importing this module used to open a Tk
# window immediately and crash without a command-line argument.
if __name__ == '__main__':
    main()
|
<reponame>fjfaggingerauer/obsim
__all__ = ['Telescope',]
import astropy.units as u
import astropy.coordinates as c
import astropy.time as t
import hcipy as hp
from ..config import default_units, simulation_units
from ..util import make_properties
# Conversion functions for grid properties
# pupil grid: diameter = pupil_grid_size * pupil_grid_resolution
def get_diameter(pupil_grid_size, pupil_grid_resolution):
    """Physical pupil diameter from pixel count and per-pixel size."""
    return pupil_grid_resolution * pupil_grid_size
def get_pupil_grid_size(diameter, pupil_grid_resolution):
    """Number of pupil-grid pixels from diameter and per-pixel size.

    NOTE(review): `.value.astype(int)` truncates toward zero rather than
    rounding — confirm that is the intended rounding behavior.
    """
    return ((diameter/pupil_grid_resolution).value).astype(int)
def get_pupil_grid_resolution(diameter, pupil_grid_size):
    """Physical size of one pupil-grid pixel."""
    return diameter/pupil_grid_size
# focal grid
# q = focal_grid_size / num_airy
# field_of_view = focal_grid_size * focal_grid_resolution / focal_length
# phys_scale = f_number * reference_wavelength
# field_of_view = num_airy * reference_wavelength/diameter
# f_number = focal_length / diameter
# phys_scale = q * focal_grid_resolution
def get_q1(focal_grid_size, num_airy):
    """Pixels per lambda/D from grid size and number of Airy rings."""
    return focal_grid_size/num_airy
def get_focal_grid_size1(q, num_airy):
    """Focal-grid pixel count from sampling q and number of Airy rings."""
    return q * num_airy
def get_num_airy1(q, focal_grid_size):
    """Number of Airy rings from sampling q and focal-grid pixel count."""
    return focal_grid_size / q
def get_field_of_view1(focal_grid_size, focal_grid_resolution, focal_length):
    """Field of view (angle) from focal-grid extent over focal length."""
    return focal_grid_size * focal_grid_resolution / focal_length
def get_focal_grid_size2(field_of_view, focal_grid_resolution, focal_length):
    """Focal-grid pixel count from FOV, pixel size and focal length
    (truncated to int via `.value.astype(int)`)."""
    return ((field_of_view.to(u.rad) * focal_length/focal_grid_resolution).value).astype(int)
def get_focal_grid_resolution1(field_of_view, focal_grid_size, focal_length):
    """Physical focal-plane pixel size from FOV, pixel count and focal length."""
    return field_of_view.to(u.rad).value/focal_grid_size * focal_length
def get_focal_length1(focal_grid_size, focal_grid_resolution, field_of_view):
    """Effective focal length from focal-grid extent over FOV."""
    return focal_grid_size * focal_grid_resolution / field_of_view.to(u.rad).value
def get_phys_scale1(f_number, reference_wavelength):
    """Physical size of one lambda/D in the focal plane: F# * lambda."""
    return f_number * reference_wavelength
def get_f_number1(phys_scale, reference_wavelength):
    """Primary f-number (dimensionless) from lambda/D physical scale."""
    return (phys_scale / reference_wavelength).value
def get_reference_wavelength1(phys_scale, f_number):
    """Reference wavelength from lambda/D physical scale and f-number."""
    return phys_scale / f_number
def get_field_of_view2(num_airy, reference_wavelength, diameter):
    """FOV (rad) from Airy-ring count: num_airy * lambda / D."""
    return (num_airy * reference_wavelength / diameter) * u.rad
def get_num_airy2(field_of_view, reference_wavelength, diameter):
    """Number of Airy rings covered by the FOV at the reference wavelength."""
    return field_of_view.to(u.rad).value * (diameter/reference_wavelength).value
def get_reference_wavelength2(field_of_view, num_airy, diameter):
    """Reference wavelength from FOV, Airy-ring count and diameter."""
    return field_of_view.to(u.rad).value / num_airy * diameter
# no function for diameter, this should only be determined by the pupil grid
def get_f_number2(focal_length, diameter):
    """Primary f-number (dimensionless): focal length over diameter."""
    return (focal_length/diameter).value
def get_focal_length2(f_number, diameter):
    """Effective focal length: f-number times diameter."""
    return f_number * diameter
def get_phys_scale2(q, focal_grid_resolution):
    """Physical lambda/D size: sampling q times focal-plane pixel size."""
    return q * focal_grid_resolution
def get_q2(phys_scale, focal_grid_resolution):
    """Sampling q (dimensionless) from lambda/D size over pixel size."""
    return (phys_scale / focal_grid_resolution).value
def get_focal_grid_resolution2(phys_scale, q):
    """Focal-plane pixel size from lambda/D physical size and sampling q."""
    return phys_scale / q
class Telescope(object):
property_list = {
# pupil grid
'diameter' : { # telescope primary mirror diameter
'unit': default_units.length,
'default' : 1*u.m,
'functions' : [(get_diameter, ("pupil_grid_size", "pupil_grid_resolution"))]
},
'pupil_grid_size' : { # number of pixels in telescope pupil grid (for hcipy)
'default' : 128,
'functions' : [(get_pupil_grid_size, ("diameter", "pupil_grid_resolution"))]
},
'pupil_grid_resolution' : { # physical size of a pixel in the pupil grid
'unit' : default_units.length,
'functions' : [(get_pupil_grid_resolution, ("diameter", "pupil_grid_size"))]
},
# focal grid
'field_of_view' : { # field of view of the telescope
'unit' : default_units.angle,
'functions' : [(get_field_of_view1, ("focal_grid_size", "focal_grid_resolution", "focal_length")),
(get_field_of_view2, ("num_airy", "reference_wavelength", "diameter"))]
},
'reference_wavelength' : { # reference wavelength in lambda/d calculations
'unit' : default_units.length,
'default' : 1E-6 * u.m,
'functions' : [(get_reference_wavelength1, ("phys_scale", "f_number")),
(get_reference_wavelength2, ("field_of_view", "num_airy", "diameter"))]
},
'f_number': { # telescope primary f-number
'default' : 1,
'functions' : [(get_f_number1, ("phys_scale", "reference_wavelength")),
(get_f_number2, ("focal_length", "diameter"))]
},
'q' : { # number of pixels per lambda/d in focal plane
'default' : 2,
'functions' : [(get_q1, ("focal_grid_size", "num_airy")),
(get_q2, ("phys_scale", "focal_grid_resolution"))]
},
'phys_scale' : { # physical size of a lambda/d distance in the focal plane
'unit' : default_units.length,
'functions' : [(get_phys_scale1, ("f_number", "reference_wavelength")),
(get_phys_scale2, ("q", "focal_grid_resolution"))]
},
'focal_grid_size' : { # number of pixels in focal plane grid (for hcipy)
'functions' : [(get_focal_grid_size1, ("q", "num_airy")),
(get_focal_grid_size2, ("field_of_view", "focal_grid_resolution", "focal_length"))]
},
'focal_length' : { # effective focal length of primary
'unit' : default_units.length,
'functions' : [(get_focal_length1, ("focal_grid_size", "focal_grid_resolution", "field_of_view")),
(get_focal_length2, ("f_number", "diameter"))]
},
'num_airy' : { # number of airy rings in the focal grid (for hcipy)
'functions' : [(get_num_airy1, ("q", "focal_grid_size")),
(get_num_airy2, ("field_of_view", "reference_wavelength", "diameter"))]
},
'focal_grid_resolution' : { # size of a pixel in focal plane grid (for hcipy)
'unit' : default_units.length,
'functions' : [(get_focal_grid_resolution1, ("field_of_view", "focal_grid_size", "focal_length")),
(get_focal_grid_resolution2, ("phys_scale", "q"))]
},
# aperture
'aperture_type' : { # shape of aperture (including spiders etc.) (for hcipy aperture generator)
'type' : str,
'default' : 'circular'
},
'aperture_generator' : { # hcipy aperture generator
},
# pointing of the telescope
'pointing' : { # direction the telescope is pointed when sources don't have a physical location
'unit': default_units.angle,
'default' : [0,0]
},
'physical_pointing' : { # Altitude/azimuth of telescope pointing
'type' : c.SkyCoord,
},
# properties for sources with physical location
'rotation' : { # TBD
'unit' : default_units.angle,
'default' : 0 * u.rad
},
'location': { # location of the telescope on Earth
'type' : c.EarthLocation,
},
'observation_time': { # date & time of observation
'type' : t.Time,
},
'reference_azimuth': { # TBD
'unit' : default_units.angle,
'default' : 0 * u.rad
},
'altitude_limits':{ # minimum/maximum altitude that can be pointed at
'unit' : default_units.angle,
'default' : [0, 90] * u.deg,
},
}
def __init__(self, **kwargs):
make_properties(self, self.property_list, kwargs)
self._tracked_target = None
@property
def pupil_grid(self):
return hp.make_pupil_grid(self.pupil_grid_size, self.diameter.to(simulation_units.length).value)
@property
def focal_grid(self):
return hp.make_focal_grid(self.q, self.num_airy, self.phys_scale.to(simulation_units.length).value)
@property
def aperture_function(self):
if self.aperture_generator is not None:
return self.aperture_generator
elif self.aperture_type == 'circular':
return hp.circular_aperture(self.diameter.to(simulation_units.length).value)
else:
raise NotImplementedError(f"Aperture type '{self.aperture_type}' not supported.")
def aperture(self, component_type='hcipy'):
if component_type == 'hcipy':
from ..components import Apodizer
return Apodizer(self.aperture_function, 'pupil')
else:
raise ValueError(f"Unrecognized type '{component_type}' for telescope apodizer.")
def propagator(self, component_type='hcipy'):
if component_type == 'hcipy':
from ..components import FraunhoferPropagator
return FraunhoferPropagator(propagator = hp.FraunhoferPropagator(self.pupil_grid, self.focal_grid, self.focal_length.to(simulation_units.length).value))
else:
raise ValueError(f"Unrecognized type '{component_type}' for telescope propagator.")
def point_at(self, source):
def point(loc):
if self.location is None:
raise ValueError("When a source has a physical location, the telescope must have this as well.")
if self.observation_time is None:
raise ValueError("When a source has a physical location, an observation time must be given.")
self.physical_pointing = loc.transform_to(c.AltAz(obstime = self.observation_time, location = self.location))
if self.physical_pointing.alt < self.altitude_limits.min() or self.physical_pointing.alt > self.altitude_limits.max():
raise RuntimeError(f"Cannot point telescope to an altitude of {self.physical_pointing.alt}, it must be in the range {self.altitude_limits}.")
if isinstance(source, c.SkyCoord):
point(source)
if source.physical_location is None:
self.pointing = source.location
else:
point(source.physical_location)
    @property
    def tracked_target(self):
        """The source currently being tracked, or None (read-only; set via track())."""
        return self._tracked_target
@property
def tracking_target(self):
return self.tracked_target is not None
def track(self, source):
if source.physical_location is not None:
self._tracked_target = source
self.point_at(source)
def __call__(self, val):
ap = self.aperture()(val)
prop = self.propagator()(ap)
return prop
def to_dict(self):
tree = {'properties': self.properties.to_dict(),
#'tracked_target': self.tracked_target, # TBD
#'pointing' : self.pointing} # TBD
}
@classmethod
def from_dict(cls, tree):
kwargs = {key: tree['properties']['values'][key] for key in tree['properties']['values'] if tree['properties']['externally_set'][key]}
obj = cls(**kwargs)
#if tree['tracked_target'] is not None:
# obj.track()
#if tree['pointing'] is not None:
# obj.point_at() |
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
import geopandas as gpd
from pages.constants import TOPIC_COLORS
class SentimentCRUD(object):
    """Loads the speech-sentiment datasets and builds the Plotly figures
    used by the dashboard pages.

    All data files are read once from ./data/ at construction time.
    """
    def __init__(self):
        # Per-speech sentiment scores (stanza/textblob/vader/subjectivity),
        # sorted chronologically and clipped to the start of Obama's term.
        self.base_df = pd.read_csv(
            './data/results_sa_all.txt',
            infer_datetime_format=True,
            parse_dates=["date"]
        ).sort_values("date")
        self.base_df = self.base_df[self.base_df["date"] >= "2009-01-27"]
        # Job approval/disapproval poll time series.
        self.df_ppl = pd.read_csv(
            './data/obama-job-approval-ratings.txt',
            infer_datetime_format=True,
            parse_dates=["date"]
        ).sort_values("date")
        # Speech locations with sentiment, wrapped as a GeoDataFrame for the map plot.
        self.geo_df = gpd.GeoDataFrame(pd.read_csv(
            './data/geo_df_sentiment.csv')
        )
        # Gun-death counts pivoted wide: one column per intent, one row per year.
        self.gun_deaths_wide = pd.read_csv(
            './data/gun_deaths.csv'
        ).pivot(
            index="Year", columns="Intent", values = "Total"
        )
        # Duplicate the index as a plain column usable as an x-axis, then keep
        # only the window matching the speech data.
        self.gun_deaths_wide['Year2'] = self.gun_deaths_wide.index
        self.gun_deaths_wide = self.gun_deaths_wide.loc[(self.gun_deaths_wide['Year2'] > 2008) & (self.gun_deaths_wide['Year2'] < 2018)]
        self.gun_deaths_wide.rename(columns={"Total - all intents": "All_intents"}, inplace = True)
        self.topic_colors = TOPIC_COLORS
    def plot_sentiment_over_time(self):
        """Line chart of all four sentiment scores over time (one trace per method)."""
        df_sorted = self.base_df.sort_values('date')
        fig = go.Figure()
        fig.add_traces(go.Scatter(x=df_sorted["date"], y=df_sorted["stanza"], name='stanza',
                                  line=dict(color='#636EFA', width=2)))
        fig.add_traces(go.Scatter(x=df_sorted["date"], y=df_sorted["textblob"], name='textblob',
                                  line=dict(color='#EF553B', width=2)))
        fig.add_traces(go.Scatter(x=df_sorted["date"], y=df_sorted["vader"], name='vader',
                                  line=dict(color='#00CC96', width=2)))
        fig.add_traces(go.Scatter(x=df_sorted["date"], y=df_sorted["subjectivity"], name='subjectivity',
                                  line=dict(color='#AB63FA', width=2)))
        fig.update_layout(title='Sentiment scores over time per method',
                          xaxis_title='Date',
                          yaxis_title='Score')
        return fig
    def plot_sentiment_location(self, location=True, stanza=True, color='stanza'):
        """World map of speeches.

        Args:
            location: whether to show the 'location' field in the hover box.
            stanza: whether to show the 'stanza' score in the hover box.
            color: dataframe column used to colour the markers.
        """
        fig = px.scatter_geo(self.geo_df, lat="latitude", lon="longitude", color=color,  # or color='stanza'
                             hover_name="title",
                             hover_data={"latitude": False,
                                         "longitude": False,
                                         "location": location,
                                         "stanza": stanza},
                             projection="natural earth")
        fig.update_layout(
            title_text='Obama`s Speeches',
            showlegend=True,
            height=600
            # Layout of legend for the slides
            # legend=dict(
            # orientation="h",
            # yanchor="bottom",
            # y= -1,
            # xanchor="right",
            # x=1.3)
        )
        return fig
    def plot_sentiment_popularity_tracker(self):
        """Dual-axis chart: approval polls (right axis) vs. stanza/textblob
        sentiment scores (left axis) over time."""
        # Plot sentiment plus ratings (together)
        # Create figure with secondary y-axis
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        # Add popularity lines
        for col in self.df_ppl.columns[1:]:
            fig.add_trace(go.Scatter(x=self.df_ppl["date"],
                                     y=self.df_ppl[col], name=col),
                          secondary_y=True,)
        # Add sentiment lines
        for col in self.base_df.columns[2:]:
            if col == 'stanza' or col == 'textblob':
                fig.add_trace(go.Scatter(x=self.base_df["date"],
                                         y=self.base_df[col], name=col),
                              secondary_y=False,)
        # Add figure title
        # NOTE(review): "Distribition" is a typo in the displayed title.
        fig.update_layout(  # height=600, width=1450,
            title={"text": "Job (Dis)Approval Distribition and Sentiment Scores over Time", "x": 0.3})
        # Set x-axis title
        fig.update_xaxes(title_text="Year")
        fig.update_yaxes(title_text="Opinion ratio",
                         range=[0, 70], secondary_y=True)
        fig.update_yaxes(title_text="Sentiment score",
                         range=[-1.1, 1.1], secondary_y=False)
        return fig
    def plot_gun_popularity_tracker(self):
        """Dual-axis chart: yearly gun deaths (right axis) vs. sentiment
        scores (left axis) over time."""
        # Create figure with secondary y-axis
        fig = make_subplots(specs=[[{"secondary_y": True}]])
        # Add sentiment lines
        for col in self.base_df.columns[2:]:
            if col == 'stanza' or col == 'textblob' or col == 'weighted' :
                fig.add_trace(go.Scatter(x=self.base_df["date"],
                                         y=self.base_df[col], name=col),
                              secondary_y=False,)
        # Add gun deaths lines
        for col in self.gun_deaths_wide.columns[0:6]:
            if col == 'Assault' or col == 'Suicide' or col == 'All_intents' :
                fig.add_trace(go.Scatter(x=self.gun_deaths_wide["Year2"],
                                         y=self.gun_deaths_wide[col], name=col),
                              secondary_y=True,)
        # Add figure title
        fig.update_layout(#height=600, width=1450,
            title = {"text":"Gun Deaths Frequencies and Sentiment Scores over Time", "x":0})
        # Set x-axis title
        fig.update_xaxes(title_text="Year")
        fig.update_yaxes(title_text="Gun Deaths", range=[10000, 42000], secondary_y=True)
        fig.update_yaxes(title_text="Sentiment score",range=[-1.1,1.1], secondary_y=False)
        return fig
    # def plot_sentiment_time_neo4j(self):
    #     array=['Positive','Mixed','Neutral','Negative']
    #     # Create figure with secondary y-axis
    #     fig = make_subplots(specs=[[{"secondary_y": True}]])
    #     # Add traces
    #     fig.add_trace(
    #         go.Scatter(x=ndf_1["Date"], y=ndf_1["Sentiment"], name="Sentiment",line=dict(color='#636EFA', width=2)),
    #         secondary_y=False,
    #     )
    #     fig.add_trace(
    #         go.Scatter(x=ndf_1["Date"],y=ndf_1["Sentiment_score"], name="Confidence",line=dict(color='#00CC96', width=1)),
    #         secondary_y=True,
    #     )
    #     # Add figure title
    #     fig.update_layout(
    #         title_text="Sentiment over time"
    #     )
    #     # Set x-axis title
    #     fig.update_xaxes(title_text="Date")
    #     # Set y-axes titles and values
    #     fig.update_yaxes(title_text="<b>Primary axis</b> Sentiment", secondary_y=False,categoryorder='array', categoryarray= ['Negative','Neutral','Mixed','Positive'])
    #     fig.update_yaxes(title_text="<b>Secondary axis</b> Confidence", secondary_y=True,griddash="dot")
    #     return fig
|
import enum
import json
from typing import List, Union
import msgpack
from aioredis import Redis
class RequestType(int, enum.Enum):
    """Control-request codes published on the adapter request channel
    (mirrors the socket.io redis adapter protocol)."""
    SOCKETS = 0
    ALL_ROOMS = 1
    REMOTE_JOIN = 2
    REMOTE_LEAVE = 3
    REMOTE_DISCONNECT = 4
    REMOTE_FETCH = 5
    SERVER_SIDE_EMIT = 6
class PacketType(int, enum.Enum):
    """socket.io packet type codes (matches the socket.io-parser protocol)."""
    CONNECT = 0
    DISCONNECT = 1
    EVENT = 2
    ACK = 3
    CONNECT_ERROR = 4
    BINARY_EVENT = 5
    BINARY_ACK = 6
class BroadcastOperatorError(Exception):
    """Raised when a broadcast request cannot be honoured (e.g. reserved
    event names or unsupported acknowledgements)."""
    def __init__(self, message: str):
        super().__init__(message)
# Identifier this process reports as the message origin; the fixed string
# "emitter" mirrors the node @socket.io/redis-emitter convention.
UID = "emitter"
class Emitter:
    """Publishes socket.io packets to Redis so that servers subscribed to
    the adapter channels deliver them to connected clients.

    Mirrors the node ``@socket.io/redis-emitter`` API: every broadcast
    configuration method delegates to a fresh :class:`BroadcastOperator`.
    """
    def __init__(self, redis_client: Redis, nsp: str = '/'):
        """Args:
            redis_client: connected aioredis client used for publishing.
            nsp: socket.io namespace this emitter targets.
        """
        self.redis_client = redis_client
        # Channel names follow the socket.io redis adapter convention:
        # "<prefix>#<namespace>#" for broadcasts,
        # "<prefix>-request#<namespace>#" for control requests.
        self.broadcast_options = {
            'broadcast_channel': 'socket.io' + '#' + nsp + '#',
            'request_channel': 'socket.io' + '-request#' + nsp + '#',
            'nsp': nsp
        }
    def of(self, nsp: str):
        """Return a new Emitter bound to namespace *nsp* (a leading '/' is
        prepended when missing)."""
        return Emitter(self.redis_client, ('' if nsp.startswith('/') else '/') + nsp)
    async def emit(self, ev: str, *args):
        """Broadcast event *ev* with *args* to the whole namespace."""
        return await BroadcastOperator(self.redis_client, self.broadcast_options).emit(ev, *args)
    def to(self, room: Union[str, List[str]]):
        """Target one room (or a list of rooms) for a subsequent emit."""
        return BroadcastOperator(self.redis_client, self.broadcast_options).to(room)
    def in_room(self, room: Union[str, List[str]]):
        """Alias of :meth:`to`."""
        return BroadcastOperator(self.redis_client, self.broadcast_options).in_room(room)
    def except_room(self, room: Union[str, List[str]]):
        """Exclude one room (or a list of rooms) from a subsequent emit."""
        return BroadcastOperator(self.redis_client, self.broadcast_options).except_room(room)
    def volatile(self):
        """Mark the next emit as volatile (may be dropped if clients are not ready)."""
        return BroadcastOperator(self.redis_client, self.broadcast_options).volatile()
    def compress(self, _compress: bool = True):
        """Set the compress flag for the next emit."""
        return BroadcastOperator(self.redis_client, self.broadcast_options).compress(_compress)
    async def sockets_join(self, rooms: Union[List[str], str]):
        """Make every matching socket join *rooms*."""
        return await BroadcastOperator(self.redis_client, self.broadcast_options).sockets_join(rooms)
    async def sockets_leave(self, rooms: Union[List[str], str]):
        """Make every matching socket leave *rooms*."""
        return await BroadcastOperator(self.redis_client, self.broadcast_options).sockets_leave(rooms)
    async def disconnect_sockets(self, close: bool):
        """Disconnect every matching socket (optionally closing the transport)."""
        return await BroadcastOperator(self.redis_client, self.broadcast_options).disconnect_sockets(close)
    async def server_side_emit(self, *args):
        """Emit *args* to the other socket.io servers (not to clients).

        Raises:
            BroadcastOperatorError: if the last argument is a callable,
                since acknowledgements cannot travel through Redis.
        """
        # Bug fix: guard the empty-args case — args[-1] raised IndexError.
        if args and callable(args[-1]):
            raise BroadcastOperatorError("Acknowledgements are not supported")
        request = json.dumps({
            'uid': UID,
            'type': RequestType.SERVER_SIDE_EMIT,
            'data': args
        })
        await self.redis_client.publish(self.broadcast_options['request_channel'], request)
# Event names reserved by the socket.io protocol itself; user code must not
# emit them (enforced by BroadcastOperator.emit).
RESERVED_EVENTS = {
    "connect",
    "connect_error",
    "disconnect",
    "disconnecting",
    "newListener",
    "removeListener",
}
class BroadcastOperator:
    """Builder for a single broadcast over the Redis adapter.

    Each chaining method (``to`` / ``except_room`` / ``compress`` /
    ``volatile``) returns a NEW operator carrying the accumulated rooms,
    exclusions and flags; ``emit`` finally msgpack-encodes the packet and
    publishes it on the broadcast channel.
    """
    def __init__(self, redis_client: Redis,
                 broadcast_options,
                 rooms=None,
                 except_rooms=None,
                 flags=None):
        # Collections default to None in the signature (not {}/set()) to
        # avoid shared mutable default arguments.
        if flags is None:
            flags = {}
        if except_rooms is None:
            except_rooms = set()
        if rooms is None:
            rooms = set()
        self.redis_client = redis_client
        self.broadcast_options = broadcast_options
        self.rooms = rooms
        self.except_rooms = except_rooms
        self.flags = flags
    def to(self, room: Union[List[str], str]):
        """Return a new operator additionally targeting *room* (str or list)."""
        rooms = set(self.rooms)
        if isinstance(room, list):
            for r in room:
                rooms.add(r)
        else:
            rooms.add(room)
        return BroadcastOperator(self.redis_client,
                                 self.broadcast_options,
                                 rooms,
                                 self.except_rooms,
                                 self.flags)
    def in_room(self, room: Union[List[str], str]):
        """Alias of :meth:`to`."""
        return self.to(room)
    def except_room(self, room: Union[str, List[str]]):
        """Return a new operator additionally excluding *room* (str or list)."""
        except_rooms = set(self.except_rooms)
        if isinstance(room, list):
            for r in room:
                except_rooms.add(r)
        else:
            except_rooms.add(room)
        return BroadcastOperator(self.redis_client,
                                 self.broadcast_options,
                                 self.rooms,
                                 except_rooms,
                                 self.flags)
    def compress(self, _compress: bool = True):
        """Return a new operator with the compress flag set to *_compress*."""
        flags = {**self.flags, 'compress': _compress}
        return BroadcastOperator(self.redis_client, self.broadcast_options, self.rooms, self.except_rooms, flags)
    def volatile(self):
        """Return a new operator marked volatile (delivery not guaranteed)."""
        flags = {**self.flags, 'volatile': True}
        return BroadcastOperator(self.redis_client, self.broadcast_options, self.rooms, self.except_rooms, flags)
    async def emit(self, ev: str, *args):
        """Publish event *ev* with payload *args* on the broadcast channel.

        Raises:
            BroadcastOperatorError: if *ev* is a reserved socket.io event.
        """
        if ev in RESERVED_EVENTS:
            raise BroadcastOperatorError(f'"{ev}" is a reserved event name')
        data = list(args)
        data.insert(0, ev)
        packet = {
            'type': PacketType.EVENT,
            'data': data,
            'nsp': self.broadcast_options['nsp']
        }
        # Empty collections are encoded as '' — presumably what the
        # receiving adapter expects for "no restriction"; keep as-is.
        opts = {
            'rooms': list(self.rooms) if self.rooms else '',
            'flags': self.flags if self.flags else '',
            'except': list(self.except_rooms) if self.except_rooms else ''
        }
        msg = msgpack.packb([UID, packet, opts])
        channel = self.broadcast_options['broadcast_channel']
        await self.redis_client.publish(channel, msg)
        return True
    async def sockets_join(self, rooms: Union[str, List[str]]):
        """Ask the servers to make all matching sockets join *rooms*."""
        request = json.dumps({
            'type': RequestType.REMOTE_JOIN,
            'opts': {
                'rooms': list(self.rooms),
                'except': list(self.except_rooms)
            },
            'rooms': rooms if isinstance(rooms, list) else [rooms]
        })
        await self.redis_client.publish(self.broadcast_options['request_channel'], request)
    async def sockets_leave(self, rooms: Union[str, List[str]]):
        """Ask the servers to make all matching sockets leave *rooms*."""
        request = json.dumps({
            'type': RequestType.REMOTE_LEAVE,
            'opts': {
                'rooms': list(self.rooms),
                'except': list(self.except_rooms)
            },
            'rooms': rooms if isinstance(rooms, list) else [rooms]
        })
        await self.redis_client.publish(self.broadcast_options['request_channel'], request)
    async def disconnect_sockets(self, close: bool = False):
        """Ask the servers to disconnect all matching sockets.

        Args:
            close: also close the underlying transport when True.
        """
        request = json.dumps({
            'type': RequestType.REMOTE_DISCONNECT,
            'opts': {
                'rooms': list(self.rooms),
                'except': list(self.except_rooms)
            },
            'close': close
        })
        await self.redis_client.publish(self.broadcast_options['request_channel'], request)
|
# tests/testprofiles/tests.py (from christopherpickering/djangosaml2)
# Copyright (C) 2012 <NAME> (<EMAIL>)
# Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from django.contrib.auth import get_user_model
from djangosaml2.backends import get_saml_user_model
from django.contrib.auth.models import User as DjangoUserModel
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings
from djangosaml2.backends import Saml2Backend, set_attribute
from testprofiles.models import TestUser
class BackendUtilMethodsTests(TestCase):
    """Tests for the module-level helpers of djangosaml2.backends."""
    def test_set_attribute(self):
        """set_attribute returns True only when the value actually changed."""
        u = TestUser()
        self.assertFalse(hasattr(u, 'custom_attribute'))
        # Set attribute initially
        changed = set_attribute(u, 'custom_attribute', 'value')
        self.assertTrue(changed)
        self.assertEqual(u.custom_attribute, 'value')
        # 'Update' to the same value again
        changed_same = set_attribute(u, 'custom_attribute', 'value')
        self.assertFalse(changed_same)
        self.assertEqual(u.custom_attribute, 'value')
        # Update to a different value
        changed_different = set_attribute(u, 'custom_attribute', 'new_value')
        self.assertTrue(changed_different)
        self.assertEqual(u.custom_attribute, 'new_value')
class dummyNameId:
    """Minimal stand-in for a pysaml2 NameID: the backend only reads .text."""
    # Value used when the backend treats the NameID as the username.
    text = 'dummyNameId'
class Saml2BackendTests(TestCase):
    """Unit tests for the Saml2Backend authentication backend.

    ``backend_cls`` is a hook so subclasses (see CustomizedSaml2BackendTests)
    can re-run the same suite against a customized backend.
    """
    backend_cls = Saml2Backend
    def setUp(self):
        # Fresh backend instance plus one pre-existing user to look up.
        self.backend = self.backend_cls()
        self.user = TestUser.objects.create(username='john')
    def test_get_model_ok(self):
        self.assertEqual(self.backend._user_model, TestUser)
    def test_get_model_nonexisting(self):
        # A SAML_USER_MODEL pointing at a missing model must raise.
        with override_settings(SAML_USER_MODEL='testprofiles.NonExisting'):
            with self.assertRaisesMessage(ImproperlyConfigured, "Model 'testprofiles.NonExisting' could not be loaded"):
                self.assertEqual(self.backend._user_model, None)
    def test_get_model_invalid_specifier(self):
        # The specifier must be exactly 'app_label.model_name'.
        with override_settings(SAML_USER_MODEL='random_package.specifier.testprofiles.NonExisting'):
            with self.assertRaisesMessage(ImproperlyConfigured, "Model was specified as 'random_package.specifier.testprofiles.NonExisting', but it must be of the form 'app_label.model_name'"):
                self.assertEqual(self.backend._user_model, None)
    def test_user_model_specified(self):
        # SAML_USER_MODEL takes precedence over AUTH_USER_MODEL.
        with override_settings(AUTH_USER_MODEL='auth.User'):
            with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
                self.assertEqual(self.backend._user_model, TestUser)
    def test_user_model_default(self):
        with override_settings(AUTH_USER_MODEL='auth.User'):
            self.assertEqual(self.backend._user_model, DjangoUserModel)
    def test_user_lookup_attribute_specified(self):
        with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
            with override_settings(SAML_DJANGO_USER_MAIN_ATTRIBUTE='age'):
                self.assertEqual(self.backend._user_lookup_attribute, 'age')
    def test_user_lookup_attribute_default(self):
        with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
            self.assertEqual(self.backend._user_lookup_attribute, 'username')
    def test_extract_user_identifier_params_use_nameid_present(self):
        # With SAML_USE_NAME_ID_AS_USERNAME the NameID's text is the lookup value.
        with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
            with override_settings(SAML_USE_NAME_ID_AS_USERNAME=True):
                _, lookup_value = self.backend._extract_user_identifier_params({'name_id': dummyNameId()}, {}, {})
                self.assertEqual(lookup_value, 'dummyNameId')
    def test_extract_user_identifier_params_use_nameid_missing(self):
        # Missing name_id in the session info yields no lookup value.
        with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
            with override_settings(SAML_USE_NAME_ID_AS_USERNAME=True):
                _, lookup_value = self.backend._extract_user_identifier_params({}, {}, {})
                self.assertEqual(lookup_value, None)
    def test_is_authorized(self):
        # The default backend authorizes everyone.
        self.assertTrue(self.backend.is_authorized({}, {}, '', {}))
    def test_clean_attributes(self):
        # The default backend passes attributes through unchanged.
        attributes = {'random': 'dummy', 'value': 123}
        self.assertEqual(self.backend.clean_attributes(attributes, ''), attributes)
    def test_clean_user_main_attribute(self):
        # The default backend does not alter the main attribute.
        self.assertEqual(self.backend.clean_user_main_attribute('value'), 'value')
    def test_update_user_simple(self):
        # _update_user saves the user even with no attributes to apply.
        u = TestUser(username='johny')
        self.assertIsNone(u.pk)
        u = self.backend._update_user(u, {}, {})
        self.assertIsNotNone(u.pk)
    def test_update_user(self):
        """SAML attributes are copied onto the mapped model fields."""
        attribute_mapping = {
            'uid': ('username', ),
            'mail': ('email', ),
            'cn': ('first_name', ),
            'sn': ('last_name', ),
        }
        attributes = {
            'uid': ('john', ),
            'mail': ('<EMAIL>', ),
            'cn': ('John', ),
            'sn': ('Doe', ),
        }
        self.backend._update_user(self.user, attributes, attribute_mapping)
        self.assertEqual(self.user.email, '<EMAIL>')
        self.assertEqual(self.user.first_name, 'John')
        self.assertEqual(self.user.last_name, 'Doe')
        # A mapping entry added later is applied on a subsequent update.
        attribute_mapping['saml_age'] = ('age', )
        attributes['saml_age'] = ('22', )
        self.backend._update_user(self.user, attributes, attribute_mapping)
        self.assertEqual(self.user.age, '22')
    def test_update_user_callable_attributes(self):
        """A mapping target that is a callable on the model is invoked."""
        attribute_mapping = {
            'uid': ('username', ),
            'mail': ('email', ),
            'cn': ('process_first_name', ),
            'sn': ('last_name', ),
        }
        attributes = {
            'uid': ('john', ),
            'mail': ('<EMAIL>', ),
            'cn': ('John', ),
            'sn': ('Doe', ),
        }
        self.backend._update_user(self.user, attributes, attribute_mapping)
        self.assertEqual(self.user.email, '<EMAIL>')
        self.assertEqual(self.user.first_name, 'John')
        self.assertEqual(self.user.last_name, 'Doe')
    def test_update_user_empty_attribute(self):
        """An empty attribute tuple leaves the existing field value alone."""
        self.user.last_name = 'Smith'
        self.user.save()
        attribute_mapping = {
            'uid': ('username', ),
            'mail': ('email', ),
            'cn': ('first_name', ),
            'sn': ('last_name', ),
        }
        attributes = {
            'uid': ('john', ),
            'mail': ('<EMAIL>', ),
            'cn': ('John', ),
            'sn': (),
        }
        with self.assertLogs('djangosaml2', level='DEBUG') as logs:
            self.backend._update_user(self.user, attributes, attribute_mapping)
        self.assertEqual(self.user.email, '<EMAIL>')
        self.assertEqual(self.user.first_name, 'John')
        # empty attribute list: no update
        self.assertEqual(self.user.last_name, 'Smith')
        self.assertIn(
            'DEBUG:djangosaml2:Could not find value for "sn", not updating fields "(\'last_name\',)"',
            logs.output,
        )
    def test_invalid_model_attribute_log(self):
        """Mapping to a field the model lacks logs a debug message."""
        attribute_mapping = {
            'uid': ['username'],
            'cn': ['nonexistent'],
        }
        attributes = {
            'uid': ['john'],
            'cn': ['John'],
        }
        with self.assertLogs('djangosaml2', level='DEBUG') as logs:
            user, _ = self.backend.get_or_create_user(self.backend._user_lookup_attribute, 'john', True, None, None, None, None)
            self.backend._update_user(user, attributes, attribute_mapping)
        self.assertIn(
            'DEBUG:djangosaml2:Could not find attribute "nonexistent" on user "john"',
            logs.output,
        )
    @override_settings(SAML_USER_MODEL='testprofiles.RequiredFieldUser')
    def test_create_user_with_required_fields(self):
        attribute_mapping = {
            'mail': ['email'],
            'mail_verified': ['email_verified']
        }
        attributes = {
            'mail': ['<EMAIL>'],
            'mail_verified': [True],
        }
        # User creation does not fail if several fields are required.
        user, created = self.backend.get_or_create_user(self.backend._user_lookup_attribute, '<EMAIL>', True, None, None, None, None)
        self.assertEquals(user.email, '<EMAIL>')
        self.assertIs(user.email_verified, None)
        user = self.backend._update_user(user, attributes, attribute_mapping, created)
        self.assertIs(user.email_verified, True)
    def test_django_user_main_attribute(self):
        """The lookup attribute follows USERNAME_FIELD unless overridden."""
        old_username_field = get_user_model().USERNAME_FIELD
        get_user_model().USERNAME_FIELD = 'slug'
        self.assertEqual(self.backend._user_lookup_attribute, 'slug')
        # Restore the patched class attribute before the remaining checks.
        get_user_model().USERNAME_FIELD = old_username_field
        with override_settings(AUTH_USER_MODEL='auth.User'):
            self.assertEqual(
                DjangoUserModel.USERNAME_FIELD,
                self.backend._user_lookup_attribute)
        with override_settings(
                AUTH_USER_MODEL='testprofiles.StandaloneUserModel'):
            self.assertEqual(
                self.backend._user_lookup_attribute,
                'username')
        with override_settings(SAML_DJANGO_USER_MAIN_ATTRIBUTE='foo'):
            self.assertEqual(self.backend._user_lookup_attribute, 'foo')
    def test_get_or_create_user_existing(self):
        with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
            user, created = self.backend.get_or_create_user(self.backend._user_lookup_attribute, 'john', False, None, None, None, None)
        self.assertTrue(isinstance(user, TestUser))
        self.assertFalse(created)
    def test_get_or_create_user_duplicates(self):
        # Two users share an empty 'age' value -> ambiguous lookup is logged.
        TestUser.objects.create(username='paul')
        with self.assertLogs('djangosaml2', level='DEBUG') as logs:
            with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
                user, created = self.backend.get_or_create_user('age', '', False, None, None, None, None)
        self.assertTrue(user is None)
        self.assertFalse(created)
        self.assertIn(
            "ERROR:djangosaml2:Multiple users match, model: testprofiles.testuser, lookup: {'age': ''}",
            logs.output,
        )
    def test_get_or_create_user_no_create(self):
        with self.assertLogs('djangosaml2', level='DEBUG') as logs:
            with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
                user, created = self.backend.get_or_create_user(self.backend._user_lookup_attribute, 'paul', False, None, None, None, None)
        self.assertTrue(user is None)
        self.assertFalse(created)
        self.assertIn(
            "ERROR:djangosaml2:The user does not exist, model: testprofiles.testuser, lookup: {'username': 'paul'}",
            logs.output,
        )
    def test_get_or_create_user_create(self):
        with self.assertLogs('djangosaml2', level='DEBUG') as logs:
            with override_settings(SAML_USER_MODEL='testprofiles.TestUser'):
                user, created = self.backend.get_or_create_user(self.backend._user_lookup_attribute, 'paul', True, None, None, None, None)
        self.assertTrue(isinstance(user, TestUser))
        self.assertTrue(created)
        self.assertIn(
            "DEBUG:djangosaml2:New user created: {}".format(user),
            logs.output,
        )
    def test_deprecations(self):
        """Deprecated public helpers still behave like their replacements."""
        attribute_mapping = {
            'mail': ['email'],
            'mail_verified': ['email_verified']
        }
        attributes = {
            'mail': ['<EMAIL>'],
            'mail_verified': [True],
        }
        old = self.backend.get_attribute_value('email_verified', attributes, attribute_mapping)
        self.assertEqual(old, True)
        self.assertEqual(self.backend.get_django_user_main_attribute(), self.backend._user_lookup_attribute)
        with override_settings(SAML_DJANGO_USER_MAIN_ATTRIBUTE_LOOKUP='user_name'):
            self.assertEqual(self.backend.get_django_user_main_attribute_lookup(), settings.SAML_DJANGO_USER_MAIN_ATTRIBUTE_LOOKUP)
        self.assertEqual(self.backend.get_user_query_args(''), {'username'})
        u = TestUser(username='mathieu')
        self.assertEqual(u.email, '')
        new_u = self.backend.configure_user(u, attributes, attribute_mapping)
        self.assertIsNotNone(new_u.pk)
        self.assertEqual(new_u.email, '<EMAIL>')
        u = TestUser(username='mathieu_2')
        self.assertEqual(u.email, '')
        new_u = self.backend.update_user(u, attributes, attribute_mapping)
        self.assertIsNotNone(new_u.pk)
        self.assertEqual(new_u.email, '<EMAIL>')
        u = TestUser()
        self.assertTrue(self.backend._set_attribute(u, 'new_attribute', True))
        self.assertFalse(self.backend._set_attribute(u, 'new_attribute', True))
        self.assertTrue(self.backend._set_attribute(u, 'new_attribute', False))
        self.assertEqual(get_saml_user_model(), TestUser)
class CustomizedBackend(Saml2Backend):
    """Backend subclass overriding the customization hooks, used to verify
    that subclass implementations are honoured by the base class."""
    def is_authorized(self, attributes, attribute_mapping, idp_entityid: str, assertion_info, **kwargs):
        """Authorize only staff users whose assertion carries an id."""
        staff_flag = attributes.get('is_staff', (None, ))[0]
        assertion_id = assertion_info.get('assertion_id', None)
        return staff_flag == True and assertion_id != None
    def clean_attributes(self, attributes: dict, idp_entityid: str, **kwargs) -> dict:
        """Whitelist a fixed set of attributes; missing ones default to (None,)."""
        kept = ('age', 'mail', 'is_staff', 'uid')
        return {name: attributes.get(name, (None, )) for name in kept}
    def clean_user_main_attribute(self, main_attribute):
        """Strip an e-mail style domain: keep the part before the first '@'."""
        return main_attribute.partition('@')[0] if main_attribute else main_attribute
class CustomizedSaml2BackendTests(Saml2BackendTests):
    """Re-runs the Saml2BackendTests suite against CustomizedBackend and
    overrides the tests whose expectations change with the customization."""
    backend_cls = CustomizedBackend
    def test_is_authorized(self):
        """Authorization requires both is_staff=True and an assertion id."""
        attribute_mapping = {
            'uid': ('username', ),
            'mail': ('email', ),
            'cn': ('first_name', ),
            'sn': ('last_name', ),
        }
        attributes = {
            'uid': ('john', ),
            'mail': ('<EMAIL>', ),
            'cn': ('John', ),
            'sn': ('Doe', ),
        }
        assertion_info = {
            'assertion_id': None,
            'not_on_or_after': None,
        }
        # Neither staff flag nor assertion id -> denied.
        self.assertFalse(self.backend.is_authorized(attributes, attribute_mapping, '', assertion_info))
        # Staff flag alone is not enough.
        attributes['is_staff'] = (True, )
        self.assertFalse(self.backend.is_authorized(attributes, attribute_mapping, '', assertion_info))
        # Both present -> authorized.
        assertion_info['assertion_id'] = 'abcdefg12345'
        self.assertTrue(self.backend.is_authorized(attributes, attribute_mapping, '', assertion_info))
    def test_clean_attributes(self):
        # Only the whitelisted keys survive; missing ones default to (None,).
        attributes = {'random': 'dummy', 'value': 123, 'age': '28'}
        self.assertEqual(
            self.backend.clean_attributes(attributes, ''),
            {'age': '28', 'mail': (None,), 'is_staff': (None,), 'uid': (None,)}
        )
    def test_clean_user_main_attribute(self):
        # The domain part of an e-mail style identifier is stripped.
        self.assertEqual(self.backend.clean_user_main_attribute('<EMAIL>'), 'john')
    def test_authenticate(self):
        """Full authenticate() flow including authorization gating."""
        attribute_mapping = {
            'uid': ('username', ),
            'mail': ('email', ),
            'cn': ('first_name', ),
            'sn': ('last_name', ),
            'age': ('age', ),
            'is_staff': ('is_staff', ),
        }
        attributes = {
            'uid': ('john', ),
            'mail': ('<EMAIL>', ),
            'cn': ('John', ),
            'sn': ('Doe', ),
            'age': ('28', ),
            'is_staff': (True, ),
        }
        assertion_info = {
            'assertion_id': 'abcdefg12345',
            'not_on_or_after': '',
        }
        self.assertEqual(self.user.age, '')
        self.assertEqual(self.user.is_staff, False)
        # No session info at all -> no user.
        user = self.backend.authenticate(
            None
        )
        self.assertIsNone(user)
        # Malformed session info (no 'ava') -> no user.
        user = self.backend.authenticate(
            None,
            session_info={'random': 'content'},
            attribute_mapping=attribute_mapping,
            assertion_info=assertion_info,
        )
        self.assertIsNone(user)
        # NameID-as-username enabled but no name_id present -> no user.
        with override_settings(SAML_USE_NAME_ID_AS_USERNAME=True):
            user = self.backend.authenticate(
                None,
                session_info={'ava': attributes, 'issuer': 'dummy_entity_id'},
                attribute_mapping=attribute_mapping,
                assertion_info=assertion_info,
            )
            self.assertIsNone(user)
        # Non-staff users are rejected by the customized is_authorized.
        attributes['is_staff'] = (False, )
        user = self.backend.authenticate(
            None,
            session_info={'ava': attributes, 'issuer': 'dummy_entity_id'},
            attribute_mapping=attribute_mapping,
            assertion_info=assertion_info,
        )
        self.assertIsNone(user)
        # Staff user with a valid assertion authenticates and gets updated.
        attributes['is_staff'] = (True, )
        user = self.backend.authenticate(
            None,
            session_info={'ava': attributes, 'issuer': 'dummy_entity_id'},
            attribute_mapping=attribute_mapping,
            assertion_info=assertion_info,
        )
        self.assertEqual(user, self.user)
        self.user.refresh_from_db()
        self.assertEqual(self.user.age, '28')
        self.assertEqual(self.user.is_staff, True)
    def test_user_cleaned_main_attribute(self):
        """
        In this test the username is taken from the `mail` attribute,
        but cleaned to remove the @domain part. After fetching and
        updating the user, the username remains the same.
        """
        attribute_mapping = {
            'mail': ('username',),
            'cn': ('first_name',),
            'sn': ('last_name',),
            'is_staff': ('is_staff', ),
        }
        attributes = {
            'mail': ('<EMAIL>',),
            'cn': ('John',),
            'sn': ('Doe',),
            'is_staff': (True, ),
        }
        assertion_info = {
            'assertion_id': 'abcdefg12345',
        }
        user = self.backend.authenticate(
            None,
            session_info={'ava': attributes, 'issuer': 'dummy_entity_id'},
            attribute_mapping=attribute_mapping,
            assertion_info=assertion_info,
        )
        self.assertEqual(user, self.user)
        self.user.refresh_from_db()
        self.assertEqual(user.username, 'john')
|
# stars/__init__.py
import traceback
from typing import List, Mapping, Any, Dict, Tuple, Union
import logging
from logger import APPNAME
from state import BaseState
from utils import http
logger = logging.getLogger(APPNAME)
class MessageSign(BaseState):
    """Log-line prefixes used when formatting star messages."""
    EXC = '[!]'  # error / exception
    STR = '[*]'  # neutral status
    PLS = '[+]'  # positive finding
    MIN = '[-]'  # negative finding
class TargetType(BaseState):
    """Kind of check a Star performs: a vulnerability probe or a module/service probe."""
    VULNERABILITY = 40
    MODULE = 20
class ResultCode(BaseState):
    """Progress/result codes for a single check; each code keys a list of
    message templates in Star.msg_group."""
    # start checking
    START = 10
    # exists vulnerability
    EXISTS = 20
    # not exists anything
    NOTEXISTS = 40
    # timeout
    TIMEOUT = 50
    # error
    ERROR = 60
    # detect finish
    FINISH = 100
# Module-level singletons shared by every Star instance.
msg_sign = MessageSign()
result_code = ResultCode()
target_type = TargetType()
class Star:
    """Base class for a single check ("star"): a vulnerability or module probe.

    Subclasses override :meth:`light_up` and fill in ``info``/``type``;
    :meth:`light_and_msg` wraps ``light_up`` with standard progress and
    result logging driven by the ``msg_group`` templates.
    """
    # Metadata describing this check; keys are always upper-case.
    info = {
        'NAME': '',
        'CVE': '',
        'TAG': []
    }
    # Bug fix: this was `type: target_type.VULNERABILITY` — a bare annotation
    # that never created the attribute, so reading self.type in __init__
    # raised AttributeError. It is now a real class attribute.
    type = target_type.VULNERABILITY
    def __init__(self):
        # Build the default message templates for every result code; the
        # wording depends on whether this star probes a vulnerability or a module.
        rc = result_code.to_dict()
        self.msg_group: Dict[int, List[str]] = {}
        for key in rc:
            code = rc[key]
            self.msg_group[code] = []
            if code == result_code.START:
                self.msg_group[code].append('[*][{call}][{target}] Start...')
            if code == result_code.NOTEXISTS:
                if self.type == target_type.VULNERABILITY:
                    self.msg_group[code].append('[-][{call}][{target}] Not vulnerability.')
                elif self.type == target_type.MODULE:
                    self.msg_group[code].append('[-][{call}][{target}] Not found.')
            if code == result_code.EXISTS:
                if self.type == target_type.VULNERABILITY:
                    self.msg_group[code].append('[+][{call}][{target}] Exists vulnerability!')
                elif self.type == target_type.MODULE:
                    self.msg_group[code].append('[+][{call}][{target}] Found module!')
                self.msg_group[code].append('[*][{call}][{target}] Please verify manually!')
            if code == result_code.TIMEOUT:
                self.msg_group[code].append('[!][{call}][{target}] Timeout.')
            if code == result_code.ERROR:
                self.msg_group[code].append('[!][{call}][{target}] Connection error.')
    def light_and_msg(self, dip, dport, *arg, **kwargs):
        """Run :meth:`light_up` against dip:dport with standard logging.

        Returns the (found, data) tuple from light_up; connection errors are
        logged (with the traceback at DEBUG level) and reported as not found.
        """
        self.print_msg(f'{dip}:{dport}', result_code.START)
        res = False
        data = {}
        try:
            res, data = self.light_up(dip, dport, *arg, **kwargs)
        except Exception as e:
            # ConnectionResetError: raised when the socket connection is
            #   reset, common in deserialization scenarios.
            # ConnectionAbortedError: raised when the socket connection is
            #   forcibly aborted, common when a firewall is present.
            self.print_msg(f'{dip}:{dport}', result_code.ERROR, {
                'more_detail': ['''The following information output is only used for error tracking, so don't panic''',
                                '以下信息输出仅为错误追踪使用,请勿担心', traceback.format_exc()]}, level=logging.DEBUG)
        if res:
            self.print_msg(f'{dip}:{dport}', result_code.EXISTS)
        else:
            self.print_msg(f'{dip}:{dport}', result_code.NOTEXISTS)
        return res, data
    def light_up(self, dip, dport, *arg, **kwargs) -> Tuple[Union[bool, None], dict]:
        """Perform the actual check; subclasses must override.

        Returns:
            (found, extra_data) — the base implementation reports nothing.
        """
        self.print_msg(f'{dip}:{dport}', result_code.START)
        return None, {}
    def get_info(self, key: str):
        """Return the info entry for *key* (case-insensitive) or None."""
        # Bug fix: the membership test previously used the raw key while the
        # read used key.upper(), so lower-case keys always returned None.
        key = key.upper()
        if key in self.info:
            return self.info[key]
    def set_info(self, key: str, value: Any):
        """Store *value* under the upper-cased *key*; CVE ids are upper-cased too."""
        if key.upper() == 'CVE':
            self.info[key.upper()] = value.upper()
        else:
            self.info[key.upper()] = value
    def add_msg_group(self, msg, code=result_code.START, sign=msg_sign.STR):
        """Append an extra message template for result code *code*."""
        if code not in self.msg_group:
            return None
        # Bug fix: previously this REPLACED the template list with a plain
        # string, which made print_msg iterate over single characters.
        self.msg_group[code].append(f'{sign} {msg}')
    def print_msg(self, target, code: int = result_code.START, data: Union[Dict[str, Any], None] = None,
                  level: int = logging.INFO):
        """Log every template registered for *code*, formatted with *data*.

        NOTE(review): the main templates are always logged at INFO; *level*
        only applies to the optional 'more_detail' entries (kept as-is).
        """
        if not data:
            data = {}
        data['target'] = target
        # Prefer the CVE id as the check label, falling back to the name.
        data['call'] = self.get_info("CVE") if self.get_info("CVE") else self.get_info("NAME")
        for msg in self.msg_group[code]:
            logger.info(msg.format(**data))
        if 'more_detail' in data and isinstance(data['more_detail'], List):
            # The 'more_detail' key type is List
            for detail in data['more_detail']:
                self.print_ext_msg(detail, level=level)
    def print_ext_msg(self, msg, sign=msg_sign.STR, level: int = logging.INFO):
        """Log a single auxiliary line at *level* with the given prefix sign."""
        logger.log(level, f'{sign} {msg}')
    def http(self, url, method='GET', *arg, **kwargs):
        """Thin wrapper around utils.http for use by subclasses."""
        return http(url, method, *arg, **kwargs)
class Universe:
    """Registry mapping group names to the Star classes registered under
    them via the ``@universe.groups(...)`` decorator."""
    # Class-level on purpose: one shared registry across all instances.
    actived: Dict[str, List[Star]] = {}
    def groups(self, gname=''):
        """Return a class decorator registering the decorated Star class
        under group *gname* ('default' when empty)."""
        def decorator(cls: Star):
            nonlocal gname
            if not gname:
                gname = 'default'
            self.actived.setdefault(gname, []).append(cls)
            # Bug fix: the decorator returned None, so every decorated class
            # was replaced by None at its definition site.
            return cls
        return decorator
# Shared registry instance; stars register themselves via @universe.groups(...).
universe = Universe()
|
import unittest
from torchsummary import summary, summary_string
from torchsummary.torchsummary import _build_summary_dict, _build_summary_string
from torchsummary.tests.test_models.test_model import SingleInputNet, MultipleInputNet, \
MultipleInputNetDifferentDtypes, NestedNet, CustomModule
import torch
gpu_if_available = "cuda:0" if torch.cuda.is_available() else "cpu"
class TorchSummaryTests(unittest.TestCase):
    """Parameter-count checks for summary() across model and input variants."""

    def test_single_input(self):
        model = SingleInputNet()
        input_size = (1, 28, 28)
        total, trainable = summary(model, input_size, device="cpu")
        self.assertEqual(total, 21840)
        self.assertEqual(trainable, 21840)

    def test_multiple_input(self):
        model = MultipleInputNet()
        sizes = [(1, 300), (1, 300)]
        total, trainable = summary(model, sizes, device="cpu")
        self.assertEqual(total, 31120)
        self.assertEqual(trainable, 31120)

    def test_single_layer_network(self):
        model = torch.nn.Linear(2, 5)
        total, trainable = summary(model, (1, 2), device="cpu")
        self.assertEqual(total, 15)
        self.assertEqual(trainable, 15)

    def test_single_layer_network_on_gpu(self):
        model = torch.nn.Linear(2, 5)
        if torch.cuda.is_available():
            model.cuda()
        total, trainable = summary(model, (1, 2), device=gpu_if_available)
        self.assertEqual(total, 15)
        self.assertEqual(trainable, 15)

    def test_multiple_input_types(self):
        model = MultipleInputNetDifferentDtypes()
        sizes = [(1, 300), (1, 300)]
        total, trainable = summary(model, sizes, device="cpu",
                                   dtypes=[torch.FloatTensor, torch.LongTensor])
        self.assertEqual(total, 31120)
        self.assertEqual(trainable, 31120)

    def test_recursive(self):
        model = NestedNet()
        input_size = (1, 28, 28)
        summary_dict = _build_summary_dict(model, [input_size], device='cpu')
        _, (total, trainable) = _build_summary_string(summary_dict, [input_size])
        self.assertListEqual(
            list(summary_dict.keys()),
            ['Conv2d-1', 'BatchNorm2d-2', 'MaxPool2d-3', 'ConvBlock-4',
             'Conv2d-5', 'BatchNorm2d-6', 'MaxPool2d-7', 'ConvBlock-8',
             'Dropout2d-9', 'Linear-10', 'Linear-11', 'NestedNet-12'])
        self.assertEqual(total, 21900)
        self.assertEqual(trainable, 21900)
        # With recursion disabled only the top-level modules are listed.
        summary_dict = _build_summary_dict(model, [input_size], device='cpu', recurse=False)
        _, (total, trainable) = _build_summary_string(summary_dict, [input_size])
        self.assertListEqual(
            list(summary_dict.keys()),
            ['ConvBlock-1', 'ConvBlock-2', 'Dropout2d-3', 'Linear-4',
             'Linear-5', 'NestedNet-6'])
        self.assertEqual(total, 21900)
        self.assertEqual(trainable, 21900)

    def test_custom_module(self):
        model = CustomModule()
        total, trainable = summary(model, (1, 50), device='cpu')
        self.assertEqual(total, 2500)
        self.assertEqual(trainable, 2500)
class TorchSummaryStringTests(unittest.TestCase):
    """Checks for the string-returning summary_string() API."""

    def test_single_input(self):
        model = SingleInputNet()
        report, (total, trainable) = summary_string(model, (1, 28, 28), device="cpu")
        self.assertEqual(type(report), str)
        self.assertEqual(total, 21840)
        self.assertEqual(trainable, 21840)
# buffer=True captures each test's stdout/stderr and shows it only on failure.
if __name__ == '__main__':
    unittest.main(buffer=True)
|
<filename>uhd_restpy/testplatform/sessions/ixnetwork/topology/commandsnippetsdata_bfd4407665f4331cd53fee07f65b1820.py
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
# NOTE: auto-generated REST binding; edit the generator, not this file.
class CommandSnippetsData(Base):
    """Command Snippets Data allows user to fire Yang commands to DUT
    The CommandSnippetsData class encapsulates a required commandSnippetsData resource which will be retrieved from the server every time the property is accessed.
    """

    # All state lives in Base; no per-instance __dict__ is needed.
    __slots__ = ()
    # Server-side resource name.
    _SDM_NAME = 'commandSnippetsData'
    # Maps Python property names to server-side attribute names.
    _SDM_ATT_MAP = {
        'Active': 'active',
        'CommandSnippetDirectory': 'commandSnippetDirectory',
        'CommandSnippetFile': 'commandSnippetFile',
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'Name': 'name',
        'PeriodicTransmissionInterval': 'periodicTransmissionInterval',
        'TransmissionBehaviour': 'transmissionBehaviour',
        'TransmissionCount': 'transmissionCount',
    }
    # No enum-valued attributes on this resource.
    _SDM_ENUM_MAP = {
    }

    def __init__(self, parent, list_op=False):
        super(CommandSnippetsData, self).__init__(parent, list_op)

    @property
    def Active(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
        """
        # Imported lazily to avoid a circular import at module load time.
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))

    @property
    def CommandSnippetDirectory(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Directory containing XML based Netconf compliant command snippets.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommandSnippetDirectory']))

    @property
    def CommandSnippetFile(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): File containing XML based Netconf compliant command snippet. For multiple command snippets with assymetric file names( which cannot be expressed easily as a pattern) please explore File option in Master Row Pattern Editor by putting the file namesin a .csv and pulling those values into the column cells.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CommandSnippetFile']))

    @property
    def Count(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Count'])

    @property
    def DescriptiveName(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        """
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])

    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Name of NGPF element, guaranteed to be unique in Scenario
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    @property
    def PeriodicTransmissionInterval(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Minimum interval between scheduling of two transmits of the Command Snippet.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PeriodicTransmissionInterval']))

    @property
    def TransmissionBehaviour(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Transmission behaviour for command snippet.Don't Send : This means that command will not be automatically executed. This choice should beused if user wants to control the order or/and timing of sending the command snippet to the DUTusing Test Composer or Automation Script.Once: The command will be sent only once to the DUT every time session comes up with the DUT.Periodic - Continuous: The command will be sent every Transmission Interval for the full lifetime of the session.Capture should be enabled with care if this option is selected.Periodic - Fixed Count: The command will be sent Transmission Count number of times, every Periodic Transmission Interval.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TransmissionBehaviour']))

    @property
    def TransmissionCount(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): Number of times to transmit the Command Snippet.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TransmissionCount']))

    def update(self, Name=None):
        # type: (str) -> CommandSnippetsData
        """Updates commandSnippetsData resource on the server.

        This method has some named parameters with a type: obj (Multivalue).
        The Multivalue class has documentation that details the possible values for those named parameters.

        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def ExecuteCommand(self, *args, **kwargs):
        # type: (*Any, **Any) -> Union[List[str], None]
        """Executes the executeCommand operation on the server.

        Send the selected command snippet if the Netconf session is established with the Netconf Server

        The IxNetwork model allows for multiple method Signatures with the same name while python does not.

        executeCommand(async_operation=bool)
        ------------------------------------
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

        executeCommand(SessionIndices=list, async_operation=bool)
        ---------------------------------------------------------
        - SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

        executeCommand(SessionIndices=string, async_operation=bool)
        -----------------------------------------------------------
        - SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

        executeCommand(Arg2=list, async_operation=bool)list
        ---------------------------------------------------
        - Arg2 (list(number)): List of indices into the device group.
        - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
        - Returns list(str): ID to associate each async action invocation

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Positional args map to Arg2..ArgN on the wire; Arg1 is always self.
        payload = { "Arg1": self }
        for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
        for item in kwargs.items(): payload[item[0]] = item[1]
        return self._execute('executeCommand', payload=payload, response_object=None)

    def get_device_ids(self, PortNames=None, Active=None, CommandSnippetDirectory=None, CommandSnippetFile=None, PeriodicTransmissionInterval=None, TransmissionBehaviour=None, TransmissionCount=None):
        """Base class infrastructure that gets a list of commandSnippetsData device ids encapsulated by this object.

        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

        Args
        ----
        - PortNames (str): optional regex of port names
        - Active (str): optional regex of active
        - CommandSnippetDirectory (str): optional regex of commandSnippetDirectory
        - CommandSnippetFile (str): optional regex of commandSnippetFile
        - PeriodicTransmissionInterval (str): optional regex of periodicTransmissionInterval
        - TransmissionBehaviour (str): optional regex of transmissionBehaviour
        - TransmissionCount (str): optional regex of transmissionCount

        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())
|
<reponame>louisenje/Pitches
from flask import render_template,request,redirect,url_for,abort
from . import main
from ..models import Pitch,Comment
from flask_login import login_required,current_user
from ..models import User
from ..import db,photos
from .forms import UpdateProfile,AddPitch,CommentInput
from datetime import datetime
# views
@main.route('/')
def index():
    '''
    View root page function that returns the index page and its data
    '''
    # The template expects each category name passed as its own keyword.
    category_names = {name: name for name in
                      ("general", "pickuplines", "interviewpitch",
                       "productpitch", "promotionpitch")}
    return render_template('index.html',
                           message='Index Page',
                           title='Home - Welcome to The Pitchpitches',
                           pitch=Pitch.get_all_pitch(),
                           **category_names)
@main.route('/pitch/<category>')
def pitch(category):
    """Render the page for one pitch category and handle comment submission.

    NOTE(review): ``pitchess`` is the first pitch of the category and may be
    None for an empty category, in which case ``pitchess.id`` below raises —
    presumably commented categories are never empty; TODO confirm.
    """
    # Full result set fetched here but overwritten before rendering below.
    pitches=Pitch.query.filter_by(category=category).all()
    pitchess=Pitch.query.filter_by(category=category).first()
    pitcheses=Pitch.get_pitch_category(category)
    form=CommentInput()
    if form.validate_on_submit():
        description=form.description.data
        # New comments start with zero votes and attach to the first pitch.
        new_comment=Comment(description=description,upvote=0,downvote=0,pitch_id=pitchess.id)
        # Save the comment, then redirect back to this category (POST/redirect/GET).
        new_comment.save_new_comment()
        return redirect(url_for('.pitch',category=pitcheses.category))
    # Re-query via the model helper; this is what actually gets rendered.
    pitches=Pitch.get_pitch_category(category)
    return render_template('categories.html',category = category,pitches=pitches,form=form)
@main.route('/user/<uname>')
def profile(uname):
    """Render a user's profile page with their pitches and category labels.

    Unknown usernames 404 before any attribute access — the original
    dereferenced user.id first, raising AttributeError instead of aborting.
    """
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    pitches = Pitch.get_pitch(user.id)
    return render_template("profile/profile.html", user=user, pitches=pitches,
                           general="general", pickuplines="pickuplines",
                           interviewpitch="interviewpitch",
                           productpitch="productpitch",
                           promotionpitch="promotionpitch")
@main.route('/user/<uname>/update', methods=['GET', 'POST'])
@login_required
def update_profile(uname):
    """Show and process the bio-update form for the given username."""
    user = User.query.filter_by(username=uname).first()
    if user is None:
        abort(404)
    form = UpdateProfile()
    if not form.validate_on_submit():
        # First visit or invalid submission: (re-)display the form.
        return render_template('profile/update.html', form=form)
    user.bio = form.bio.data
    db.session.add(user)
    db.session.commit()
    return redirect(url_for('.profile', uname=user.username))
@main.route('/user/<uname>/update/pic', methods=['POST'])
@login_required
def update_pic(uname):
    """Save an uploaded profile photo and record its path on the user.

    The stored path now interpolates the saved filename; the original wrote
    the literal text 'photos/(unknown)' for every upload (a corrupted
    f-string placeholder).
    """
    user = User.query.filter_by(username=uname).first()
    if 'photo' in request.files:
        filename = photos.save(request.files['photo'])
        user.profile_pic_path = f'photos/{filename}'
        db.session.commit()
    return redirect(url_for('main.profile', uname=uname))
@main.route('/user/pitch/<id>', methods=['GET', 'POST'])
@login_required
def new_pitch(id):
    """Show the new-pitch form and save a submitted pitch for user *id*."""
    form = AddPitch()
    user = User.get_user(id)
    if form.validate_on_submit():
        submitted = Pitch(title=form.title.data,
                          category=form.category.data,
                          description=form.description.data,
                          user_id=user.id)
        submitted.save_pitch()
        return redirect(url_for('.index', id=user.id))
    return render_template('pitch.html', title='New Pitch', pitch_form=form, user=user)
|
<reponame>thautwarm/rtpy
from wisepy.fn_describe import describe
from wisepy.dynamic_cast import dynamic_cast
from wisepy.cmd_parser import parse
from wisepy.cmd_ast import Quote, Cmd, Closure, PlaceHolder
from wisepy.color import *
from Redy.Magic.Pattern import Pattern
from pprint import pprint
import types
import io
class Component:
    """A named, callable command with an optional custom result displayer."""

    def __init__(self, fn, name, help_doc):
        self.fn: types.FunctionType = fn      # original function
        self.name: str = name                 # command name
        self.help_doc = help_doc              # rendered help text
        self._display = None                  # user-registered displayer, if any

    def __call__(self, *args, **kwargs):
        return dynamic_cast(self.fn)(*args, **kwargs)

    def displayer(self, func):
        """Register a custom displayer for this command's output; returns self."""
        self._display = func
        return self

    def display(self, result):
        """Render *result*: custom displayer if registered, pretty-print otherwise."""
        if self._display:
            self._display(result)
            return
        if isinstance(result, (list, tuple, dict)):
            pprint(Blue(result))
        elif result is not None:
            print(result)
class ShellFunction:
    """A deferred block of shell statements.

    Calling the instance evaluates every statement in order through the
    owning Talking session and returns the value of the last one (or None
    for an empty block).
    """

    def __init__(self, talking_session: 'Talking', stmts):
        self.talking_session = talking_session
        self.stmts = stmts

    def __call__(self, ctx: dict):
        if not self.stmts:
            return None
        result = None
        for statement in self.stmts:
            result = self.talking_session.visit(statement, ctx)
        return result

    def __repr__(self):
        return repr(self.stmts)
class Talking:
    """Command dispatcher: registers functions as commands and evaluates
    parsed shell ASTs against the registry."""

    def __init__(self):
        # name -> Component for every registered/aliased command.
        self._registered_cmds = {}
        # Most recently dispatched Component / Cmd (used by from_text/from_io
        # to display the result).
        self._current_com = None
        self._current_cmd = None

    @property
    def registered_cmds(self):
        return self._registered_cmds

    def __call__(self, func: types.FunctionType):
        # Using the instance as a decorator registers under the function name.
        return self.alias(func.__name__)(func)

    def alias(self, name):
        """Return a decorator registering a function as command *name*."""
        def _inner(func):
            com = Component(func, name, describe(func, name))
            self._registered_cmds[name] = com
            return com
        return _inner

    # The visit methods below form a Redy `Pattern` multimethod: this base
    # returns the dispatch key (the AST node type), and each @visit.case
    # registers a handler for one node type. The repeated `visit` names are
    # intentional — Pattern collects them by case, not by attribute name.
    @Pattern
    def visit(self, ast, ctx):
        return type(ast)

    @visit.case(Quote)
    def visit_quote(self, ast: Quote, ctx: dict):
        return self.visit(ast.cmd, ctx)

    @visit.case(Closure)
    def visit_closure(self, ast: Closure, _):
        # Closures evaluate lazily: wrap the statements for a later call.
        return ShellFunction(self, ast.stmts)

    @visit.case(PlaceHolder)
    def visit_place_holder(self, ast: PlaceHolder, ctx: dict):
        name = self.visit(ast.value, ctx)
        return ctx.get(name)

    @visit.case(str)
    def visit(self, ast: str, _):
        # Bare strings evaluate to themselves.
        return ast

    @visit.case(Cmd)
    def visit(self, command: Cmd, ctx: dict):
        visit = self.visit
        instruction, args, kwargs = command.inst, command.args, command.kwargs
        instruction = visit(instruction, ctx)
        self._current_com = com = self._registered_cmds.get(instruction)
        if not com:
            raise ValueError(
                f'No function registered/aliased as `{instruction}`.')
        # `--help` on any command short-circuits to its help text.
        if kwargs and any(True for k, _ in kwargs if k == 'help'):
            return com.help_doc
        args = (visit(arg, ctx) for arg in args) if args else ()
        kwargs = {k: v for k, v in kwargs} if kwargs else {}
        try:
            return com(*args, **kwargs)
        except Exception as e:
            # Show the command's help before propagating the failure.
            print(Purple2(com.help_doc))
            raise e

    def from_io(self, ios: io.TextIOWrapper, ctx: dict = None):
        """Parse and evaluate a command read from *ios*, then display it."""
        ctx = ctx or {}
        result = self.visit(parse(ios.read()).result.value, ctx)
        self._current_com.display(result)

    def from_text(self, text, ctx: dict = None):
        """Parse and evaluate *text* as a command line, then display it."""
        ctx = ctx or {}
        result = self.visit(parse(text).result.value, ctx)
        self._current_com.display(result)

    def on(self):
        """CLI entry point: dispatch sys.argv as a command line, then exit."""
        import sys
        recv = ' '.join(sys.argv[1:])
        stripped = recv.strip()
        if not stripped:
            print(
                Yellow(
                    "No command input, use --help to show available commands and their information."
                ))
        elif stripped == '--help':
            print(Blue("Available commands:"))
            for each in self._registered_cmds.values():
                print(Purple2(each.help_doc))
        else:
            self.from_text(recv, {})
        sys.exit(0)
|
import os
import argparse
import tensorflow as tf
import tensorflow.keras as keras
from rich.console import Console
from rich.markdown import Markdown
#from models.DGV2.model_v2 import DGMV2
from models.DGV2.model_v2_1 import DGMV2_1
from models.DGV4.model_v4 import DGMV4
from models.DGV4.model_v4_mnet import DGMV5
from models.DGV5.model_v5 import DGMV2
from models.DGV6.model_v6 import DGMV6
from models.DGV7.model_v7 import DGMV8
from models.DGV8.model_v8 import DGMV9
from models.DGV8.model_v8_2 import DGMV9_2
from trainer import Trainer
from utils import configs
def console_handler(console):
    """Parse command-line arguments, push them into the shared `configs`
    object, and build a mirrored multi-GPU distribution strategy.

    Flags:
      -gpu [--num-gpu]        number of GPUs to train on [default: 2]
      -s   [--start-epoch]    epoch to resume training from [default: 1]
      -e   [--end-epoch]      last epoch of training [default: 500]
      -b   [--batch-size]     training batch size [default: 512]
      -n   [--num-samples]    samples in the get_batch per epoch [default: 25k]
      -w   [--show-warnings]  TensorFlow log level, 0-3
      -l   [--load] PATH      model checkpoint to continue training from

    Returns a (strategy, parsed_args) pair.
    """
    parser = argparse.ArgumentParser(description='Argument Parser For DGM models.')
    parser.add_argument('-gpu', '--num-gpu', type=int, default=2, metavar='',
                        help="the # of GPUs used to train the model. [default : 2]")
    parser.add_argument('-s', '--start-epoch', type=int, default=1, metavar='',
                        help="the epoch to porsuit the training from [default : 1]")
    parser.add_argument('-e', '--end-epoch', type=int, default=500, metavar='',
                        help="the last epoch of training [default : 500]")
    parser.add_argument('-b', '--batch-size', type=int, default=512, metavar='',
                        help="the batch size used to train model [default : 512]")
    parser.add_argument('-n', '--num-samples', type=int, default=25_000, metavar='',
                        help="# of samples in the get_batch per epoch [default : 25k]")
    parser.add_argument('-w', '--show-warnings', type=int, default=2, metavar='',
                        help="the warnings show level [from 0 to 3]")
    parser.add_argument('-l', '--load', type=str, default='',
                        help="load Model to continue training")
    args = parser.parse_args()

    # Propagate the parsed values into the global configuration object.
    configs.devices = [f"/device:GPU:{i}" for i in range(args.num_gpu)]
    configs.start_epoch = args.start_epoch
    configs.end_epoch = args.end_epoch
    configs.batch_size = args.batch_size
    configs.n_samples = args.num_samples
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(args.show_warnings)
    console.print(f" Here are your `configs` : {configs}")

    # One replica per configured GPU device.
    strategy = tf.distribute.MirroredStrategy(configs.devices)
    return strategy, args
if __name__ == '__main__':
    console = Console()
    strategy , args = console_handler(console)
    title = Markdown(f"# Start training ON {len(configs.devices)} GPU's", style=configs.info_style)
    console.print(title)
    # Build the model inside the distribution scope so variables are mirrored
    # across the configured GPUs.
    with strategy.scope():
        # NOTE(review): DGMV5 is hard-wired here although several DGM variants
        # are imported above — presumably the currently active experiment.
        dgm = DGMV5()
        # Continue from a saved checkpoint if -l/--load was given, else build fresh.
        if args.load != '':
            model = keras.models.load_model(args.load)
        else:
            model = dgm.build_model()
        dgm.model = model
        dgm.summary()
        # Train
        trainer = Trainer(dgm)
        history = trainer.train()
|
import abc
from .logic import Predicate, Object, Action, State
###
# Objects
###
class Robot(Object):
    """Planning-domain object representing a robot arm."""

    def __init__(self, name):
        super().__init__(name, 'Robot')
class Box(Object):
    """Planning-domain object representing a movable box."""

    def __init__(self, name):
        super().__init__(name, 'Box')
class Area(Object):
    """Planning-domain object representing a placement area."""

    def __init__(self, name):
        super().__init__(name, 'Area')
###
# Predicates
###
class In(Predicate):
    """Predicate stating that a box is (or, when value is False, is not)
    inside an area."""

    def __init__(self, box, area, value=True):
        super().__init__('In', [box, area], value)

    def __str__(self):
        relation = "IN" if self.value else "NOT IN"
        return "{0} {1} {2}".format(self.args[0], relation, self.args[1])
class Holding(Predicate):
    """Predicate recording what a robot is holding (value None = empty hand)."""

    def __init__(self, robot, value=None):
        super().__init__('Holding', [robot], value)

    def __str__(self):
        return f"{self.args[0]} is HOLDING {self.value}"
class Free(Predicate):
    """Predicate stating that a robot's gripper is free.

    Fixed: the predicate name was copy-pasted as 'Holding', which made
    Free(robot) indistinguishable from a Holding predicate in state lookups.
    """

    def __init__(self, robot, value=True):
        super().__init__('Free', [robot], value)
class Reachable(Predicate):
    """Predicate stating that a robot can reach a pose (a Box or an Area)."""

    def __init__(self, robot, pose, value=True):
        if not isinstance(pose, (Box, Area)):
            raise Exception(F"Invalid argument type {pose}, expected Box or Area")
        super().__init__('Reachable', [robot, pose], value)
###
# Actions
###
class Pick(Action):
    """Action: a robot with an empty gripper picks up a reachable box."""

    def __init__(self, robot, box):
        self.robot = robot
        self.box = box
        super().__init__("Pick")

    def applicable(self, state):
        """Applicable when the gripper is empty and the box is reachable."""
        empty_hand = state.check(Holding(self.robot, None))
        return empty_hand and state.check(Reachable(self.robot, self.box, True))

    def apply(self, state):
        """Return the successor state in which the robot holds the box."""
        if not self.applicable(state):
            raise Exception('Action is not applicable in this state')
        successor = state.copy()
        successor.remove(Holding(self.robot, None))
        successor.remove(Reachable(self.robot, self.box))
        successor.set(Holding(self.robot, self.box))
        return successor

    def __str__(self):
        return f"{self.robot} Pick {self.box}"
class Place(Action):
    """Action: a robot places the box it is holding into a reachable area."""

    def __init__(self, robot, box, area):
        self.robot = robot
        self.box = box
        self.area = area
        super().__init__("Place")

    def applicable(self, state):
        """Applicable when the robot holds the box and can reach the area."""
        holding_box = state.check(Holding(self.robot, self.box))
        return holding_box and state.check(Reachable(self.robot, self.area, True))

    def apply(self, state):
        """Return the successor state: box in area, gripper empty."""
        if not self.applicable(state):
            raise Exception('Action is not applicable in this state')
        successor = state.copy()
        successor.remove(Holding(self.robot, self.box))
        successor.set(Holding(self.robot, None))
        successor.set(In(self.box, self.area))
        return successor

    def __str__(self):
        return f'{self.robot} Place {self.box} in {self.area}'
class Domain(object):
    """Base planning domain: a registry of uniquely-named objects."""

    def __init__(self):
        self._map = {}

    def add_object(self, obj):
        """Register *obj* under obj.name; names must be unique."""
        name = obj.name
        if name in self._map:
            raise Exception("Object name must be unique")
        self._map[name] = obj

    def get_object_by_name(self, name):
        """Look up a registered object by name (KeyError if unknown)."""
        return self._map[name]

    def __getitem__(self, key):
        # Allow dictionary-style access: domain['box0'].
        return self.get_object_by_name(key)

    @abc.abstractmethod
    def get_applicable_actions(self, state):
        """Subclasses enumerate the actions applicable in *state*."""
        raise NotImplementedError()
class PickAndPlaceDomain(Domain):
    """Pick-and-place planning domain built from a workspace description."""

    def __init__(self, workspace):
        super().__init__()
        self.robot = Robot("arm")
        self.boxes = []
        self.areas = []
        # Maps planning-object names to the underlying workspace instances.
        self.workspace_map = {self.robot.name: workspace.arm}
        for box in workspace.boxes:
            self.add_box(box, box.name)
        for area in workspace.areas:
            self.add_area(area, area.name)

    def add_area(self, obj, name=None):
        """Add an area to the planning domain; *obj* is the workspace instance."""
        if name is None:
            name = f"area{len(self.areas)}"
        area = Area(name)
        self.areas.append(area)
        self.workspace_map[name] = obj
        self.add_object(area)
        return area

    def add_box(self, obj, name=None):
        """Add a box to the planning domain; *obj* is the workspace instance."""
        if name is None:
            name = f"box{len(self.boxes)}"
        box = Box(name)
        self.boxes.append(box)
        self.workspace_map[name] = obj
        self.add_object(box)
        return box

    def get_workspace_object(self, name):
        """Return the workspace instance registered under *name*."""
        return self.workspace_map[name]

    def get_applicable_actions(self, state):
        """Enumerate Pick actions (empty gripper) or Place actions (holding)."""
        held = state.find(Holding, self.robot)
        if held is None:
            return [Pick(self.robot, box) for box in self.boxes
                    if state.check(Reachable(self.robot, box))]
        return [Place(self.robot, held, area) for area in self.areas
                if state.check(Reachable(self.robot, area))]
<filename>MCP2221/MCP2221.py<gh_stars>0
import hid
from time import sleep
from enum import Enum, unique, auto
from typing import Dict, List, Union
class TYPE(Enum):
    """Pin-function selections for the MCP2221 GP pins (see InitGP)."""
    INPUT = auto()
    OUTPUT = auto()
    ADC = auto()
    DAC = auto()
    CLOCK_OUT = auto()
    INTERRUPT = auto()
    LED_RX = auto()
    LED_TX = auto()
    LED_I2C = auto()
    SSPND = auto()
    USBCFG = auto()
@unique
class VRM(Enum):
    """DAC/ADC voltage reference: the VDD rail or an internal reference."""
    VDD = 0
    REF_1_024V = 1
    REF_2_048V = 2
    REF_4_096V = 3
@unique
class DUTY(Enum):
    """Clock-output duty cycle selection (percentage of high time)."""
    CYCLE_0 = 0
    CYCLE_25 = 1
    CYCLE_50 = 2
    CYCLE_75 = 3
@unique
class CLOCK(Enum):
    """Clock-output divider values; the name gives the resulting frequency."""
    DIV_375KHZ = 0b111
    DIV_750KHZ = 0b110
    DIV_1_5MHZ = 0b101
    DIV_3MHZ = 0b100
    DIV_6MHZ = 0b011
    DIV_12MHZ = 0b010
    DIV_24MHZ = 0b001
@unique
class FLASH(Enum):
    """Sub-addresses for the Read/Write Flash Data HID commands."""
    CHIP_SETTING = 0x00
    GP_SETTING = 0x01
    USB_MANUFACTURER = 0x02
    USB_PRODUCT_DESCRIPTOR = 0x03
    USB_SERIAL_NUMBER = 0x04
    CHIP_SERIAL_NUMBER = 0x05
class MCP2221:
def __init__(self, VID=0x04D8, PID=0x00DD, dev=0):
    """Open the dev-th HID interface matching VID/PID (MCP2221 defaults)."""
    self.mcp2221 = hid.device()
    device_path = hid.enumerate(VID, PID)[dev]["path"]
    self.mcp2221.open_path(device_path)
    self.VID = VID
    self.PID = PID
def _getConfig(self):
    """ Get current config & prepare for set """
    # Read the current SRAM settings (0x61) and build a Set-SRAM command
    # buffer (0x60) that re-applies them, so callers can tweak one field
    # without clobbering the rest. Byte offsets follow the device's HID
    # report layout.
    buf = [0] * 65
    buf[1] = 0x61  # get SRAM settings
    rbuf = self._send(buf)
    buf[0 + 1] = 0x60  # set SRAM settings
    # Clock Output Divider Value
    buf[2 + 1] |= (rbuf[5] & 0b11111)
    # DAC Voltage Reference
    buf[3 + 1] |= rbuf[6] >> 5
    # ADC Voltage Reference
    buf[5 + 1] |= (rbuf[7] >> 2) & 0b111
    # Interrupt detection
    # TODO
    # GP0 Settings
    buf[8 + 1] = rbuf[22]
    # GP1 Settings
    buf[9 + 1] = rbuf[23]
    # GP2 Settings
    buf[10 + 1] = rbuf[24]
    # GP3 Settings
    buf[11 + 1] = rbuf[25]
    return buf
def _send(self, buffer: List[int]) -> List[int]:
""" Send buffer """
self.mcp2221.write(buffer)
return self.mcp2221.read(65)
def SetClockOutput(self, duty: DUTY, clock: CLOCK):
    """ Set clock output """
    # Validate the enum arguments early so no malformed field is written.
    if not isinstance(duty, DUTY):
        raise TypeError("Invalid duty cycle value")
    if not isinstance(clock, CLOCK):
        raise TypeError("Invalid clock divider value")
    buf = self._getConfig()
    buf[2 + 1] = 0b10000000  # set mode (bit 7 = apply this setting)
    buf[2 + 1] |= clock.value        # divider in bits 0-2
    buf[2 + 1] |= (duty.value << 3)  # duty cycle in bits 3-4
    self._send(buf)
def SetDACVoltageReference(self, ref: VRM):
    """ Set DAC voltage reference """
    if not isinstance(ref, VRM):
        raise TypeError("Invalid DAC voltage reference")
    buf = self._getConfig()
    buf[3 + 1] = 0b10000000  # set mode (bit 7 = apply this setting)
    if ref != VRM.VDD:
        # Select the internal reference level and flag that the VRM is used;
        # for VDD both bits stay clear.
        buf[3 + 1] |= ref.value << 1
        buf[3 + 1] |= 0b1
    self._send(buf)
def WriteDAC(self, value: int):
    """ Write DAC value (0-31) """
    if value < 0 or value > 31:
        raise ValueError("Invalid value")
    buf = self._getConfig()
    # Bit 7 marks the field as "apply"; the 5-bit DAC value goes in bits 0-4.
    buf[4 + 1] |= 0b10000000 | value
    self._send(buf)
def SetADCVoltageReference(self, ref: VRM):
    """ Set ADC voltage reference """
    if not isinstance(ref, VRM):
        raise TypeError("Invalid ADC voltage reference")
    buf = self._getConfig()
    buf[5 + 1] = 0b10000000  # set mode (bit 7 = apply this setting)
    if ref != VRM.VDD:
        # Select the internal reference level; for VDD both bits stay clear.
        buf[5 + 1] |= ref.value << 1
        buf[5 + 1] |= 0b1  # VRM is used
    self._send(buf)
def SetInterruptDetection(self):
    """Configure interrupt-on-change detection.

    Not implemented yet — the corresponding bits are also TODO in
    _getConfig().
    """
    # TODO
    raise Exception("Not yet implemented")
def InitGP(self, pin: int, type: TYPE, value: int = 0):
    """ Init GPIO """
    # Configure one GP pin. Each pin has its own settings byte: bit 3
    # selects input direction, bit 4 carries the initial output value, and
    # bits 0-2 select a pin-specific dedicated function. Only the functions
    # valid for the given pin are accepted.
    # NOTE(review): the parameter name `type` shadows the builtin type()
    # inside this method.
    buf = self._getConfig()
    buf[7 + 1] = 0b10000000  # alter GPIO
    if pin == 0:
        buf[8 + 1] = 0
        if type == TYPE.INPUT:
            buf[8 + 1] |= 1 << 3
        elif type == TYPE.OUTPUT:
            buf[8 + 1] |= (value & 1) << 4
        elif type == TYPE.SSPND:
            buf[8 + 1] |= 1
        elif type == TYPE.LED_RX:
            buf[8 + 1] |= 2
        else:
            raise TypeError("Invalid type on pin GP0")
    elif pin == 1:
        buf[9 + 1] = 0
        if type == TYPE.INPUT:
            buf[9 + 1] |= 1 << 3
        elif type == TYPE.OUTPUT:
            buf[9 + 1] |= (value & 1) << 4
        elif type == TYPE.CLOCK_OUT:
            buf[9 + 1] |= 1
        elif type == TYPE.ADC:
            buf[9 + 1] |= 2
        elif type == TYPE.LED_TX:
            buf[9 + 1] |= 3
        elif type == TYPE.INTERRUPT:
            buf[9 + 1] |= 4
        else:
            raise TypeError("Invalid type on pin GP1")
    elif pin == 2:
        buf[10 + 1] = 0
        if type == TYPE.INPUT:
            buf[10 + 1] |= 1 << 3
        elif type == TYPE.OUTPUT:
            buf[10 + 1] |= (value & 1) << 4
        elif type == TYPE.USBCFG:
            buf[10 + 1] |= 1
        elif type == TYPE.ADC:
            buf[10 + 1] |= 2
        elif type == TYPE.DAC:
            buf[10 + 1] |= 3
        else:
            raise TypeError("Invalid type on pin GP2")
    elif pin == 3:
        buf[11 + 1] = 0
        if type == TYPE.INPUT:
            buf[11 + 1] |= 1 << 3
        elif type == TYPE.OUTPUT:
            buf[11 + 1] |= (value & 1) << 4
        elif type == TYPE.LED_I2C:
            buf[11 + 1] |= 1
        elif type == TYPE.ADC:
            buf[11 + 1] |= 2
        elif type == TYPE.DAC:
            buf[11 + 1] |= 3
        else:
            raise TypeError("Invalid type on pin GP3")
    else:
        raise ValueError("Invalid pin number")
    self._send(buf)
def ReadAllGP(self):
    """ Read GPIOs in bulk (when set as input or output) """
    request = [0] * 65
    request[1] = 0x51  # Get GPIO Values
    response = self._send(request)
    # Byte 0 echoes the command, byte 1 is the status (0x00 = success).
    if response[0] != 0x51 or response[1] != 0x00:
        return None
    return [response[2], response[4], response[6], response[8]]
def ReadGP(self, pin: int) -> Union[int, None]:
    """ Read GPIO pin value (when set as input or output) """
    if not 0 <= pin <= 3:
        raise ValueError("Invalid pin number")
    values = self.ReadAllGP()
    return values[pin] if values else None
def WriteAllGP(self, gp0: Union[int, None], gp1: Union[int, None],
gp2: Union[int, None], gp3: Union[int, None]):
""" Write GPIO output """
buf = [0] * 65
buf[0 + 1] = 0x50 # Set GPIO Values
if gp0 is not None:
buf[2 + 1] = 1 # Alter GPIO output
buf[3 + 1] = gp0 # output value
if gp1 is not None:
buf[6 + 1] = 1 # Alter GPIO output
buf[7 + 1] = gp1 # output value
if gp2 is not None:
buf[10 + 1] = 1 # Alter GPIO output
buf[11 + 1] = gp2 # output value
if gp3 is not None:
buf[14 + 1] = 1 # Alter GPIO output
buf[15 + 1] = gp3 # output value
self._send(buf)
def WriteGP(self, pin: int, value: int):
""" Write GPIO output """
if not 0 <= pin <= 3:
raise ValueError("Invalid pin number")
buf = [0] * 65
buf[0 + 1] = 0x50 # Set GPIO Values
buf[2 + pin * 4 + 1] = 1 # Alter GPIO output
buf[3 + pin * 4 + 1] = value & 1 # output value
self._send(buf)
    def ReadAllADC(self):
        """ Read ADC in bulk """
        buf = [0] * 65
        buf[0 + 1] = 0x10  # Status/Set Parameters command
        buf = self._send(buf)
        # Valid reply echoes the command code with a 0x00 status byte.
        if buf[0] == 0x10 and buf[1] == 0x00:
            # The three ADC channel readings are little-endian 16-bit words
            # located at response offsets 50..55.
            return [buf[50] | (buf[51] << 8),
                    buf[52] | (buf[53] << 8),
                    buf[54] | (buf[55] << 8)]
        else:
            return None
def ReadADC(self, channel: int) -> Union[int, None]:
""" Read specific ADC channel """
if not 1 <= channel <= 3:
raise ValueError("Invalid channel number")
adc = self.ReadAllADC()
if adc:
return adc[channel - 1]
else:
return None
def GetDeviceInfo(self) -> Dict[str, Union[str, None]]:
""" Get device information """
output = dict()
output["manufacturer"] = self.mcp2221.get_manufacturer_string()
output["product"] = self.mcp2221.get_product_string()
output["serial"] = self.mcp2221.get_serial_number_string()
return output
    def ReadFlash(self, address: FLASH):
        """ Read data from flash.

        :param address: which flash section to read (FLASH enum member)
        :return: list of payload bytes, or [] on failure
        :raises TypeError: if ``address`` is not a FLASH member
        """
        if not isinstance(address, FLASH):
            raise TypeError("Invalid flash address")
        buf = [0] * 65
        buf[0 + 1] = 0xB0  # Read Flash Data command
        buf[1 + 1] = address.value  # flash section selector
        buf = self._send(buf)
        # Valid reply echoes the command code with a 0x00 status byte;
        # buf[2] holds the payload length, payload starts at buf[4].
        if buf[0] == 0xB0 and buf[1] == 0x00:
            if address == FLASH.GP_SETTING or \
               address == FLASH.CHIP_SETTING or \
               address == FLASH.CHIP_SERIAL_NUMBER:
                return buf[4:(4+buf[2])]
            elif buf[3] == 0x03:
                # String sections: length appears to include a 2-byte
                # descriptor header, hence the -2 — TODO confirm against
                # the device datasheet.
                return buf[4:(4+buf[2]-2)]
            else:
                return []
        else:
            return []
    def WriteFlash(self, address: FLASH, data: List[int]) -> Union[int, None]:
        """ Write data to flash.

        :param address: writable flash section (the serial number section
            is rejected as read-only)
        :param data: 1..60 payload bytes
        :return: device status byte, or None if the reply was malformed
        :raises TypeError: on an invalid or read-only flash address
        :raises ValueError: on an empty or oversized payload
        """
        if not isinstance(address, FLASH) \
           or address == FLASH.CHIP_SERIAL_NUMBER:
            raise TypeError("Invalid flash address")
        # Payload must fit in one report alongside the 3-byte header.
        if len(data) == 0 or len(data) > 60:
            raise ValueError("Invalid data length")
        buf = [0] * 3
        buf[0 + 1] = 0xB1  # Write Flash Data command
        buf[1 + 1] = address.value
        # Pad the report out to the full 65 bytes (report id + 64 data).
        fill = [0] * (65 - 3 - len(data))
        buf = self._send([*buf, *data, *fill])
        if buf[0] == 0xB1:
            return buf[1]  # device status byte
        else:
            return None
    def Reset(self):
        """ Reset the device """
        # Reset Chip command: 0x70 followed by the magic bytes 0xAB 0xCD
        # 0xEF; the leading 0x00 is the HID report id.
        buf = [0x00, 0x70, 0xAB, 0xCD, 0xEF]
        fill = [0] * 60
        # Written directly rather than via _send: no reply is read —
        # presumably the device drops off the bus and re-enumerates,
        # hence the 1-second wait below.
        self.mcp2221.write([*buf, *fill])
        sleep(1)
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
import copy
import json
import numpy as np
from pathlib import Path
import pytest
from idaes.core.ui.flowsheet import FlowsheetSerializer, FlowsheetDiff, validate_flowsheet
from idaes.models.properties.swco2 import SWCO2ParameterBlock
from idaes.models.unit_models import Heater, PressureChanger, HeatExchanger
from idaes.models.unit_models.pressure_changer import ThermodynamicAssumption
from pyomo.environ import Expression, TransformationFactory, ConcreteModel
from pyomo.network import Arc
from idaes.core import FlowsheetBlock
from idaes.models.properties.activity_coeff_models.BTX_activity_coeff_VLE import (
BTXParameterBlock,
)
from idaes.models.unit_models import Flash, Mixer
from .shared import dict_diff
# === Sample data ===
# Directory containing this test module; stored JSON fixtures live here.
test_dir = Path(__file__).parent
# Minimal empty serialized-flowsheet skeleton; fixtures deep-copy and
# extend this rather than building the structure from scratch.
base_model = {
    "model": {
        "id": "Model1",
        "unit_models": {},
        "arcs": {}
    },
    "cells": {},
    "routing_config": {}
}
@pytest.fixture
def models():
    """Build models of increasing size, keyed 1..N.

    Model n has n unit models; for n > 1 the arcs connect the units in a
    loop U0 -> U1 -> ... -> U0.  Each model also carries minimal layout
    cells for every unit model and arc.
    """
    unit_types = ("mixer", "heater", "stoichiometric_reactor")
    result = {}
    for count in range(1, len(unit_types) + 1):
        model = copy.deepcopy(base_model)
        body = model["model"]
        body["id"] = f"Model{count}"
        body["unit_models"] = {
            f"U{j}": {
                "type": unit_types[j],
                "image": unit_types[j] + ".svg",
            }
            for j in range(count)
        }
        body["arcs"] = {}
        # A single unit gets no arcs; otherwise close the loop.
        if count > 1:
            body["arcs"] = {
                f"A{j}": {
                    "source": f"U{j}",
                    "dest": f"U{(j + 1) % count}",
                    "label": f"stream {j}",
                }
                for j in range(count)
            }
        # Minimal cells for each unit model and arc.
        cells = model["cells"] = []
        for unit_id in body["unit_models"]:
            cells.append(
                {
                    "id": unit_id,
                    "attrs": {
                        "image": {"xlinkHref": "image.svg"},
                        "root": {"title": "TITLE"},
                    },
                }
            )
        for arc_id, arc in body["arcs"].items():
            cells.append(
                {
                    "id": arc_id,
                    "source": {"id": arc["source"]},
                    "target": {"id": arc["dest"]},
                    "labels": [{"attrs": {"text": {"text": "LABEL"}}}],
                }
            )
        result[count] = model
    return result
@pytest.fixture(scope="module")
def demo_flowsheet():
    """Semi-complicated demonstration flowsheet.

    A connected BTX mixer -> heater -> flash train plus a set of
    unconnected sCO2 power-cycle units sharing one property package.
    """
    m = ConcreteModel()
    m.fs = FlowsheetBlock(default={"dynamic": False})
    # BTX section, connected with arcs.
    m.fs.BT_props = BTXParameterBlock()
    m.fs.M01 = Mixer(default={"property_package": m.fs.BT_props})
    m.fs.H02 = Heater(default={"property_package": m.fs.BT_props})
    m.fs.F03 = Flash(default={"property_package": m.fs.BT_props})
    m.fs.s01 = Arc(source=m.fs.M01.outlet, destination=m.fs.H02.inlet)
    m.fs.s02 = Arc(source=m.fs.H02.outlet, destination=m.fs.F03.inlet)
    TransformationFactory("network.expand_arcs").apply_to(m.fs)
    # sCO2 section: the eight units below differ only in one flag each,
    # so build them through two small local factories instead of
    # repeating the same config dict eight times.
    m.fs.properties = SWCO2ParameterBlock()

    def _pressure_changer(compressor):
        # All pressure changers here use the isentropic assumption.
        return PressureChanger(
            default={
                "dynamic": False,
                "property_package": m.fs.properties,
                "compressor": compressor,
                "thermodynamic_assumption": ThermodynamicAssumption.isentropic,
            }
        )

    def _heater(has_pressure_change):
        return Heater(
            default={
                "dynamic": False,
                "property_package": m.fs.properties,
                "has_pressure_change": has_pressure_change,
            }
        )

    m.fs.main_compressor = _pressure_changer(True)
    m.fs.bypass_compressor = _pressure_changer(True)
    m.fs.turbine = _pressure_changer(False)
    m.fs.boiler = _heater(True)
    m.fs.FG_cooler = _heater(True)
    m.fs.pre_boiler = _heater(False)
    m.fs.HTR_pseudo_tube = _heater(True)
    m.fs.LTR_pseudo_tube = _heater(True)
    return m.fs
@pytest.fixture(scope="module")
def flash_flowsheet():
    """Single-flash BTX flowsheet with deliberately non-finite inlet values."""
    # Model and flowsheet
    m = ConcreteModel()
    m.fs = FlowsheetBlock(default={"dynamic": False})
    # Flash properties
    m.fs.properties = BTXParameterBlock(
        default={
            "valid_phase": ("Liq", "Vap"),
            "activity_coeff_model": "Ideal",
            "state_vars": "FTPz",
        }
    )
    # Flash unit
    m.fs.flash = Flash(default={"property_package": m.fs.properties})
    # The -inf / inf / nan values below are intentional — presumably to
    # exercise the serializer's handling of non-finite numbers; the
    # set_value(..., True) form skips Pyomo's bounds validation.
    # TODO: move this to fix(np.NINF, skip_validation=True) once
    # Pyomo#2180 is merged
    m.fs.flash.inlet.flow_mol[:].set_value(np.NINF, True)
    m.fs.flash.inlet.flow_mol.fix()
    m.fs.flash.inlet.temperature.fix(np.inf)
    m.fs.flash.inlet.pressure[:].set_value(np.nan, True)
    m.fs.flash.inlet.pressure.fix()
    m.fs.flash.inlet.mole_frac_comp[0, "benzene"].fix(0.5)
    m.fs.flash.inlet.mole_frac_comp[0, "toluene"].fix(0.5)
    m.fs.flash.heat_duty.fix(0)
    m.fs.flash.deltaP.fix(0)
    return m.fs
@pytest.fixture(scope="module")
def demo_flowsheet_json():
    """Stored serialized form of the demo flowsheet, as a JSON string."""
    # read_text() opens, reads, and closes the file; the previous
    # json_file.open().read() leaked the file handle.
    return (test_dir / "demo_flowsheet.json").read_text()
@pytest.fixture(scope="module")
def flash_flowsheet_json():
    """Stored serialized form of the flash flowsheet, as a JSON string."""
    # read_text() closes the file; open().read() leaked the handle.
    return (test_dir / "flash_flowsheet.json").read_text()
@pytest.fixture(scope="module")
def serialized_boiler_flowsheet_json():
    """Stored serialized form of the boiler flowsheet, as a JSON string."""
    # read_text() closes the file; open().read() leaked the handle.
    return (test_dir / "serialized_boiler_flowsheet.json").read_text()
# === Tests ===
@pytest.mark.unit
def test_merge(models):
    """Test the FlowsheetDiff output from the .merge() function."""
    num_models = len(models)
    # With N models, in increasing complexity, test the results of merging
    # each with the next, including the last with the first.
    for i in range(num_models):
        next_i = (i + 1) % num_models
        # The models fixture is keyed 1..N, hence the +1 offsets.
        old, new = models[i + 1], models[next_i + 1]
        # Alternate do_copy to exercise both copying and in-place paths.
        merged = FlowsheetDiff(old, new).merged(do_copy=bool(i % 2))
        assert merged["model"] == new["model"]
        sources, dests, units = [], [], []
        for item in merged["cells"]:
            id_ = item["id"]
            if "source" in item:  # arc
                sources.append(item["source"])
                dests.append(item["target"])
            else:  # unit model
                units.append(id_)
        # Each unit ID will show up exactly once in each of these sets, except
        # when we wrap around to the start where there are no arcs
        expect_unit_ids = sorted([f"U{n}" for n in range(0, next_i + 1)])
        assert expect_unit_ids == sorted(units)
        if next_i == 0:
            assert sources == []
            assert dests == []
        else:
            assert expect_unit_ids == sorted([x["id"] for x in sources])
            assert expect_unit_ids == sorted([x["id"] for x in dests])
    # Test the results of merging each with a changed version of itself
    for i in range(1, num_models + 1):
        old, new = models[i], copy.deepcopy(models[i])
        m = new["model"]
        for key in m["unit_models"]:
            m["unit_models"][key]["image"] = "changed.svg"
        for key in m["arcs"]:
            m["arcs"][key]["label"] = "changed"
        merged = FlowsheetDiff(old, new).merged()
        assert merged["model"] == new["model"]
        for cell in merged["cells"]:
            if "source" in cell:
                # see if label was copied into layout
                assert cell["labels"][0]["attrs"]["text"]["text"] == "changed"
            else:
                assert cell["attrs"]["image"]["xlinkHref"] == "changed.svg"
@pytest.mark.unit
def test_validate_flowsheet(models):
    """Exercise validate_flowsheet() with invalid inputs and perturbed models.

    Each perturbation is applied, asserted to fail validation, and then
    reverted so later checks see an unmodified model.
    """
    # these have a type error since they are not iterable at all
    pytest.raises(TypeError, validate_flowsheet, None)
    pytest.raises(TypeError, validate_flowsheet, 123)
    # these are missing the top-level keys (but are sort of iterable, so no type error)
    assert validate_flowsheet("hello")[0] is False
    assert validate_flowsheet([])[0] is False
    # empty one fails
    assert validate_flowsheet({})[0] is False
    # the minimal ones we manually constructed will pass
    for model in models.values():
        assert validate_flowsheet(model)[0]
    # now try tweaks on the minimal ones
    m = models[2]["model"]
    # remove image
    image = m["unit_models"]["U1"]["image"]
    del m["unit_models"]["U1"]["image"]
    assert validate_flowsheet(m)[0] is False
    m["unit_models"]["U1"]["image"] = image  # restore it
    # mess up a unit model ID
    m["unit_models"]["U-FOO"] = m["unit_models"]["U1"]
    del m["unit_models"]["U1"]
    assert validate_flowsheet(m)[0] is False
    m["unit_models"]["U1"] = m["unit_models"]["U-FOO"]
    del m["unit_models"]["U-FOO"]
    # mess up an arc ID
    m["arcs"]["A-FOO"] = m["arcs"]["A1"]
    del m["arcs"]["A1"]
    assert validate_flowsheet(m)[0] is False
    m["arcs"]["A1"] = m["arcs"]["A-FOO"]
    del m["arcs"]["A-FOO"]
def _canonicalize(d):
    """Sort every cell's port items by id so dict comparisons are order-stable."""
    for cell in d["cells"]:
        if "ports" not in cell:
            continue
        ports = cell["ports"]
        ports["items"] = sorted(ports["items"], key=lambda item: item["id"])
@pytest.mark.component
def test_flowsheet_serializer_demo(demo_flowsheet, demo_flowsheet_json):
    """Simple regression test vs. stored data."""
    generated = FlowsheetSerializer(demo_flowsheet, "demo").as_dict()
    expected = json.loads(demo_flowsheet_json)
    # Normalize port ordering before comparing.
    for d in (generated, expected):
        _canonicalize(d)
    assert json.dumps(generated, sort_keys=True) == json.dumps(
        expected, sort_keys=True
    )
@pytest.mark.component
def test_boiler_demo(serialized_boiler_flowsheet_json):
    """Regression test: serialize the supercritical boiler flowsheet and
    compare against the stored JSON."""
    # Imported locally — presumably because building this flowsheet is
    # expensive and only needed for this component-level test.
    import idaes.models_extra.power_generation.flowsheets.supercritical_power_plant.boiler_subflowsheet_build as blr
    m, solver = blr.main()
    test_dict = FlowsheetSerializer(m.fs, "boiler").as_dict()
    stored_dict = json.loads(serialized_boiler_flowsheet_json)
    # Normalize port ordering before comparing.
    _canonicalize(test_dict)
    _canonicalize(stored_dict)
    test_json = json.dumps(test_dict, sort_keys=True)
    stored_json = json.dumps(stored_dict, sort_keys=True)
    if test_json != stored_json:
        # Print a readable diff before failing.
        report_failure(test_dict, stored_dict)
        pytest.fail("Serialized flowsheet does not match expected")
@pytest.mark.unit
def test_flowsheet_serializer_flash(flash_flowsheet, flash_flowsheet_json):
    """Simple regression test vs. stored data."""
    generated = FlowsheetSerializer(flash_flowsheet, "demo").as_dict()
    expected = json.loads(flash_flowsheet_json)
    # Normalize port ordering before comparing.
    for d in (generated, expected):
        _canonicalize(d)
    if json.dumps(generated, sort_keys=True) != json.dumps(expected, sort_keys=True):
        report_failure(generated, expected)
        pytest.fail("Serialized flowsheet does not match expected")
def report_failure(test_dict, stored_dict):
    """Print the diff between a generated flowsheet dict and the expected one.

    :param test_dict: dict produced by the serializer under test
    :param stored_dict: expected dict loaded from the stored JSON fixture
    """
    # The old version also pre-rendered both dicts as JSON and kept
    # commented-out print statements for them; that dead code is removed.
    diff = dict_diff(test_dict, stored_dict)
    print("Diff between generated dict and expected dict:")
    print(diff)
def _show_json(test=None, stored=None):
    """Debug helper: dump the generated and stored values to stdout."""
    import sys

    divider = "-" * 60
    print(divider)
    print("TEST VALUE")
    json.dump(test, sys.stdout)
    print()
    print(divider)
    print("STORED VALUE")
    json.dump(stored, sys.stdout)
@pytest.mark.unit
def test_flowsheet_serializer_invalid():
    """The serializer must reject a model that is not a flowsheet."""
    with pytest.raises(ValueError):
        FlowsheetSerializer(ConcreteModel(), "bad")
@pytest.mark.unit
def test_flowsheet_serializer_get_unit_model_type():
    """get_unit_model_type() must classify both scalar and indexed units."""
    from idaes.core import MaterialBalanceType
    from idaes.models.unit_models.pressure_changer import (
        ThermodynamicAssumption,
    )
    from idaes.models.unit_models.heat_exchanger import (
        delta_temperature_underwood_callback,
    )
    from idaes.models.properties import iapws95
    from pyomo.environ import Set
    # flowsheet
    m = ConcreteModel(name="My Model")
    m.fs = FlowsheetBlock(default={"dynamic": False})
    m.fs.prop_water = iapws95.Iapws95ParameterBlock(
        default={"phase_presentation": iapws95.PhaseType.LG}
    )
    # add & test scalar unit model
    m.fs.cond_pump = PressureChanger(
        default={
            "property_package": m.fs.prop_water,
            "material_balance_type": MaterialBalanceType.componentTotal,
            "thermodynamic_assumption": ThermodynamicAssumption.pump,
        }
    )
    unit_type = FlowsheetSerializer.get_unit_model_type(m.fs.cond_pump)
    assert unit_type == "pressure_changer"
    # add & test indexed unit model (indexed over the feedwater-heater set)
    m.set_fwh = Set(initialize=[1, 2, 3, 4, 6, 7, 8])
    m.fs.fwh = HeatExchanger(
        m.set_fwh,
        default={
            "delta_temperature_callback": delta_temperature_underwood_callback,
            "shell": {
                "property_package": m.fs.prop_water,
                "material_balance_type": MaterialBalanceType.componentTotal,
                "has_pressure_change": True,
            },
            "tube": {
                "property_package": m.fs.prop_water,
                "material_balance_type": MaterialBalanceType.componentTotal,
                "has_pressure_change": True,
            },
        }
    )
    unit_type = FlowsheetSerializer.get_unit_model_type(m.fs.fwh)
    assert unit_type == "heat_exchanger"
|
<reponame>ankush07d/iac_electricals_v13
# Copyright (c) 2021, IAC Electricals and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model.mapper import get_mapped_doc
class PriceSchedule(Document):
    """Controller for the Price Schedule doctype."""

    def validate(self):
        """Require a freight-charge value on every item row whose type is set.

        Each item row carries two independent freight charges (the second
        uses trailing-underscore fieldnames); once either one's type is
        chosen ("Percent" or "Amount"), its value must be non-zero.
        """
        for item in self.items:
            # First freight charge.
            if item.freight_charges_type in ("Percent", "Amount"):
                if item.freight_charges == 0 or item.freight_charges is None:
                    frappe.throw(
                        f"Please Enter First Freight Charges for row {item.idx} in Item Table"
                    )
            # Second freight charge.
            # NOTE: "Secound" typo kept so the user-visible message is unchanged.
            if item.freight_charges_type_ in ("Percent", "Amount"):
                if item.freight_charges_ == 0 or item.freight_charges_ is None:
                    frappe.throw(
                        f"Please Enter Secound Freight Charges for row {item.idx} in Item Table"
                    )
@frappe.whitelist()
def address_query(name):
    """Return address fields for the Address record called *name*.

    Returns None when *name* is empty or no matching record exists.
    """
    if not name:
        return None
    rows = frappe.db.get_all(
        "Address",
        {"name": name},
        ["name", "address_line1", "address_line2", "city",
         "state", "country", "pincode"],
    )
    return rows if rows else None
@frappe.whitelist()
def contact_query(name):
    """Return primary phone/email and name parts for the Contact *name*.

    Returns None when *name* is empty; otherwise a dict with empty-string
    defaults for any missing piece of information.
    """
    if not name:
        return None
    info = {
        "mobile_no": "",
        "email": "",
        "first_name": "",
        "middle_name": "",
        "last_name": "",
    }
    contact = frappe.get_doc("Contact", name)
    if contact:
        for field in ("first_name", "middle_name", "last_name"):
            info[field] = contact.get(field)
        # Scan all rows; the last one flagged primary wins, matching the
        # original behavior.
        for phone in contact.phone_nos:
            if phone.get("is_primary_mobile_no"):
                info["mobile_no"] = phone.get("phone")
        for email in contact.email_ids:
            if email.get("is_primary"):
                info["email"] = email.get("email_id")
    return info
@frappe.whitelist()
def fetch_address_contact_name(name):
    """Return the first linked Address and Contact names for Customer *name*.

    Returns None when *name* is empty; missing links yield empty strings.
    """
    if not name:
        return None
    result = {"address_name": "", "contact_name": ""}
    # Address and Contact records link to the Customer through Dynamic Link
    # rows; look up the first linked parent of each kind.
    for key, parenttype in (("address_name", "Address"),
                            ("contact_name", "Contact")):
        links = frappe.get_all(
            "Dynamic Link",
            filters={
                "link_doctype": "Customer",
                "link_name": name,
                "parenttype": parenttype,
            },
            fields=["parent"],
        )
        if links:
            result[key] = links[0].get("parent")
    return result
@frappe.whitelist()
def number_to_word(amount):
    """Convert a numeric amount to words using the Indian numbering system
    (hundreds, thousands, lakhs, crores), with paisa for the decimal part.

    :param amount: number (or numeric string) to spell out
    :return: the amount in words, ending with "only." and optional paisa
    """
    def get_word(n):
        # Word for a two-digit number 0..99.
        # NOTE(review): "Ninty" is a typo for "Ninety" in the output text.
        words={ 0:"", 1:"One", 2:"Two", 3:"Three", 4:"Four", 5:"Five", 6:"Six", 7:"Seven", 8:"Eight", 9:"Nine", 10:"Ten", 11:"Eleven", 12:"Twelve", 13:"Thirteen", 14:"Fourteen", 15:"Fifteen", 16:"Sixteen", 17:"Seventeen", 18:"Eighteen", 19:"Nineteen", 20:"Twenty", 30:"Thirty", 40:"Forty", 50:"Fifty", 60:"Sixty", 70:"Seventy", 80:"Eighty", 90:"Ninty" }
        if n<=20:
            return words[n]
        else:
            ones=n%10
            tens=n-ones
            return words[tens]+" "+words[ones]
    def get_all_word(n):
        # Indian grouping for n < 1 crore: units+tens (mod 100), hundreds
        # (mod 10), thousands (mod 100), lakhs (mod 100).
        d=[100,10,100,100]
        v=["","Hundred And","Thousand","lakh"]
        w=[]
        for i,x in zip(d,v):
            t=get_word(n%i)
            if t!="":
                t+=" "+x
            w.append(t.rstrip(" "))
            n=n//i
        # Groups were collected least-significant first; flip to read order.
        w.reverse()
        w=' '.join(w).strip()
        # Drop a dangling "Hundred And" when the lower two digits are zero.
        if w.endswith("And"):
            w=w[:-3]
        return w
    # Split the integer and fractional (paisa) parts.
    arr=str(amount).split(".")
    amount=int(arr[0])
    crore=amount//10000000
    amount=amount%10000000
    word=""
    if crore>0:
        word+=get_all_word(crore)
        word+=" crore "
    word+=get_all_word(amount).strip()+" only."
    if len(arr)>1:
        # Single decimal digit means tenths of a rupee, i.e. tens of paisa.
        if len(arr[1])==1:
            arr[1]+="0"
        # NOTE(review): paisa is appended AFTER "only.", producing e.g.
        # "... only. and Ten paisa" — looks unintended; confirm with users.
        word+=" and "+get_all_word(int(arr[1]))+" paisa"
    return word
@frappe.whitelist()
def calculate_taxes(tax_temlet_name,total_amount,unit_price_1_total_amount):
    """Compute per-row tax amounts and running totals for two price bases.

    :param tax_temlet_name: name of the Sales Taxes and Charges Template
    :param total_amount: base amount for the "unit price 2" columns
    :param unit_price_1_total_amount: base amount for the "unit price 1" columns
    :return: list of dicts, one per template tax row
    """
    try:
        tax_items = []
        tx_calculation = 0.0
        total_tax_amount =0.0
        tax_details = frappe.get_doc("Sales Taxes and Charges Template", tax_temlet_name).taxes
        for taxes in tax_details:
            # Each rate is applied to the ORIGINAL base (no compounding).
            tx_calculation = float(total_amount)/100*taxes.rate
            # The running total is seeded with the base amount on the first
            # row, so it is "base + taxes so far", not just tax.
            if taxes.idx == 1:
                total_tax_amount =float(total_amount) + tx_calculation
            else:
                total_tax_amount = total_tax_amount + tx_calculation
            unit_price_1_tx_calculation = float(unit_price_1_total_amount)/100*taxes.rate
            # NOTE(review): if the first template row's idx is not 1,
            # unit_price_1_total_tax_amount is read before assignment
            # (UnboundLocalError) — verify template rows always start at idx 1.
            if taxes.idx == 1:
                unit_price_1_total_tax_amount =float(unit_price_1_total_amount) + unit_price_1_tx_calculation
            else:
                unit_price_1_total_tax_amount = unit_price_1_total_tax_amount + unit_price_1_tx_calculation
            temp = {
                'charge_type' : taxes.charge_type,
                'account_head' : taxes.account_head,
                'description' : taxes.description,
                'rate' : taxes.rate,
                'unit_price_2_tax_amount' : tx_calculation,
                'unit_price_2_total':total_tax_amount,
                'unit_price_1_tax_amount' : unit_price_1_tx_calculation,
                'unit_price_1_total':unit_price_1_total_tax_amount
            }
            tax_items.append(temp)
        return tax_items
    except Exception as e:
        raise e
@frappe.whitelist()
def make_blanket_order(source_name, target_doc=None, ignore_permissions=False):
    """Map a submitted Price Schedule into a new Blanket Order document.

    :param source_name: name of the source Price Schedule
    :param target_doc: optional existing target document to map onto
    :param ignore_permissions: accepted for API compatibility (unused)
    :return: the mapped Blanket Order doc
    """
    # The previous version logged a traceback to the Error Log on every
    # invocation ("Button clicked") — debug leftover, removed.
    doclist = get_mapped_doc("Price Schedule", source_name, {
        "Price Schedule": {
            "doctype": "Blanket Order",
            "field_map": {
                # The original mapped "name" twice; duplicate dict keys mean
                # only this last mapping ever took effect.
                "name": "price_schedule_no",
                "terms": "tc_name",
                "term_details": "terms"
            },
            "validation": {
                "docstatus": ["=", 1]  # only submitted documents
            }
        },
        "Price Schedule Items": {
            "doctype": "Blanket Order Item",
            "field_map": {
                "total_quantity": "qty"
            },
        },
    }, target_doc)
    return doclist
@frappe.whitelist()
def make_sales_order(source_name, target_doc=None, ignore_permissions=False):
    """Map a submitted Price Schedule into a new Sales Order document.

    :param source_name: name of the source Price Schedule
    :param target_doc: optional existing target document to map onto
    :param ignore_permissions: accepted for API compatibility (unused)
    :return: the mapped Sales Order doc
    """
    def set_missing_values(source, target):
        # Flag the new Sales Order as created from a Price Schedule.
        target.against_price_schedule = 1
    def update_item(source_doc, target_doc, source_parent):
        # Link each item row back to the originating Price Schedule.
        target_doc.against_price_schedule = 1
        target_doc.price_schedule = source_name
    doclist = get_mapped_doc("Price Schedule", source_name, {
        "Price Schedule": {
            "doctype": "Sales Order",
            "field_map": {
                "name":"price_schedule_no",
                "sales_taxes_and_charges_template":"taxes_and_charges",
                "contact_person_mobile_no":"contact_mobile",
                "terms":"tc_name",
                "term_details":"terms"
            },
            "validation": {
                "docstatus": ["=", 1]  # only submitted documents
            }
        },
        "Price Schedule Items": {
            "doctype": "Sales Order Item",
            "field_map": {
                "total_quantity": "qty"
            },
            "postprocess": update_item
        },
        "Sales Taxes and Charges Table": {
            "doctype": "Sales Taxes and Charges"
        },
    }, target_doc, set_missing_values)
    return doclist
|
<filename>DFCV.py
"""
Provides a way to quickly validate column conversions didn't result in null across large dataframes.
Can also be used to find problem columns and rows.
Will need to specify or create a primary key column for matching before and after rows.
Use case: Converting StringType columns to TimestampType can cause null values in poorly formed data. It's not possible
to check by hand. This works as an early warning system.
Example:
$ dfcv = DataframeConversionValidator(_before_df=unmodified_df, _after_df=converted_df, _primary_key_column='pk')
---------------
Original Shape:
rows - 469221
columns - 582
Problem Shape:
rows - 1
columns - 3
Details:
['ImproperDate (1)', 'ImproperTimestamp (1)', 'BadUpdateTime (1)']
---------------
"""
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from functools import reduce
from typing import Dict, List
from collections import namedtuple
from operator import add
ColumnDifference = namedtuple('ColumnDifference', ['column_name', 'difference'])
def count_nulls(df: DataFrame) -> DataFrame:
    """
    Count null values in columns
    :param df: Dataframe to count
    :return: Dataframe of same schema with one row, each column has the count of nulls in all rows.
    """
    # F.when(...) yields NULL for non-null cells, so F.count() tallies
    # exactly the null rows of each column.
    null_counters = [
        F.count(F.when(F.col(name).isNull(), name)).alias(name)
        for name in df.columns
    ]
    return df.select(null_counters)
class DataframeConversionValidator:
    """Class for comparing transformed dataframes to their original form.
    A class for comparing two dataframes. One before a transformation takes place and one after. This provides helper
    methods for finding problems when transforming Spark dataframes. Makes it quicker to discover poorly formed data,
    specification errors and datetime mismatches.
    Attributes:
        before_df : Dataframe
            Dataframe before conversion.
        after_df : Dataframe
            Dataframe after conversion.
        nulls_before_df : Dict[str, int]
            Dictionary of column names and sum of nulls in each column for `before_df`.
        nulls_after_df : Dict[str, int]
            Dictionary of column names and sum of nulls in each column for `after_df`.
        differing_columns : List[ColumnDifference]
            List of Tuple[column_name, difference]. Difference is the subtraction of `after_df` by 'before_df`.
        column_names : List[str]
            List of only column names from `differing_columns`.
        primary_key_column : str
            Column name for existing primary key column. TODO: allow multiple columns
        bad_row_column_comparison : Dataframe bad row counts for each dataframe joined on primary key column.
    """
    before_df: DataFrame
    after_df: DataFrame
    nulls_before_df: Dict[str, int]
    nulls_after_df: Dict[str, int]
    differing_columns: List[ColumnDifference]
    column_names: List[str]
    primary_key_column: str
    bad_row_column_comparison: DataFrame
    def __init__(self, _before_df: DataFrame, _after_df: DataFrame, _primary_key_column: str, quiet: bool = False):
        """
        :param _before_df: Dataframe before conversion, includes a PK column for matching
        :param _after_df: Dataframe after conversion, includes a PK column for matching
        :param _primary_key_column: Column name used to compare matching rows between dataframes
        :param quiet: True suppresses the summary information on creation.
        To add a PK column before conversion do this::
            df = df.withColumn('pk', F.monotonically_increasing_id())
        """
        self.column_names = list(_before_df.columns)
        if _primary_key_column not in self.column_names:
            raise LookupError("%s not found in '_before_df'" % _primary_key_column)
        self.before_df = _before_df
        self.after_df = _after_df
        # Collect per-column null counts once for each dataframe (one Spark
        # action each); everything below works off these dicts.
        self.nulls_before_df = count_nulls(_before_df).collect()[0].asDict()
        self.nulls_after_df = count_nulls(_after_df).collect()[0].asDict()
        # Keep only columns whose null count changed during conversion.
        self.differing_columns = list([ColumnDifference(column_name=colname, difference=self.nulls_after_df[colname] - self.nulls_before_df[colname]) for colname in self.column_names if self.nulls_after_df[colname] - self.nulls_before_df[colname] != 0])
        self.primary_key_column = _primary_key_column
        # Qualified column references into the aliased sides of the join.
        select_left = list(map(lambda x: "left." + x, self.different_row_columns()))
        select_right = list(map(lambda x: "right." + x, self.different_row_columns()))
        # Join before/after rows by primary key, then count nulls per row on
        # each side across the differing columns only.
        merged = self.before_df.alias("left").join(self.after_df.alias('right'), on=self.primary_key_column, how='inner')
        merged = merged.withColumn('leftNulls',
                                   reduce(add, [F.col(colname).isNull().cast('int') for colname in select_left]))
        merged = merged.withColumn('rightNulls',
                                   reduce(add, [F.col(colname).isNull().cast('int') for colname in select_right]))
        # Rows whose null counts disagree between before and after are the
        # "bad" rows the helper methods report on.
        self.bad_row_column_comparison = merged.where(F.col('leftNulls') != F.col('rightNulls'))\
            .select([self.primary_key_column] +
                    select_left +
                    select_right +
                    ['leftNulls', 'rightNulls'])
        if not quiet:
            self.summary()
    def summary(self) -> None:
        """
        Prints a summary of original dataframe shape, problem shape, problem columns and the difference in null counts per.
        :return: None
        TODO: allow redirection.
        """
        column_summary = repr([f"""{column} ({difference})""" for column, difference in self.differing_columns])
        print(f"""---------------
Original Shape:
    rows - {self.before_df.count()}
    columns - {len(self.before_df.columns)}
Problem Shape:
    rows - {self.bad_row_count()}
    columns - {self.bad_column_count()}
Details:
    {column_summary}
---------------""")
    def different_row_columns(self) -> List[str]:
        """
        :return: List of column names where null counts do not match between dataframes.
        """
        return list(map(lambda x: x.column_name, self.differing_columns))
    def bad_row_count(self) -> int:
        """
        :return: Count of rows where null counts do not match between dataframes.
        """
        return self.bad_row_column_comparison.count()
    def bad_column_count(self) -> int:
        """
        :return: Count of columns where null counts do not match between dataframes.
        """
        return len(self.different_row_columns())
    def original_problem_rows(self, full_row=False) -> DataFrame:
        """
        :param full_row: True if you want to see all the original columns. Defaults to just problem columns.
        :return: Dataframe of rows from original dataframe where counts do not match between dataframes.
        """
        return self._get_dataframe_by_pk(df=self.before_df, pks=self._get_pks_of_bad_rows(), full_row=full_row)
    def converted_problem_rows(self, full_row=False) -> DataFrame:
        """
        :param full_row: True if you want to see all the after columns. Defaults to just problem columns.
        :return: Dataframe of rows from after dataframe where counts do not match between dataframes.
        """
        return self._get_dataframe_by_pk(df=self.after_df, pks=self._get_pks_of_bad_rows(), full_row=full_row)
    def _get_dataframe_by_pk(self, df: DataFrame, pks: List, full_row=False) -> DataFrame:
        """
        INTERNAL
        :param df: Dataframe to select by primary key column
        :param pks: List of primary key values
        :param full_row: True if you want to see all columns from specified dataframe. Defaults to just problem columns.
        :return: Dataframe of rows matching primary key values.
        """
        if full_row:
            return df.where(F.col(self.primary_key_column).isin(pks))
        else:
            return df.where(F.col(self.primary_key_column).isin(pks)).select([self.primary_key_column] + self.different_row_columns())
    def _get_pks_of_bad_rows(self) -> List:
        """
        INTERNAL
        :return: List of primary key values for problem rows.
        """
        return [row[self.primary_key_column] for row in self.bad_row_column_comparison.select(self.primary_key_column).collect()]
# Public API of this module.
__all__ = ["DataframeConversionValidator", "count_nulls"]
|
import shapely.geometry as sg
from shapely.geometry import box, MultiLineString, Point, MultiPoint, Polygon, MultiPolygon, LineString
import shapely.affinity as sa
import shapely.ops as so
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import geopandas
import vpype_cli
from typing import List, Generic
from genpen import genpen as gp, utils as utils
from scipy import stats as ss
from tqdm import tqdm
import genpen
class GrowerParams(object):
def __init__(
self,
n_pts_eval_per_iter=30,
n_pts_add_per_iter=1,
rads=1.,
rotations=0.,
n_corners=6,
rad_func='static_rads',
rotation_func='static_rotations',
n_corners_func='static_n_corners',
loss_range=(0, 100),
rad_range=(10, 1),
boundary_pt_dist=ss.uniform(loc=0, scale=1),
loss_func='haussdorf_from_agg',
pt_to_poly_func='buffer_pt',
halt_condition_func='return_false',
loss_threshold=1000,
):
self.n_pts_eval_per_iter = n_pts_eval_per_iter
self.n_pts_add_per_iter = n_pts_add_per_iter
self.static_rads = gp.make_callable(rads)
self.static_rotations = gp.make_callable(rotations)
self.static_n_corners = gp.make_callable(n_corners)
self.loss_range=loss_range
self.rad_range = rad_range
self.rad_func = rad_func
self.rotation_func = rotation_func
self.n_corners_func = n_corners_func
self.boundary_pt_dist = boundary_pt_dist
self.loss_func = loss_func
self.pt_to_poly_func = pt_to_poly_func
self.halt_condition_func = halt_condition_func
self.loss_threshold = loss_threshold
@property
def _rad_func(self):
return getattr(self, self.rad_func)
def get_rad(self):
return self._rad_func()
def loss_scaled_rad(self):
return np.interp(self.sketch.current_pt['loss'], self.loss_range, self.rad_range)
@property
def _loss_func(self):
return getattr(self, self.loss_func)
def haussdorf_from_agg(self, pt):
return pt.hausdorff_distance(self.sketch.agg_poly)
def negative_distance_from_target(self, pt):
return -pt.distance(self.sketch.target)
def distance_from_target(self, pt):
return pt.distance(self.sketch.target)
@property
def _halt_condition_func(self):
return getattr(self, self.halt_condition_func)
def return_false(self):
return False
def below_loss_threshold(self):
return any(self.sketch.selected_pts['loss'] < self.loss_threshold)
def above_loss_threshold(self):
return any(self.sketch.selected_pts['loss'] > self.loss_threshold)
@property
def _pt_to_poly_func(self):
return getattr(self, self.pt_to_poly_func)
def buffer_pt(self, pt):
self.sketch.current_pt = pt
new_row = pt.copy()
new_row['geometry'] = pt['geometry'].buffer(self.get_rad())
return new_row
def reg_poly(self, pt):
self.sketch.current_pt = pt
new_row = pt.copy()
new_row['geometry'] = gp.RegPolygon(
pt['geometry'],
n_corners=self.get_n_corners(),
rotation=self.get_rotation(),
radius=self.get_rad()).poly
return new_row
@property
def _rotation_func(self):
return getattr(self, self.rotation_func)
    def get_rotation(self):
        """Return the rotation for the current polygon, per the configured strategy."""
        return self._rotation_func()
    @property
    def _n_corners_func(self):
        """Resolve the corner-count strategy name in ``self.n_corners_func``."""
        return getattr(self, self.n_corners_func)
    def get_n_corners(self):
        """Return the corner count for the current polygon, per the configured strategy."""
        return self._n_corners_func()
class Grower(object):
    """Iteratively grows a compound shape outward from a seed polygon.

    Each :meth:`grow` iteration samples random points on the aggregate
    boundary, scores them with the configured loss, keeps the best ones,
    turns them into polygons and appends only their novel area.  The
    :class:`GrowerParams` bundle supplies every pluggable policy and receives
    a back-reference (``params.sketch``) so its callbacks can inspect state.
    """
    def __init__(
        self,
        poly,
        params: "GrowerParams",  # string annotation: lazy forward reference
        target=None,
    ):
        """
        :param poly: seed shapely polygon.
        :param params: growth-policy bundle; gets ``sketch = self`` assigned.
        :param target: optional geometry used by distance-based losses.
        """
        self.polys = [poly]
        self.params = params
        # Back-reference so GrowerParams strategy methods can read our state.
        self.params.sketch = self
        self.new_pts = geopandas.GeoDataFrame({
            'geometry': [],
            'loss': [],
        })
        self.target = target
        self.halt_condition_satisfied = False
    @property
    def _p(self):
        """Shorthand accessor for the parameter bundle."""
        return self.params
    @_p.setter
    def _p(self, value):
        # BUG FIX: the setter previously did ``self._p = value``, which
        # re-invoked this very setter and recursed until RecursionError.
        # Assign the backing attribute instead.
        self.params = value
    @property
    def mpoly(self):
        """All grown pieces merged into one MultiPolygon."""
        return gp.merge_Polygons(self.polys)
    @property
    def agg_poly(self):
        """Union of every grown piece as a single geometry."""
        return so.unary_union(self.mpoly)
    def get_random_boundary_pts(self, n_pts=1):
        """Sample ``n_pts`` points at random normalized arc positions on the boundary."""
        self.new_pts = geopandas.GeoDataFrame()
        self.new_pts['geometry'] = [
            self.agg_poly.boundary.interpolate(d, normalized=True)
            for d in self._p.boundary_pt_dist.rvs(n_pts)
        ]
    def calc_pts_loss(self):
        """Score every candidate point with the configured loss function."""
        self.new_pts['loss'] = self.new_pts['geometry'].apply(self._p._loss_func)
    def select_pts(self, n_selections=1):
        """Keep the ``n_selections`` candidates with the lowest loss."""
        self.selected_pts = self.new_pts.sort_values('loss').iloc[:n_selections]
    def selected_pts_to_polys(self):
        """Convert each selected point row into a polygon row."""
        self.new_polys = self.selected_pts.apply(self._p._pt_to_poly_func, axis=1)
    def agglomerate_polys(self):
        """Append only the novel area of each new polygon.

        The tiny buffer(1e-6) regularizes the difference result and avoids
        degenerate sliver geometries.
        """
        for _, row in self.new_polys.iterrows():
            self.polys.append(row['geometry'].difference(self.agg_poly).buffer(1e-6))
    def check_halt_condition(self):
        """Evaluate the configured stopping rule and cache the result."""
        self.halt_condition_satisfied = self._p._halt_condition_func()
    def grow(self, n_iters=1):
        """Run up to ``n_iters`` sample -> score -> select -> agglomerate iterations."""
        for _ in tqdm(range(n_iters)):
            self.get_random_boundary_pts(n_pts=self._p.n_pts_eval_per_iter)
            self.calc_pts_loss()
            self.select_pts(n_selections=self._p.n_pts_add_per_iter)
            self.selected_pts_to_polys()
            self.agglomerate_polys()
            self.check_halt_condition()
            if self.halt_condition_satisfied:
                break
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from builtins import str
import copy
import datetime
import mock
import sys
from unittest import TestCase
# ---- Fixture values shared by the test cases below ---------------------------
TEST_FOLLOWERS_COUNT = 543234
TEST_FRIENDS_COUNT = 634
TEST_BODY = 'This is a tweet body test 4 u #trendthis'
TEST_ID = 'test-tweet-id'
TEST_SENTIMENT = 'test-tweet-sentiment'
TEST_GENDER = 'test-tweet-gender'
TEST_COUNTRY = 'test-tweet-country'
TEST_YEAR = '2017'
TEST_MONTH = '01'
TEST_DAY = '23'
TEST_TIME = '{}-{}-{} 12:34:56.789'.format(TEST_YEAR, TEST_MONTH, TEST_DAY)
TEST_LONGITUDE = '12.345'
TEST_LATITUDE = '98.765'
TEST_POSITION = 'pos ({} {})'.format(TEST_LONGITUDE, TEST_LATITUDE)
# Anonymous stand-ins for the row/tweet records the notebook code reads.
# type(name, bases, dict) builds a throwaway class; the trailing () makes
# a single instance with the given attributes.
TEST_ROW = type('TestRow', (object,), {'MESSAGE_LOCATION': TEST_POSITION,
                                       'MESSAGE_POSTED_TIME': TEST_TIME,
                                       })()
TEST_TWEET = type('TestTweet', (object,), {
    'USER_FOLLOWERS_COUNT': TEST_FOLLOWERS_COUNT,
    'USER_FRIENDS_COUNT': TEST_FRIENDS_COUNT,
    'MESSAGE_BODY': TEST_BODY,
    'MESSAGE_ID': TEST_ID,
    'SENTIMENT': TEST_SENTIMENT,
    'USER_GENDER': TEST_GENDER,
    'USER_COUNTRY': TEST_COUNTRY,
    'POSTING_TIME': TEST_TIME,
})()
# Mock most libraries. We aren't testing them.
# These sys.modules entries MUST be installed before the imports below and
# before `sourcecode` is imported, so that its import statements resolve to
# the mocks instead of the real (possibly uninstalled) packages.
sys.modules['matplotlib'] = mock.Mock()
sys.modules['matplotlib.pyplot'] = mock.MagicMock()
sys.modules['pyspark'] = mock.Mock()
sys.modules['pyspark.sql'] = mock.MagicMock()
sys.modules['pyspark.sql.functions'] = mock.Mock()
sys.modules['pyspark.sql.types'] = mock.Mock()
sys.modules['wordcloud'] = mock.Mock()
# Mock enough pyplot for the script code to execute
import matplotlib.pyplot as plt  # noqa
plt.subplots.return_value = (mock.MagicMock(), mock.MagicMock())
params = mock.Mock()
params.get_size_inches.return_value = (1, 2)
plt.gcf.return_value = params
# Mock enough NumPy for the script code to execute
# (this "np" is whatever the environment resolves; add/subtract are stubbed)
import numpy as np  # noqa
np.add = mock.Mock()
np.subtract = mock.Mock()
# Mock enough wordcloud for the script code to execute
import wordcloud  # noqa
wordcloud.STOPWORDS = []
from . import sourcecode as nb  # nb is our notebook's testable Python code
class TestDsxTwitterAutoAnalysis(TestCase):
    """Unit tests for the notebook helpers in ``sourcecode`` (imported as nb).

    All heavy dependencies (pyspark, matplotlib, wordcloud) are mocked at
    module import time above, so these tests exercise only the pure-Python
    helper logic.
    """
    def test_hasWord(self):
        # Substring/word detection helper.
        self.assertTrue(nb.hasWord('message with foo in it', 'foo'))
        self.assertFalse(nb.hasWord('message without', 'foo'))
    def test_checkCarMaker(self):
        # One boolean per known car maker, in nb's fixed maker order.
        expected = [True, False, True, True, False, True]
        actual = nb.checkCarMaker('Test with gm, ford, vw, bmw and tesla')
        self.assertEqual(expected, actual)
    def test_addMissingDates(self):
        # With no rows to iterate, the result is just the sorted copy of
        # `checked` (both sides here are the same MagicMock chain product).
        base = mock.Mock()
        base.iterrows.return_value = []
        checked = mock.MagicMock()
        expected = checked.copy().sort_values()
        actual = nb.addMissingDates(base, checked)
        self.assertEqual(expected, actual)
    def test_getLongitudeLatitude(self):
        actual = nb.getLongitudeLatitude(TEST_POSITION)
        self.assertEqual([TEST_LONGITUDE, TEST_LATITUDE], actual)
    def test_getLongitudeLatitude_except(self):
        # Malformed position strings propagate an IndexError.
        self.assertRaises(
            IndexError, nb.getLongitudeLatitude, 'bogus')
    def test_getLongitude(self):
        actual = nb.getLongitude(TEST_ROW)
        self.assertEqual(float(TEST_LONGITUDE), actual)
    def test_getLongitude_none(self):
        # Missing location -> None rather than an exception.
        bad_row = copy.copy(TEST_ROW)
        bad_row.MESSAGE_LOCATION = None
        actual = nb.getLongitude(bad_row)
        self.assertIsNone(actual)
    def test_getLongitude_except(self):
        # Unparseable location -> None rather than an exception.
        bad_row = copy.copy(TEST_ROW)
        bad_row.MESSAGE_LOCATION = 'bogus'
        actual = nb.getLongitude(bad_row)
        self.assertIsNone(actual)
    def test_getLatitude(self):
        actual = nb.getLatitude(TEST_ROW)
        self.assertEqual(float(TEST_LATITUDE), actual)
    def test_getLatitude_none(self):
        bad_row = copy.copy(TEST_ROW)
        bad_row.MESSAGE_LOCATION = None
        actual = nb.getLatitude(bad_row)
        self.assertIsNone(actual)
    def test_getLatitude_except(self):
        bad_row = copy.copy(TEST_ROW)
        bad_row.MESSAGE_LOCATION = 'bogus'
        actual = nb.getLatitude(bad_row)
        self.assertIsNone(actual)
    def test_getDateIgnoreTime(self):
        # Time-of-day must be truncated to midnight.
        expected = datetime.datetime(
            int(TEST_YEAR), int(TEST_MONTH), int(TEST_DAY),
            hour=0, minute=0, second=0, microsecond=0)
        actual = nb.getDateIgnoreTime(TEST_ROW)
        self.assertEqual(expected, actual)
    def test_getInfluence(self):
        # Influence is the mean of followers and friends counts.
        expected = (TEST_FOLLOWERS_COUNT + TEST_FRIENDS_COUNT) / 2
        actual = nb.getInfluence(TEST_TWEET)
        self.assertEqual(expected, actual)
    def test_getAllAttributes(self):
        # Flat attribute list, country upper-cased, padded with one False
        # per known car maker (no maker mentioned in the fixture body order).
        expected = [TEST_ID, TEST_BODY, TEST_SENTIMENT, TEST_GENDER,
                    str(TEST_COUNTRY).upper(),
                    TEST_TIME,
                    nb.getInfluence(TEST_TWEET)]
        num = nb.num_car_makers
        expected.extend([False] * num)
        actual = nb.getAllAttributes(TEST_TWEET)
        self.assertEqual(expected, actual)
    def test_getInsights_Influence_empty(self):
        # Verify one filter/select/toPandas chain per car maker and that the
        # DataFrame was indexed once per maker name, in order.
        df = mock.MagicMock()
        num = nb.num_car_makers
        expected = [df.filter().select().toPandas()] * num
        actual = nb.getInsights_Influence(df, nb.car_makers_name_list)
        self.assertEqual(expected, actual)
        self.assertEqual(num, df.__getitem__.call_count)
        self.assertEqual(
            [mock.call(x) for x in nb.car_makers_name_list],
            df.__getitem__.call_args_list)
    def test_getListForCountry_empty(self):
        # Unknown country yields a single zero bucket.
        actual = nb.getListForCountry(mock.MagicMock(),
                                      'PRINCIPALITY OF SEALAND')
        self.assertEqual([0], actual)
from typing import List, Union
from dataclasses import dataclass
@dataclass
class Player:
    """
    Accumulates per-game statistics for a player (or merged player aliases).

    :param player_index: A unique player ID, or a list of IDs belonging to
        the same person across games.
    :type player_index: str or List[str]
    :example: *None*
    :note: This class is intended to be used internally.

    NOTE(review): ``@dataclass`` declares no fields here, so its main effect
    is an auto-generated ``__eq__`` that treats any two Player instances as
    equal.  Kept as-is for backward compatibility — confirm whether that
    equality behavior is actually wanted.
    """
    def __init__(self, player_index: Union[str, List[str]]):
        # FIX: isinstance() instead of ``type(x) == str`` so str subclasses
        # are normalized into a single-element list the same way.
        if isinstance(player_index, str):
            self._player_index = [player_index]
        else:
            self._player_index = player_index
        # NOTE(review): aliases the SAME list object as _player_index —
        # mutating one mutates the other; confirm this is intended.
        self._other_player_indexes = self._player_index
        # Per-game stat containers, each keyed by a game/session identifier.
        self._player_money_dic = {}
        self._hand_dic = {}
        self._card_dic = {}
        self._line_dic = {}
        self._moves_dic = {}
        self._win_percent = {}
        self._win_count = {}
        self._largest_win = {}
        self._largest_loss = {}
        self._hand_count = {}
        self._all_in = {}
        self._player_name = []
        self._player_merged_moves = None
    def __repr__(self):
        # Repr is the player's name list, e.g. "['alice']" or "[]".
        return str(self._player_name)
    # Each setter below accepts a (key, value) pair and stores it in the
    # corresponding per-game dict; the matching getter exposes the dict.
    @property
    def win_percent(self) -> dict:
        """Returns player win percent"""
        return self._win_percent
    @win_percent.setter
    def win_percent(self, val):
        self._win_percent[val[0]] = val[1]
    @property
    def win_count(self) -> dict:
        """Returns player win count"""
        return self._win_count
    @win_count.setter
    def win_count(self, val):
        self._win_count[val[0]] = val[1]
    @property
    def largest_win(self) -> dict:
        """Returns players largest win"""
        return self._largest_win
    @largest_win.setter
    def largest_win(self, val):
        self._largest_win[val[0]] = val[1]
    @property
    def largest_loss(self) -> dict:
        """Returns players largest loss"""
        return self._largest_loss
    @largest_loss.setter
    def largest_loss(self, val):
        self._largest_loss[val[0]] = val[1]
    @property
    def hand_count(self) -> dict:
        """Returns total hand count when player involved"""
        return self._hand_count
    @hand_count.setter
    def hand_count(self, val):
        self._hand_count[val[0]] = val[1]
    @property
    def all_in(self) -> dict:
        """Returns a dict documenting when the player went all in"""
        return self._all_in
    @all_in.setter
    def all_in(self, val):
        self._all_in[val[0]] = val[1]
    @property
    def player_index(self) -> List[str]:
        """Returns player index or indexes"""
        return self._player_index
    @player_index.setter
    def player_index(self, val):
        self._player_index = val
    @property
    def player_name(self) -> List[str]:
        """Returns player name or names"""
        return self._player_name
    @player_name.setter
    def player_name(self, val):
        self._player_name = val
    @property
    def player_money_info(self) -> dict:
        """Returns a dict of DataFrames documenting player buy-in and loss counts"""
        return self._player_money_dic
    @player_money_info.setter
    def player_money_info(self, val):
        self._player_money_dic[val[0]] = val[1]
    @property
    def hand_dic(self) -> dict:
        """Returns a dict of DataFrames documenting hands when the player won"""
        return self._hand_dic
    @hand_dic.setter
    def hand_dic(self, val):
        self._hand_dic[val[0]] = val[1]
    @property
    def card_dic(self) -> dict:
        """Returns a dict of DataFrames documenting card appearances"""
        return self._card_dic
    @card_dic.setter
    def card_dic(self, val):
        self._card_dic[val[0]] = val[1]
    @property
    def line_dic(self) -> dict:
        """Returns a dict with a list of objects where player involved"""
        return self._line_dic
    @line_dic.setter
    def line_dic(self, val):
        self._line_dic[val[0]] = val[1]
    @property
    def moves_dic(self) -> dict:
        """Returns a players moves on the table"""
        return self._moves_dic
    @moves_dic.setter
    def moves_dic(self, val):
        self._moves_dic[val[0]] = val[1]
    @property
    def merged_moves(self) -> Union[dict, None]:
        """Returns a combined dict of player moves"""
        return self._player_merged_moves
    @merged_moves.setter
    def merged_moves(self, val):
        self._player_merged_moves = val
import torch
import numpy as np
import torch.nn as nn
# io utils
from pytorch3d.io import load_obj
# datastructures
from pytorch3d.structures import Meshes, list_to_padded
from pytorch3d.renderer.mesh.textures import TexturesVertex
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, SoftPhongShader, PointLights, DirectionalLights, HardPhongShader
)
from pytorch3d.ops import sample_points_from_meshes
from CustomRenderers import *
class BatchRender:
    """Batched pytorch3d renderer for a set of OBJ meshes.

    Loads every mesh in ``obj_paths`` once, then :meth:`renderBatch` renders a
    batch of (R, t) poses with the renderer selected by ``render_method``
    (silhouette / phong / depth variants, soft or hard rasterization).
    """
    def __init__(self, obj_paths, device, batch_size=12, faces_per_pixel=16,
                 render_method="silhouette", image_size=256, norm_verts=False):
        """
        :param obj_paths: list of .obj file paths, one per object id.
        :param device: torch device the meshes/renderer live on.
        :param batch_size: number of poses rendered per call.
        :param faces_per_pixel: rasterization depth for the soft shaders.
        :param render_method: one of the method keys handled in initRender().
        :param image_size: square output resolution.
        :param norm_verts: if True, rescale each mesh (see initMeshes()).
        """
        self.batch_size = batch_size
        self.faces_per_pixel = faces_per_pixel
        self.batch_indeces = np.arange(self.batch_size)
        self.obj_paths = obj_paths
        self.device = device
        self.method = render_method
        self.image_size = image_size
        self.points = None
        self.norm_verts = norm_verts
        # Setup batch of meshes
        self.vertices, self.faces, self.textures = self.initMeshes()
        # Initialize the renderer
        self.renderer = self.initRender(image_size=image_size, method=self.method)
    def renderBatch(self, Rs, ts, ids=[]):
        """Render one image per (R, t) pose.

        :param Rs: list of 3x3 rotations (or an already-stacked tensor).
        :param ts: list of translations (or an already-stacked Bx3 tensor).
        :param ids: per-pose object index into the loaded meshes; empty list
            means "object 0 for every pose".  (``ids`` is never mutated, so
            the mutable default is harmless here.)
        :return: batch of images; channel slicing depends on the method —
            alpha channel for silhouettes, RGB for phong, raw/averaged
            channels for the depth variants.
        """
        if(type(Rs) is list):
            batch_R = torch.tensor(np.stack(Rs), device=self.device, dtype=torch.float32)
        else:
            batch_R = Rs
        if(type(ts) is list):
            batch_T = torch.tensor(np.stack(ts), device=self.device, dtype=torch.float32) # Bx3
        else:
            batch_T = ts
        if(len(ids) == 0):
            # No ids specified, assuming one object only
            ids = [0 for r in Rs]
        # Load meshes based on object ids
        batch_verts_rgb = list_to_padded([self.textures[i] for i in ids])
        batch_textures = TexturesVertex(verts_features=batch_verts_rgb.to(self.device))
        batch_verts=[self.vertices[i].to(self.device) for i in ids]
        batch_faces=[self.faces[i].to(self.device) for i in ids]
        mesh = Meshes(
            verts=batch_verts,
            faces=batch_faces,
            textures=batch_textures
        )
        images = self.renderer(meshes_world=mesh, R=batch_R, T=batch_T)
        # Post-process per method: index 3 is the alpha/silhouette channel,
        # :3 are the RGB channels (pytorch3d RGBA output convention).
        if(self.method == "soft-silhouette"):
            images = images[..., 3]
        elif(self.method == "hard-silhouette"):
            images = images[..., 3]
        elif(self.method == "hard-phong"):
            images = images[..., :3]
        elif(self.method == "soft-phong"):
            images = images[..., :3]
        elif(self.method == "soft-depth"):
            images = images #[..., 0] #torch.mean(images, dim=3)
        elif(self.method == "hard-depth"):
            images = images #torch.mean(images, dim=3)
        elif(self.method == "blurry-depth"):
            images = torch.mean(images, dim=3)
        return images
    def initMeshes(self):
        """Load every OBJ once; return parallel lists (vertices, faces, textures).

        Textures are all-white per-vertex colors.  When ``norm_verts`` is set
        each mesh is centered and rescaled so its largest extent is 100
        (units per the comment below — mm).
        """
        textures = []
        vertices = []
        faces = []
        for p in self.obj_paths:
            # Load the obj and ignore the textures and materials.
            verts, faces_idx, _ = load_obj(p)
            facs = faces_idx.verts_idx
            # Normalize vertices
            # such that all objects measures 100 mm
            # along the biggest dimension (x,y,z)
            if(self.norm_verts):
                center = verts.mean(0)
                verts_normed = verts - center
                scale = max(verts_normed.abs().max(0)[0])
                verts = (verts / scale)*100.0
            # Initialize each vertex to be white in color.
            verts_rgb = torch.ones_like(verts) # (V, 3)
            vertices.append(verts)
            textures.append(verts_rgb)
            faces.append(facs)
        return vertices, faces, textures
    def initRender(self, method, image_size):
        """Build the MeshRenderer matching ``method``.

        NOTE(review): an unknown method only prints a warning and returns
        None, which defers the failure to the first renderBatch() call —
        consider raising instead.
        """
        cameras = OpenGLPerspectiveCameras(device=self.device, fov=15)
        if(method=="soft-silhouette"):
            # Soft rasterization: blur radius derived from sigma per the
            # SoftRas formulation used in the pytorch3d examples.
            blend_params = BlendParams(sigma=1e-7, gamma=1e-7)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=np.log(1. / 1e-7 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel
            )
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=SoftSilhouetteShader(blend_params=blend_params)
            )
        elif(method=="hard-silhouette"):
            # Same shader but a single face per pixel -> hard edges.
            blend_params = BlendParams(sigma=1e-7, gamma=1e-7)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=np.log(1. / 1e-7 - 1.) * blend_params.sigma,
                faces_per_pixel=1
            )
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=SoftSilhouetteShader(blend_params=blend_params)
            )
        elif(method=="soft-depth"):
            # Soft Rasterizer - from https://github.com/facebookresearch/pytorch3d/issues/95
            #blend_params = BlendParams(sigma=1e-7, gamma=1e-7)
            blend_params = BlendParams(sigma=1e-3, gamma=1e-4)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                #blur_radius= np.log(1. / 1e-7 - 1.) * blend_params.sigma,
                blur_radius= np.log(1. / 1e-3 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel
            )
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=SoftDepthShader(blend_params=blend_params)  # from CustomRenderers
            )
        elif(method=="hard-depth"):
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius= 0,
                faces_per_pixel= 20
            )
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=HardDepthShader()  # from CustomRenderers
            )
        elif(method=="blurry-depth"):
            # Soft Rasterizer - from https://github.com/facebookresearch/pytorch3d/issues/95
            blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius= np.log(1. / 1e-4 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel
            )
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=SoftDepthShader(blend_params=blend_params)
            )
        elif(method=="soft-phong"):
            blend_params = BlendParams(sigma=1e-3, gamma=1e-3)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius= np.log(1. / 1e-3 - 1.) * blend_params.sigma,
                faces_per_pixel=self.faces_per_pixel
            )
            # lights = DirectionalLights(device=self.device,
            #                            ambient_color=[[0.25, 0.25, 0.25]],
            #                            diffuse_color=[[0.6, 0.6, 0.6]],
            #                            specular_color=[[0.15, 0.15, 0.15]],
            #                            direction=[[0.0, 1.0, 0.0]])
            lights = DirectionalLights(device=self.device,
                                       direction=[[0.0, 1.0, 0.0]])
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=SoftPhongShader(device=self.device,
                                        blend_params = blend_params,
                                        lights=lights)
            )
        elif(method=="hard-phong"):
            blend_params = BlendParams(sigma=1e-8, gamma=1e-8)
            raster_settings = RasterizationSettings(
                image_size=image_size,
                blur_radius=0.0,
                faces_per_pixel=1
            )
            lights = DirectionalLights(device=self.device,
                                       ambient_color=[[0.25, 0.25, 0.25]],
                                       diffuse_color=[[0.6, 0.6, 0.6]],
                                       specular_color=[[0.15, 0.15, 0.15]],
                                       direction=[[-1.0, -1.0, 1.0]])
            renderer = MeshRenderer(
                rasterizer=MeshRasterizer(
                    cameras=cameras,
                    raster_settings=raster_settings
                ),
                shader=HardPhongShader(device=self.device, lights=lights)
            )
        else:
            print("Unknown render method!")
            return None
        return renderer
from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import math
import random
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from utils import load_data, accuracy, normalize, load_polblogs_data
from models import GCN
parser = argparse.ArgumentParser()
# BUG FIX: 'cuda' was registered as a *positional* argument with
# action='store_true'; argparse cannot meaningfully consume a zero-argument
# positional.  Expose it as the optional flag it was clearly meant to be.
# default=True preserves the downstream behavior (args.cuda stays truthy).
parser.add_argument('--cuda', action='store_true', default=True,
                    help='Enables CUDA training.')
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
parser.add_argument('--seed', type=int, default=20, help='Random seed.')
parser.add_argument('--epochs', type=int, default=200,
                    help='Number of epochs to train.')
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default="cora",
                    help='The name of the network dataset.')
parser.add_argument('--radius', type=int, default=4,
                    help='The radius of l2 norm projection')
parser.add_argument('--evaluate_mode', type=str, default="universal",
                    help='The universal attack method.')
args = parser.parse_args()
# Seed every RNG involved for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
# polblogs ships without a validation split; the generic loader also
# returns the raw (dense) adjacency as its last value.
if args.dataset == "polblogs":
    tmp_adj, features, labels, idx_train, idx_test = load_polblogs_data()
    print (sum(sum(tmp_adj)))
    print (tmp_adj.shape)
else:
    _, features, labels, idx_train, idx_val, idx_test, tmp_adj = load_data(args.dataset)
num_classes = labels.max().item() + 1
# tmp_adj = tmp_adj.toarray()
# Keep the clean adjacency in tmp_adj; build the GCN input by adding
# self-loops and symmetric normalization.
adj = tmp_adj
adj = np.eye(tmp_adj.shape[0]) + adj
adj, _ = normalize(adj)
adj = torch.from_numpy(adj.astype(np.float32))
# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=num_classes,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)
if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    if args.dataset != "polblogs":
        idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()
def train(epoch):
    """Run one full-batch GCN training step and print metrics.

    Reads the module-level ``model``, ``optimizer``, ``features``, ``adj``,
    ``labels`` and split indices.
    """
    t = time.time()
    model.train()
    optimizer.zero_grad()
    # The adjacency is wrapped as a differentiable Variable; its gradient is
    # not read here — presumably kept for the attack tooling. TODO confirm.
    x = Variable(adj, requires_grad=True)
    output = model(features, x)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    # print ('output', output.size())
    # print ('labels', labels.size())
    loss_train.backward()
    optimizer.step()
    if args.dataset != "polblogs":
        # polblogs has no validation split (see the data loading above).
        loss_val = F.nll_loss(output[idx_val], labels[idx_val])
        acc_val = accuracy(output[idx_val], labels[idx_val])
        print('Epoch: {:04d}'.format(epoch+1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'acc_train: {:.4f}'.format(acc_train.item()),
              'loss_val: {:.4f}'.format(loss_val.item()),
              'acc_val: {:.4f}'.format(acc_val.item()),
              'time: {:.4f}s'.format(time.time() - t))
    else:
        print('Epoch: {:04d}'.format(epoch+1),
              'loss_train: {:.4f}'.format(loss_train.item()),
              'acc_train: {:.4f}'.format(acc_train.item()),
              'time: {:.4f}s'.format(time.time() - t))
def test(adj_m):
    """Evaluate the trained model on the test split with adjacency ``adj_m``.

    :return: the model's full output (log-probabilities for every node).
    """
    model.eval()
    output = model(features, adj_m)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))
    return output
# Train for the requested number of epochs, then record the clean
# predictions (ori_output) that the attack evaluation compares against.
t_total = time.time()
for epoch in range(args.epochs):
    train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))
# torch.save(model, './cora_gcn.pth')
# torch.save(model.state_dict(), 'cora_gcn.pkl')
# Testing
ori_output = test(adj)
def add_perturb(input_adj, idx, perturb):
    """Return a copy of ``input_adj`` with node ``idx``'s edges flipped per ``perturb``.

    Implements (1-x)*A + x*(1-A): wherever the mask ``x`` is 1 the edge state
    is inverted, elsewhere it is kept.  ``x`` covers row ``idx`` and column
    ``idx``, so the flip is applied symmetrically.  The diagonal of the
    complement is zeroed so flipping never introduces a self-loop.

    :param input_adj: dense square adjacency matrix (numpy array).
    :param idx: index of the attacked node.
    :param perturb: length-n 0/1 vector; 1 marks a neighbor slot to flip.
    :return: perturbed dense adjacency matrix (new array).
    """
    n_rows, n_cols = input_adj.shape[0], input_adj.shape[1]
    # Mask of entries to flip: the attacked node's row and column.
    x = np.zeros((n_rows, n_cols))
    x[idx] = perturb
    x[:, idx] = perturb
    # Complement adjacency (1 - A) with a cleared diagonal (no self-loops).
    # np.fill_diagonal replaces the original O(n) Python loop.
    adj2 = np.ones((n_rows, n_cols)) - input_adj
    np.fill_diagonal(adj2, 0)
    # (1 - x) keeps the original entries, x injects the complement.
    return np.multiply(np.ones((n_rows, n_cols)) - x, input_adj) + np.multiply(x, adj2)
def evaluate_attack(perturb):
    """Apply ``perturb`` to every test node and measure the fooling rate.

    For each test node the perturbation vector is injected into its row and
    column of the clean adjacency (``tmp_adj``), the graph is re-normalized
    with self-loops and re-classified; a node counts as fooled when its
    predicted class differs from the clean prediction in ``ori_output``.

    :param perturb: length-n 0/1 vector of edge flips.
    :return: tuple (fooling_rate, per-class prediction histogram).
    """
    res = []
    # perturb = np.where(perturb>0.5, 1, 0)
    print ('perturb', perturb)
    new_pred = []
    for i in range(num_classes):
        new_pred.append(0)
    for k in idx_test:
    # for k in range(1):
        innormal_x_p = add_perturb(tmp_adj, k, perturb)
        # innormal_x_p = np.where(innormal_x_p<0.5, 0, 1)
        # diff = innormal_x_p[k] - tmp_adj[k]
        # diff_idx = np.where(diff != 0 )
        # print ('diff_idx', diff_idx)
        # one_idx = np.where(innormal_x_p[k]==1)[0]
        # zero_idx = np.where(innormal_x_p[k]!=1)[0]
        # total_idx = one_idx.shape[0] + zero_idx.shape[0]
        # print ('total_idx', total_idx)
        # print ('one_idx', one_idx)
        # print ('corresponding perturb', perturb[one_idx])
        # print (innormal_x_p[k][one_idx])
        # Same preprocessing as the clean graph: self-loops + normalization.
        x_p, degree_p = normalize(innormal_x_p + np.eye(tmp_adj.shape[0]))
        x_p = torch.from_numpy(x_p.astype(np.float32))
        x_p = x_p.cuda()
        output = model(features, x_p)
        new_pred[int(torch.argmax(output[k]))] += 1
        if int(torch.argmax(output[k])) == int(torch.argmax(ori_output[k])):
            res.append(0)
            print ('node {} attack failed'.format(k))
        else:
            res.append(1)
            print ('node {} attack succeed'.format(k))
    fooling_rate = float(sum(res)/len(res))
    print ('the current fooling rate is', fooling_rate)
    return fooling_rate, new_pred
def calculate_entropy(pred):
    """Shannon entropy (natural log) of a prediction histogram.

    Generalized to iterate over ``pred`` itself rather than the module-level
    ``num_classes``; callers always pass one bucket per class, so behavior is
    unchanged, but the function no longer depends on global state.

    :param pred: iterable of per-class prediction counts.
    :return: entropy in nats; 0 when all mass sits in a single class.
    """
    total = sum(pred)
    h = 0
    for count in pred:
        p_i = count / total
        # Zero-count classes contribute nothing (lim p*log p -> 0).
        if p_i != 0:
            h -= p_i * math.log(p_i)
    return h
# Baseline: histogram of the clean predictions over the test nodes and its
# entropy, for comparison with the post-attack histograms below.
new_pred = []
for i in range(num_classes):
    new_pred.append(0)
for k in idx_test:
    new_pred[int(torch.argmax(ori_output[k]))] += 1
entropy = calculate_entropy(new_pred)
print ('the entropy is', entropy)
#evaluate the universal attack
if args.evaluate_mode == "universal":
    # Replay 10 precomputed universal perturbation vectors from disk,
    # binarize them at 0.5 and measure fooling rate + prediction entropy.
    fool_res = []
    p_times = []
    all_entropy = []
    for i in range(10):
        perturb = np.array([float(line.rstrip('\n')) for line in open('./perturbation_results/{1}_xi{2}_epoch100/perturbation_{1}_{0}.txt'.format(i, args.dataset, args.radius))])
        perturb = np.where(perturb>0.5, 1, 0)
        pt = np.where(perturb>0)[0]
        if len(list(pt)) == 0:
            # Empty perturbation: count as zero fooling, skip evaluation.
            fool_res.append(0)
            p_times.append(0)
            continue
        print ('the perturbation is', pt)
        res, new_pred = evaluate_attack(perturb)
        print ('the prediction result is', new_pred)
        entropy = calculate_entropy(new_pred)
        fool_res.append(res)
        p_times.append(len(list(pt)))
        print ('the perturbation times is', p_times)
        print ('the fooling rates are', fool_res)
        print ('the average fooling rates over 10 times of test is', sum(fool_res)/float(len(fool_res)))
        print ('the entropy is', entropy)
        all_entropy.append(entropy)
    print ('all the entropy values are', all_entropy)
    print ('the average entropy is', sum(all_entropy)/float(len(all_entropy)))
elif args.evaluate_mode == "global_random":
    # Baseline: flip each anchor slot independently with probability
    # perturb_times/n, i.e. the same expected budget as the universal attack.
    #set this equal to the ceil of the number of anchor nodes computed by universal attack
    perturb_times = 8
    fool_res = []
    p_times = []
    for i in range(10):
        # perturb = np.array([float(line.rstrip('\n')) for line in open("perturbation.txt")])
        # perturb = np.where(perturb>0.5, 1, 0)
        # perturb_times = sum(perturb)
        # perturb = np.zeros(adj.shape[1])
        #the perturbation times of our universal perturbation
        # attack_index = list(np.random.choice(range(adj.shape[1]), perturb_times, replace = False))
        # perturb[attack_index] = 1
        prob = float(perturb_times / tmp_adj.shape[0])
        perturb = np.random.choice(2, tmp_adj.shape[0], p = [1-prob, prob])
        print ('the prob is', prob)
        pt = np.where(perturb>0)[0]
        print ('the perturbation is', pt)
        res, new_pred = evaluate_attack(perturb)
        fool_res.append(res)
        p_times.append(len(list(pt)))
        print ('the perturbation times is', p_times)
        print ('the fooling rates are', fool_res)
        print ('the average fooling rates over 10 times of test is', sum(fool_res)/float(len(fool_res)))
    print ('the average fooling rate with {} perturbation times is'.format(perturb_times), sum(fool_res)/float(len(fool_res)))
elif args.evaluate_mode == "limitted_random":
    # Baseline: exactly perturb_times random anchors per trial (fixed budget).
    perturb_times = 8
    fool_res = []
    p_times = []
    for i in range(10):
        # perturb = np.array([float(line.rstrip('\n')) for line in open("perturbation.txt")])
        # perturb = np.where(perturb>0.5, 1, 0)
        # perturb_times = sum(perturb)
        perturb = np.zeros(adj.shape[1])
        #the perturbation times of our universal perturbation
        attack_index = list(np.random.choice(range(adj.shape[1]), perturb_times, replace = False))
        perturb[attack_index] = 1
        pt = np.where(perturb>0)[0]
        # print ('the perturbation is', pt)
        res, new_pred = evaluate_attack(perturb)
        fool_res.append(res)
        p_times.append(len(list(pt)))
        print ('the perturbation times is', p_times)
        print ('the fooling rates are', fool_res)
        print ('the average fooling rates over 10 times of test is', sum(fool_res)/float(len(fool_res)))
    print ('the average fooling rate with {} perturbation times is'.format(perturb_times), sum(fool_res)/float(len(fool_res)))
elif args.evaluate_mode == "victim_attak":
    # Baseline: anchors drawn only from nodes of one victim class at a time.
    #the perturbation times of our universal perturbation
    perturb_time = 8 #set this equal to the ceil of the number of anchor nodes computed by universal attack
    fool_res = []
    # p_times = []
    for k in range(num_classes):
    # for k in range(4,6):
        each_fool_res = []
        idx = np.where(labels.cpu().numpy()==k)[0]
        # for i in range(1):
        for i in range(10):
            attack_index = list(np.random.choice(idx, perturb_time, replace = False))
            perturb = np.zeros(adj.shape[1])
            perturb[attack_index] = 1
            print ('perturbating by connecting to nodes of class', k)
            res, new_pred = evaluate_attack(perturb)
            each_fool_res.append(res)
            print ('the fooling rates of current class are', each_fool_res)
        avg_asr = sum(each_fool_res)/float(len(each_fool_res))
        print ('the average fooling rates over 10 times of test is', avg_asr)
        fool_res.append(avg_asr)
        print ('fool_res', fool_res)
    print ('the avg asr by connecting to each class of nodes is', fool_res)
elif args.evaluate_mode == "universal_delete":
    # Ablation: drop i random anchors from a stored universal perturbation
    # and re-measure the fooling rate.
    # NOTE(review): the path format only uses {1}/{2} and hard-codes
    # perturbation_..._4.txt, so `i` never selects a different file — confirm
    # whether {0} was meant to appear in the filename.
    all_fool = []
    for i in range(8, 9):
        fool_res = []
        for j in range(8):
            perturb = np.array([float(line.rstrip('\n')) for line in open('./perturbation_results/{1}_xi{2}_epoch100/perturbation_{1}_4.txt'.format(i, args.dataset, args.radius))])
            perturb = np.where(perturb>0.5, 1, 0)
            pt = np.where(perturb>0)[0]
            a = list(np.random.choice(range(0, pt.shape[0]), i, replace = False))
            perturb[pt[a]] = 0
            pt = np.where(perturb>0)[0]
            print ('the perturbation is', pt)
            res, new_pred = evaluate_attack(perturb)
            fool_res.append(res)
            print ('the fooling rates are', fool_res)
            print ('the average fooling rates over 10 times of test is', sum(fool_res)/float(len(fool_res)))
        all_fool.append(sum(fool_res)/float(len(fool_res)))
    print ('all the fooling rate is', all_fool)
from tenable.errors import *
from ..checker import check, single
import pytest
@pytest.fixture
def scanner(request, admin, vcr):
    """Create a throwaway scanner via the recorded API and delete it on teardown.

    Uses VCR cassettes so no live SecurityCenter is contacted; deletion
    failures (e.g. cassette already consumed) are deliberately ignored.
    """
    with vcr.use_cassette('test_scanners_create_success'):
        scanner = admin.scanners.create('Example', '127.0.0.1',
            username='nouser',
            password='<PASSWORD>')
    def teardown():
        try:
            with vcr.use_cassette('test_scanners_delete_success'):
                admin.scanners.delete(int(scanner['id']))
        except APIError:
            # Best-effort cleanup only.
            pass
    request.addfinalizer(teardown)
    return scanner
# Each test below verifies that _constructor() rejects one keyword argument
# of the wrong type with a TypeError.
def test_scanners_constructor_name_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(name=1)
def test_scanners_constructor_description_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(description=1)
def test_scanners_constructor_username_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(username=1)
def test_scanners_constructor_cert_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(cert=1)
def test_scanners_constructor_password_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(password=1)
def test_scanners_constructor_address_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(address=1)
def test_scanners_constructor_port_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(port='one')
def test_scanners_constructor_proxy_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(proxy='one')
def test_scanners_constructor_verify_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(verify='yup')
def test_scanners_constructor_enabled_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(enabled='nope')
def test_scanners_constructor_managed_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(managed='yup')
def test_scanners_constructor_agent_capable_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(agent_capable='nope')
def test_scanners_constructor_zone_ids_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(zone_ids=1)
def test_scanners_constructor_zone_ids_item_typeerror(sc):
    # List items are validated too, not just the container type.
    with pytest.raises(TypeError):
        sc.scanners._constructor(zone_ids=['one'])
def test_scanners_constructor_orgs_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(orgs=1)
def test_scanners_constructor_orgs_item_typeerror(sc):
    with pytest.raises(TypeError):
        sc.scanners._constructor(orgs=['one'])
def test_scanners_constructor_success(sc):
    # Feed every supported keyword through the constructor and verify the
    # translated SecurityCenter document field-by-field: booleans become the
    # strings 'true'/'false', address maps to 'ip', id lists are wrapped
    # into [{'id': n}, ...].
    payload = {
        'name': 'Example',
        'description': 'Described',
        'username': 'username',
        'password': 'password',
        'address': 'scanner.company.tld',
        'port': 443,
        'proxy': False,
        'verify': False,
        'enabled': True,
        'managed': False,
        'agent_capable': False,
        'zone_ids': [1, 2, 3],
        'orgs': [1, 2, 3],
    }
    resp = sc.scanners._constructor(**payload)
    expected = {
        'name': 'Example',
        'description': 'Described',
        'authType': 'password',
        'username': 'username',
        'password': 'password',
        'ip': 'scanner.company.tld',
        'port': 443,
        'useProxy': 'false',
        'verifyHost': 'false',
        'enabled': 'true',
        'managedPlugins': 'false',
        'agentCapable': 'false',
        'zones': [{'id': 1}, {'id': 2}, {'id': 3}],
        'nessusManagerOrgs': [{'id': 1}, {'id': 2}, {'id': 3}],
    }
    assert resp == expected
@pytest.mark.vcr()
def test_scanners_create_success(admin, scanner):
    """Schema check of the document returned by scanners.create (via fixture)."""
    assert isinstance(scanner, dict)
    # SecurityCenter returns most scalar fields as strings, even numerics.
    check(scanner, 'id', str)
    check(scanner, 'name', str)
    check(scanner, 'description', str)
    check(scanner, 'ip', str)
    check(scanner, 'port', str)
    check(scanner, 'useProxy', str)
    check(scanner, 'enabled', str)
    check(scanner, 'verifyHost', str)
    check(scanner, 'managePlugins', str)
    check(scanner, 'authType', str)
    # Optional credential/version fields may legitimately be null.
    check(scanner, 'cert', str, allow_none=True)
    check(scanner, 'username', str, allow_none=True)
    check(scanner, 'password', str, allow_none=True)
    check(scanner, 'version', str, allow_none=True)
    check(scanner, 'webVersion', str, allow_none=True)
    check(scanner, 'admin', str)
    check(scanner, 'msp', str)
    check(scanner, 'numScans', str)
    check(scanner, 'numHosts', str)
    check(scanner, 'numSessions', str)
    check(scanner, 'numTCPSessions', str)
    check(scanner, 'loadAvg', str)
    # uptime is the lone integer field in this document.
    check(scanner, 'uptime', int)
    check(scanner, 'status', str)
    check(scanner, 'pluginSet', str, allow_none=True)
    check(scanner, 'loadedPluginSet', str, allow_none=True)
    check(scanner, 'serverUUID', str, allow_none=True)
    check(scanner, 'createdTime', str)
    check(scanner, 'modifiedTime', str)
    # Nested zone and organization summaries.
    check(scanner, 'zones', list)
    for z in scanner['zones']:
        check(z, 'id', str)
        check(z, 'name', str)
        check(z, 'description', str)
    check(scanner, 'nessusManagerOrgs', list)
    for o in scanner['nessusManagerOrgs']:
        check(o, 'id', str)
        check(o, 'name', str)
        check(o, 'description', str)
@pytest.mark.vcr()
def test_scanners_details_success(admin, scanner):
    """Fetching a scanner by id should return the full, correctly-typed
    scanner document.
    """
    s = admin.scanners.details(int(scanner['id']))
    assert isinstance(s, dict)
    # Fields the API may legitimately return as null.
    nullable = {'cert', 'username', 'password', 'version', 'webVersion',
                'pluginSet', 'loadedPluginSet', 'serverUUID'}
    fields = [
        ('id', str), ('name', str), ('description', str), ('ip', str),
        ('port', str), ('useProxy', str), ('enabled', str),
        ('verifyHost', str), ('managePlugins', str), ('authType', str),
        ('cert', str), ('username', str), ('password', str),
        ('version', str), ('webVersion', str), ('admin', str),
        ('msp', str), ('numScans', str), ('numHosts', str),
        ('numSessions', str), ('numTCPSessions', str), ('loadAvg', str),
        ('uptime', int), ('status', str), ('pluginSet', str),
        ('loadedPluginSet', str), ('serverUUID', str),
        ('createdTime', str), ('modifiedTime', str),
    ]
    for key, typ in fields:
        if key in nullable:
            check(s, key, typ, allow_none=True)
        else:
            check(s, key, typ)
    check(s, 'zones', list)
    for zone in s['zones']:
        for key in ('id', 'name', 'description'):
            check(zone, key, str)
    check(s, 'nessusManagerOrgs', list)
    for org in s['nessusManagerOrgs']:
        for key in ('id', 'name', 'description'):
            check(org, key, str)
@pytest.mark.vcr()
def test_scanners_edit_success(admin, scanner):
    """Editing a scanner's name should persist and the response should
    still carry the full, correctly-typed scanner document.
    """
    s = admin.scanners.edit(int(scanner['id']), name='Updated Scanner Name')
    assert isinstance(s, dict)
    check(s, 'id', str)
    check(s, 'name', str)
    assert s['name'] == 'Updated Scanner Name'
    # Fields the API may legitimately return as null.
    nullable = {'cert', 'username', 'password', 'version', 'webVersion',
                'pluginSet', 'loadedPluginSet', 'serverUUID'}
    fields = [
        ('description', str), ('ip', str), ('port', str),
        ('useProxy', str), ('enabled', str), ('verifyHost', str),
        ('managePlugins', str), ('authType', str), ('cert', str),
        ('username', str), ('password', str), ('version', str),
        ('webVersion', str), ('admin', str), ('msp', str),
        ('numScans', str), ('numHosts', str), ('numSessions', str),
        ('numTCPSessions', str), ('loadAvg', str), ('uptime', int),
        ('status', str), ('pluginSet', str), ('loadedPluginSet', str),
        ('serverUUID', str), ('createdTime', str), ('modifiedTime', str),
    ]
    for key, typ in fields:
        if key in nullable:
            check(s, key, typ, allow_none=True)
        else:
            check(s, key, typ)
    check(s, 'zones', list)
    for zone in s['zones']:
        for key in ('id', 'name', 'description'):
            check(zone, key, str)
    check(s, 'nessusManagerOrgs', list)
    for org in s['nessusManagerOrgs']:
        for key in ('id', 'name', 'description'):
            check(org, key, str)
@pytest.mark.vcr()
def test_scanners_delete_success(admin, scanner):
    """Deleting an existing scanner by id should not raise."""
    admin.scanners.delete(int(scanner['id']))
@pytest.mark.vcr()
def test_scanners_list_success(admin, scanner):
    """Every scanner summary returned by list() carries the expected
    string fields.
    """
    for item in admin.scanners.list():
        for key in ('id', 'name', 'description', 'status'):
            check(item, key, str)
@pytest.mark.vcr()
@pytest.mark.skip(reason='No Agent Scanner in test env')
def test_scanners_agent_scans_success(admin, scanner):
    """agent_scans() should return a list of scan summaries."""
    scans = admin.scanners.agent_scans(int(scanner['id']), '*')
    assert isinstance(scans, list)
    for scan in scans:
        check(scan, 'name', str)
        check(scan, 'numResults', int)
@pytest.mark.vcr()
def test_scanners_update_status(admin, scanner):
    """update_status() should report id/name/description/status for each
    scanner as strings.
    """
    statuses = admin.scanners.update_status()
    assert isinstance(statuses, list)
    for item in statuses:
        for key in ('id', 'name', 'description', 'status'):
            check(item, key, str)
<filename>core/app.py<gh_stars>10-100
from Cookie import Cookie
from random import randint
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import SESSION_KEY
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.utils.simplejson import loads, dumps
from socketio import socketio_manage
from socketio.mixins import BroadcastMixin
from socketio.namespace import BaseNamespace
from redis import Redis, ConnectionPool
from core.game import registry
# Shared Redis connection used to persist the live user set.
redis = Redis(connection_pool=ConnectionPool())
# Redis hash key mapping user id -> JSON-serialized user dict.
USERS_KEY = "gamblor-users"
class GameNamespace(BaseNamespace, BroadcastMixin):
    """
    Per-user socket.io namespace for event handlers.
    """

    def on_start(self):
        """
        Set up the initial user. We only have access to the
        HTTP environment, so we use the session ID in the cookie
        and look up a user with it. If a valid user is found, we
        add them to the user set in redis, and broadcast their
        join event to everyone else.
        """
        try:
            cookie = Cookie(self.environ["HTTP_COOKIE"])
            session_key = cookie[settings.SESSION_COOKIE_NAME].value
            session = Session.objects.get(session_key=session_key)
            user_id = session.get_decoded().get(SESSION_KEY)
            user = User.objects.get(id=user_id)
        except (KeyError, ObjectDoesNotExist):
            # Missing cookie/session or no matching user: anonymous socket.
            self.user = None
        else:
            self.user = {
                "id": user.id,
                "name": user.username,
                # Random starting coordinates for the new user.
                "x": randint(780, 980),
                "y": randint(100, 300),
            }
            self.broadcast_event_not_me("join", self.user)
            redis.hset(USERS_KEY, self.user["id"], dumps(self.user))
        # Send the current set of users to the new socket.
        self.emit("users", [loads(u) for u in redis.hvals(USERS_KEY)])
        # Also tell the new socket who is seated at each running game.
        for game in registry.values():
            if game.players:
                self.emit("game_users", game.name, game.players.keys())

    def on_chat(self, message):
        """Relay a chat message to every socket (only if authenticated)."""
        if self.user:
            self.broadcast_event("chat", self.user, message)

    def on_move(self, pos):
        """Persist the user's new position and notify the other sockets."""
        if self.user:
            self.user.update(pos)
            redis.hset(USERS_KEY, self.user["id"], dumps(self.user))
            self.broadcast_event_not_me("move", self.user)

    def recv_disconnect(self):
        """
        Socket disconnected - if the user was authenticated, remove
        them from redis and broadcast their leave event.
        """
        self.disconnect()
        if self.user:
            redis.hdel(USERS_KEY, self.user["id"])
            self.broadcast_event_not_me("leave", self.user)

    def on_bet(self, game_name, amount, bet_args):
        """
        Takes a bet for a game.
        """
        # Validate with explicit checks rather than ``assert`` so the
        # guards survive running under ``python -O`` (asserts stripped);
        # invalid input is silently ignored, as before.
        if self.user is None:            # Must have a user
            return
        if not str(amount).isdigit():    # Amount must be digit
            return
        if int(amount) <= 0:             # Amount must be positive
            return
        if game_name not in registry:    # Game must be valid
            return
        amount = int(amount)
        user = User.objects.get(id=self.user["id"])
        user.account.balance -= amount
        if user.account.balance < 0:
            self.emit("notice", "You don't have that amount to bet")
        else:
            game = registry[game_name]
            if game.bet(self, amount, bet_args):
                # Only persist the debited balance if the bet was accepted.
                user.account.save()
                self.broadcast_event("game_users", game_name, game.players.keys())
class GameApplication(object):
    """
    Standard socket.io wsgi application.
    """

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"]
        if not path.startswith("/socket.io/"):
            # Anything outside the socket.io namespace is a 404.
            start_response('404 Not Found', [])
            return ['<h1>Not Found</h1>']
        # Hand socket.io traffic over to the socketio manager.
        socketio_manage(environ, {"": GameNamespace})
|
from plex import Plex
from plex.lib.six.moves.urllib_parse import urlencode
from plex_activity.sources.base import Source
import json
import logging
import re
import sys
import time
import websocket
# Module-level logger for this activity source.
log = logging.getLogger(__name__)
# Matches PMS status titles like: Scanning the "Movies" section
SCANNING_REGEX = re.compile('Scanning the "(?P<section>.*?)" section', re.IGNORECASE)
# Matches the status title emitted when a library scan finishes.
SCAN_COMPLETE_REGEX = re.compile('Library scan complete', re.IGNORECASE)
# Maps the numeric "state" field of a timeline entry to the suffix of
# the 'websocket.timeline.<state>' event emitted for it.
TIMELINE_STATES = {
    0: 'created',
    1: 'processing',
    2: 'matching',
    3: 'downloading',
    4: 'loading',
    5: 'finished',
    6: 'analyzing',
    9: 'deleted'
}
class ConnectionState(object):
    """Enumeration of the notification-channel connection states."""
    disconnected = 'disconnected'
    connecting = 'connecting'
    connected = 'connected'
class WebSocket(Source):
    """Activity source backed by the PMS notifications websocket.

    Connects to ``/:/websockets/notifications`` on the configured Plex
    server, decodes the JSON messages and re-emits them as the
    ``websocket.*`` events listed in :attr:`events`.
    """
    name = 'websocket'

    # Events piped through to the owning activity instance.
    events = [
        'websocket.playing',
        'websocket.scanner.started',
        'websocket.scanner.progress',
        'websocket.scanner.finished',
        'websocket.timeline.created',
        'websocket.timeline.matching',
        'websocket.timeline.downloading',
        'websocket.timeline.loading',
        'websocket.timeline.finished',
        'websocket.timeline.analyzing',
        'websocket.timeline.deleted'
    ]

    # Frame opcodes that carry an actual message payload.
    opcode_data = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)

    def __init__(self, activity):
        super(WebSocket, self).__init__()

        self.state = ConnectionState.disconnected
        self.ws = None

        # Pipe events to the main activity instance
        self.pipe(self.events, activity)

    def connect(self):
        """Open the notifications websocket, closing any previous socket.

        On failure, resets :attr:`state` to ``disconnected`` and re-raises
        the underlying exception.
        """
        uri = 'ws://%s:%s/:/websockets/notifications' % (
            Plex.configuration.get('server.host', '127.0.0.1'),
            Plex.configuration.get('server.port', 32400)
        )

        params = {}

        # Set authentication token (if one is available)
        if Plex.configuration['authentication.token']:
            params['X-Plex-Token'] = Plex.configuration['authentication.token']

        # Append parameters to uri
        if params:
            uri += '?' + urlencode(params)

        # Ensure existing websocket has been closed
        if self.ws:
            try:
                self.ws.close()
            except Exception as ex:
                log.info('Unable to close existing websocket: %s', ex)

        # Update state
        self.state = ConnectionState.connecting

        # Try connect to notifications websocket
        try:
            self.ws = websocket.create_connection(uri)

            # Update state
            self.state = ConnectionState.connected
        except Exception as ex:
            # Reset state
            self.ws = None
            self.state = ConnectionState.disconnected

            raise ex

    def connect_retry(self):
        """Attempt to connect, retrying up to 10 times with linear backoff.

        Returns ``True`` on success, ``False`` when every attempt failed or
        a client error (4xx) made retrying pointless.
        """
        if self.state == ConnectionState.connected:
            return True

        log.debug('Connecting...')

        attempts = 0
        exc_info = None
        ex = None

        while self.state == ConnectionState.disconnected and attempts < 10:
            try:
                attempts += 1

                # Attempt socket connection
                self.connect()

                # Connected
                log.debug('Connected')
                return True
            except websocket.WebSocketBadStatusException as err:
                # Copy to ``ex``: Python 3 unbinds the ``as`` name after
                # the handler, and ``ex`` is referenced below.
                ex = err
                exc_info = sys.exc_info()

                # Break on client errors (not authorized, etc..)
                if err.status_code >= 400 and err.status_code < 500:
                    break
            except Exception as err:
                ex = err
                exc_info = sys.exc_info()

            # Retry socket connection
            sleep = int(round(attempts * 1.2, 0))

            log.debug('Connection failed: %s (retrying in %d seconds)', ex, sleep)
            time.sleep(sleep)

        # Check if we are connected
        if self.state == ConnectionState.connected:
            log.debug('Connected')
            return True

        # Display connection error
        log.error('Unable to connect to the notification channel: %s (after %d attempts)', ex, attempts, exc_info=exc_info)
        return False

    def run(self):
        """Main loop: connect, then process frames until unrecoverable."""
        # Connect to notification channel
        if not self.connect_retry():
            return

        # Receive notifications from channel
        while True:
            try:
                self.process(*self.receive())
            except websocket.WebSocketConnectionClosedException:
                # Try reconnect to notification channel
                if not self.connect_retry():
                    return

    def receive(self):
        """Read one frame and return an ``(opcode, data)`` tuple.

        Raises ``websocket.WebSocketException`` on an invalid frame.
        """
        frame = self.ws.recv_frame()

        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in self.opcode_data:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            self.ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            self.ws.pong("Hi!")

        # Fallback for PING and any unhandled opcode (e.g. PONG):
        # previously unhandled opcodes implicitly returned ``None``,
        # making ``process(*self.receive())`` in run() raise TypeError.
        return None, None

    def process(self, opcode, data):
        """Decode one message and dispatch it.

        Returns ``True`` when the message was handled/emitted, ``False``
        on non-data frames or undecodable payloads.
        """
        if opcode not in self.opcode_data:
            return False

        try:
            info = json.loads(data)
        except UnicodeDecodeError as ex:
            log.warn('Error decoding message from websocket: %s' % ex, extra={
                'event': {
                    'module': __name__,
                    'name': 'process.loads.unicode_decode_error',
                    'key': '%s:%s' % (ex.encoding, ex.reason)
                }
            })
            log.debug(data)
            return False
        except Exception as ex:
            log.warn('Error decoding message from websocket: %s' % ex, extra={
                'event': {
                    'module': __name__,
                    'name': 'process.load_exception',
                    # ``Exception.message`` doesn't exist on Python 3
                    'key': getattr(ex, 'message', str(ex))
                }
            })
            log.debug(data)
            return False

        # Handle modern messages (PMS 1.3.0+)
        if type(info.get('NotificationContainer')) is dict:
            info = info['NotificationContainer']

        # Process message
        m_type = info.get('type')

        if not m_type:
            log.debug('Received message with no "type" parameter: %r', info)
            return False

        # Pre-process message (if function exists)
        process_func = getattr(self, 'process_%s' % m_type, None)

        if process_func and process_func(info):
            return True

        # Emit raw message
        return self.emit_notification('%s.notification.%s' % (self.name, m_type), info)

    def process_playing(self, info):
        """Emit a 'playing' notification for each child entry."""
        children = info.get('_children') or info.get('PlaySessionStateNotification')

        if not children:
            log.debug('Received "playing" message with no children: %r', info)
            return False

        return self.emit_notification('%s.playing' % self.name, children)

    def process_progress(self, info):
        """Emit a scanner progress event per child notification."""
        children = info.get('_children') or info.get('ProgressNotification')

        if not children:
            log.debug('Received "progress" message with no children: %r', info)
            return False

        for notification in children:
            self.emit('%s.scanner.progress' % self.name, {
                'message': notification.get('message')
            })

        return True

    def process_status(self, info):
        """Translate status titles into scanner started/finished events."""
        children = info.get('_children') or info.get('StatusNotification')

        if not children:
            log.debug('Received "status" message with no children: %r', info)
            return False

        # Process children
        count = 0

        for notification in children:
            title = notification.get('title')

            if not title:
                continue

            # Scan complete message
            if SCAN_COMPLETE_REGEX.match(title):
                self.emit('%s.scanner.finished' % self.name)
                count += 1
                continue

            # Scanning message
            match = SCANNING_REGEX.match(title)

            if not match:
                continue

            section = match.group('section')

            if not section:
                continue

            self.emit('%s.scanner.started' % self.name, {'section': section})
            count += 1

        # Validate result
        if count < 1:
            log.debug('Received "status" message with no valid children: %r', info)
            return False

        return True

    def process_timeline(self, info):
        """Emit one 'timeline.<state>' event per recognized child entry."""
        children = info.get('_children') or info.get('TimelineEntry')

        if not children:
            log.debug('Received "timeline" message with no children: %r', info)
            return False

        # Process children
        count = 0

        for entry in children:
            state = TIMELINE_STATES.get(entry.get('state'))

            if not state:
                continue

            self.emit('%s.timeline.%s' % (self.name, state), entry)
            count += 1

        # Validate result
        if count < 1:
            log.debug('Received "timeline" message with no valid children: %r', info)
            return False

        return True

    #
    # Helpers
    #

    def emit_notification(self, name, info=None):
        """Emit ``name`` once per child when ``info`` contains children,
        otherwise emit ``info`` itself (or nothing). Always returns True.
        """
        if info is None:
            info = {}

        # Emit children
        children = self._get_children(info)

        if children:
            for child in children:
                self.emit(name, child)

            return True

        # Emit objects
        if info:
            self.emit(name, info)
        else:
            self.emit(name)

        return True

    @staticmethod
    def _get_children(info):
        """Extract the list of child entries from a legacy or modern
        notification payload, or ``None`` when there are no children.
        """
        if type(info) is list:
            return info

        if type(info) is not dict:
            return None

        # Return legacy children
        if info.get('_children'):
            return info['_children']

        # Search for modern children container
        for key, value in info.items():
            key = key.lower()

            if (key.endswith('entry') or key.endswith('notification')) and type(value) is list:
                return value

        return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.