content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
# Authors: Jose C. Garcia Alanis <alanis.jcg@gmail.com>
#
# License: BSD-3-Clause
# -- WIP -- {
fig, ax = plt.subplots()
# Median channel-correlation matrix rendered as a heatmap.
heat = ax.imshow(np.median(channel_corrs, axis=0), cmap="YlGn")
# Attach a colorbar describing the correlation scale.
colorbar = ax.figure.colorbar(heat, ax=ax)
colorbar.ax.set_ylabel('Channels correlation', rotation=-90, va="bottom")
# Label both axes with the channel names (x labels rotated for readability).
ax.set_xticks(np.arange(n_channels), labels=channels, rotation=-90)
ax.set_yticks(np.arange(n_channels), labels=channels)
# }
|
nilq/baby-python
|
python
|
class Aws_Utils:
    """Helpers for running ad-hoc code in AWS Lambda and zipping folders."""

    @staticmethod
    def run_code_in_lambda(code):
        """Deploy *code* as a temporary lambda, invoke it once, and return the result."""
        scratch_dir = 'temp_code'
        code_path = 'temp_code/code.py'
        archive_path = 'temp_code.zip'

        def write_scratch_files():
            # The scratch folder may not exist on first use.
            if not os.path.exists(scratch_dir):
                os.mkdir(scratch_dir)
            with open(code_path, "w+") as handle:
                handle.write(code)

        def remove_scratch_files():
            os.remove(code_path)
            os.remove(archive_path)
            os.rmdir(scratch_dir)

        write_scratch_files()
        name = 'dynamic_code'
        role = 'arn:aws:iam::244560807427:role/lambda_basic_execution'
        handler = 'code.dynamic_method'
        s3_bucket = 'gs-lambda-tests'
        s3_key = 'dinis/lambda-using-dynamic-code.zip'
        aws = Aws_Cli()
        # Drop any stale function of the same name before deploying anew.
        aws.lambda_delete_function(name)
        aws.s3_upload_folder(scratch_dir, s3_bucket, s3_key)
        aws.lambda_create_function(name, role, handler, s3_bucket, s3_key)
        (result, response) = aws.lambda_invoke_function(name, {})
        aws.lambda_delete_function(name)
        remove_scratch_files()
        return result

    @staticmethod
    def zip_folder(root_dir):
        """Zip *root_dir* and return the path of the created archive."""
        return shutil.make_archive(root_dir, "zip", root_dir)
|
nilq/baby-python
|
python
|
#
# resolution(KB, q): Given a propositional knowledge base and query, return
# whether the query can be inferred from the knowledgebase using resolution.
# The implementation is more efficient than pl_resolution in the AIMA code.
# KnowledgeBasedAgent: An abstract class that makes decisions to navigate
# through a world based on its knowledge.
#
# Compiled against Python 2.7
# Author: Stephen Bahr (sbahr@bu.edu)
import collections
import logic
# Possible game outcomes, reported via GameOver.result.
RESULT_DEATH = 0    # the agent died
RESULT_GIVE_UP = 1  # no safe or not-provably-unsafe location left to try
RESULT_WIN = 2      # the agent won
class GameOver(Exception):
    """Raised when the game ends; carries one of the RESULT_* constants."""

    def __init__(self, result):
        """Store the outcome (one of the RESULT constants above) on the exception."""
        self.result = result
# Utility functions
def normalize(clause):
    """Canonicalise *clause* as a frozenset of literal strings (order-independent)."""
    return frozenset(str(literal) for literal in logic.disjuncts(clause))
def negate(literal):
    """Return the complementary literal: strip a leading '~' or prepend one."""
    return literal[1:] if literal[0] == '~' else '~' + literal
def resolution(KB, alpha):
    """Apply the resolution algorithm to determine if alpha can be inferred from KB.

    Proof by refutation: ~alpha is converted to CNF and resolved against the
    KB until either the empty clause is derived (KB |- alpha) or no new
    clauses appear (alpha cannot be proven).

    Args:
      KB: an instance of logic.PropKB
      alpha: an instance of logic.Expr
    Return True if KB |- alpha
    """
    # We do not want to waste effort resolving clauses of the KB against
    # one another directly, we only want to resolve clauses that contain
    # information derived from alpha. tainted_clauses will be the set
    # we grow.
    tainted_clauses = set(normalize(clause)
                          for clause in logic.conjuncts(logic.to_cnf(~alpha)))
    KB_clauses = [normalize(clause) for clause in KB.clauses]
    # NOTE(review): `new` is created once and accumulates resolvents across
    # iterations of the while-loop; old resolvents get re-surveyed each round.
    # Redundant but harmless -- confirm this is intentional.
    new = set()
    while True:
        # clausesWith is a map from literals to clauses containing that literal.
        clausesWith = collections.defaultdict(list)
        for clause in list(tainted_clauses) + KB_clauses:
            for literal in clause:
                clausesWith[literal].append(clause)
        # For each tainted clause, add a pair of that clause and any
        # tainted or KB clause that matches it (i.e. opposes on one literal).
        pairs = []
        for clause0 in tainted_clauses:
            for literal in clause0:
                for clause1 in clausesWith[negate(literal)]:
                    pairs.append((literal, clause0, clause1))
        # Resolve all the pairs found above. If any result in None, the
        # resolution is a bust (provides no new information).
        # If any result in False (empty set), we have reached a contradiction
        # and proven our goal.
        for literal, clause0, clause1 in pairs:
            result = resolve(clause0, clause1, literal)
            if result is not None:
                if result == set(): return True
                else: new.add(frozenset(result))
        # We now survey all the new clauses. In order to want to keep them,
        # they must not be a superset of any already-known clause (since that
        # would provide no new information).
        added = False
        for clause in new:
            if not any(old_clause.issubset(clause)
                       for old_clause in list(tainted_clauses) + KB_clauses):
                tainted_clauses.add(clause)
                added = True
        # If we have not found any new information, we've reached the end
        # and cannot prove our goal (it may be True, it may be False, but we
        # can't definitively say either way).
        if not added: return False
def resolve(clause0, clause1, literal):
    """Resolve two clauses on *literal*.

    Each clause is a sequence of literal strings; clause0 must contain
    *literal* and clause1 its negation.

    Returns:
      None if the remainders clash on another literal (every resolvent would
      be a tautology, i.e. no information);
      the empty set if the clauses were exactly literal / not-literal
      (a contradiction);
      otherwise the set of remaining literals (the resolvent).
    """
    remainder0 = set(clause0)
    remainder1 = set(clause1)
    remainder0.remove(literal)
    # Negation is inlined: strip a leading '~' or prepend one.
    remainder1.remove(literal[1:] if literal[0] == '~' else '~' + literal)
    # A second complementary pair means every resolvent is trivially true.
    if any((rest[1:] if rest[0] == '~' else '~' + rest) in remainder1
           for rest in remainder0):
        return None
    return remainder0.union(remainder1)
class KnowledgeBasedAgent:
    """Abstract agent that navigates a world based on a propositional KB.

    Subclasses define what counts as safe / not-provably-unsafe / unvisited;
    choose_location() then decides where to move next.
    NOTE: Python 2 code (uses print statements).
    """
    def __init__(self):
        # Knowledge base accumulated while exploring the world.
        self.KB = logic.PropKB()
    def safe(self):
        """Return the set of safe locations to move to."""
        raise NotImplementedError()
    def not_unsafe(self):
        """Return the set of locations that can't be proven unsafe to move to."""
        raise NotImplementedError()
    def unvisited(self):
        """Return the set of locations that haven't yet been visited."""
        raise NotImplementedError()
    def choose_location(self):
        """Return the next location to explore in the search for gold."""
        unvisited_locations = self.unvisited()
        # Prefer provably-safe unvisited squares; min() makes the choice deterministic.
        safe_moves = self.safe().intersection(unvisited_locations)
        if safe_moves:
            location = min(safe_moves)
            print 'Moving to safe location', location
        else:
            # Fall back to squares that merely cannot be proven unsafe.
            not_unsafe_moves = self.not_unsafe().intersection(unvisited_locations)
            if not_unsafe_moves:
                location = min(not_unsafe_moves)
                print 'Taking a risk; moving to a not-unsafe location', location
            else:
                # Nothing left worth trying: give up.
                print 'Nowhere left to go'
                raise GameOver(RESULT_GIVE_UP)
        return location
|
nilq/baby-python
|
python
|
from django.db import models
from ...apps import UFDLCoreAppConfig
class LicenceQuerySet(models.QuerySet):
    """
    A query-set of data-set licences.

    Currently adds no custom query methods; it is installed as the manager
    (Licence.objects) so licence-specific queries can be added later.
    """
    pass
class Licence(models.Model):
    """
    The licence for a data-set.
    """
    # The name of the licence (unique; enforced by the Meta constraint below)
    name = models.CharField(max_length=100)
    # The URL of the licence's homepage
    url = models.URLField()
    # The permissions of the licence
    permissions = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Permission",
                                         related_name="+")
    # The limitations of the licence
    limitations = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Limitation",
                                         related_name="+")
    # The conditions of the licence
    conditions = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Condition",
                                        related_name="+")
    # The domains of the licence
    domains = models.ManyToManyField(f"{UFDLCoreAppConfig.label}.Domain",
                                     related_name="+")
    objects = LicenceQuerySet.as_manager()

    class Meta:
        constraints = [
            # Ensure that each licence has a unique name
            models.UniqueConstraint(name="unique_licence_names",
                                    fields=["name"])
        ]
|
nilq/baby-python
|
python
|
"""
This script contains all the functions related to the model
"""
import tensorflow as tf
import numpy as np
import random
from math import ceil
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, LeakyReLU, Dropout
from game import MOVES_POSSIBLE, ALL_BLOCK_POSSIBLE, GRID_SIZE_X, GRID_SIZE_Y
from tensorflow.keras.utils import to_categorical
from copy import deepcopy
EPS: float = 0.4  # probability of playing a random move (exploration rate)
# One-hot encodings of the four possible moves (model output space).
LIST_ACTIONS: [int] = [[1, 0, 0, 0],
                       [0, 1, 0, 0],
                       [0, 0, 1, 0],
                       [0, 0, 0, 1]]
MUTATION_RATE: float = 0.0001     # per-gene probability of a random mutation
MIN_RANGE_MUTATION: float = -5.0  # lower bound of a mutated weight value
MAX_RANGE_MUTATION: float = 5.0   # upper bound of a mutated weight value
# Composition of each new generation:
#   "parent"   - fraction of best parents carried over,
#   "children" - fraction produced by crossover (must be <= "parent"),
#   "new"      - fraction of brand-new random models.
# The three fractions must sum to 1.
GENERATION_PRESET: dict = {"parent": 0.4, "children": 0.4, "new": 0.2}  # this preset is a model for a new generation
# model
class Model2048(Sequential):
    """
    Convolutional policy network for playing 2048.

    Input: a one-hot encoded grid of shape
    (GRID_SIZE_Y, GRID_SIZE_X, ALL_BLOCK_POSSIBLE); output: a softmax over
    the four possible moves.
    """

    def __init__(self):
        super().__init__()
        # Horizontal (1x2) then vertical (2x1) convolutions combine
        # neighbouring tiles in both directions before the dense head.
        # (Two earlier experimental architectures were removed as dead code.)
        self.add(
            Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
                   (1, 2),
                   padding="same",
                   input_shape=(GRID_SIZE_Y, GRID_SIZE_X, ALL_BLOCK_POSSIBLE),
                   )
        )
        self.add(LeakyReLU())
        self.add(
            Conv2D(ALL_BLOCK_POSSIBLE * MOVES_POSSIBLE,
                   (2, 1),
                   padding="same",
                   )
        )
        self.add(LeakyReLU())
        self.add(
            Conv2D(ALL_BLOCK_POSSIBLE,  # * MOVES_POSSIBLE
                   (1, 1),
                   padding="same",
                   )
        )
        self.add(LeakyReLU())
        self.add(Flatten())
        self.add(Dropout(0.2))
        self.add(Dense(256))
        self.add(LeakyReLU())
        self.add(Dropout(0.2))
        # One softmax probability per possible move.
        self.add(Dense(4, activation="softmax"))
        self.compile(optimizer="RMSprop", loss="huber_loss")

    def save_model(self, path: str) -> None:
        """
        This function save the model as a h5 file
        :param path: the path where to save the model
        :return: None
        """
        self.save(path)

    def load_performance(self, path) -> None:
        """
        Load the weights of a previously saved model into this instance.

        Loading only the weights (rather than replacing the object with
        tf.keras.models.load_model) keeps this instance and its compile
        settings intact.
        :param path: the path of the model to load
        :return: None
        """
        # BUG FIX: the original read weights from a global ``m`` (defined only
        # when the module runs as a script), raising NameError otherwise.
        loaded = tf.keras.models.load_model(path)
        self.set_weights(loaded.get_weights())

    # @tf.function
    def model_action(self, grid):
        """
        Run the network on a single grid.
        :param grid: a 2048 grid
        :return: the model's softmax scores for the four moves
        """
        return self(np.array([grid_to_input(grid)], dtype=np.float32), training=False).numpy()[0]

    def take_action(self, grid, eps: float = EPS):
        """
        Epsilon-greedy action selection: sometimes random, sometimes the model.
        :param grid: a 2048 grid
        :param eps: probability of playing a random move
        :return: the action taken
        """
        if random.random() < eps:
            # explore: pick one of the four one-hot actions at random
            action = deepcopy(random.choice(LIST_ACTIONS))
        else:
            # exploit: ask the network for its move scores
            # NOTE(review): this branch returns raw softmax scores while the
            # random branch returns a one-hot list -- confirm callers argmax.
            action = self.model_action(grid)
        return action
def normalization(x, min_range, max_range):
    """
    Linearly rescale the values of array *x* into [min_range, max_range].
    :param x: array of values to normalize
    :param min_range: minimum of the target range
    :param max_range: maximum of the target range
    :return: the normalized array
    """
    flat = x.flatten().tolist()
    lo, hi = min(flat), max(flat)
    span = max_range - min_range
    return min_range + ((x - lo) * span) / (hi - lo)
def grid_to_input(grid):
    """
    Transform a 2048 grid into a one-hot model input.
    :param grid: a 2048 grid (2-D array of tile values)
    :return: the input for the model
    """
    # NOTE(review): an empty cell (0) maps to log2(0 + 1) - 1 == -1, which
    # to_categorical one-hot encodes into the *last* of the 18 classes;
    # deleting that last class below therefore encodes "empty" as all zeros.
    # Confirm the intended class for real tiles: log2(v + 1) - 1 only
    # approximates log2(v) - 1.
    grid = to_categorical(np.log2(grid + 1) - 1, 18).tolist()
    # Drop the last (wrap-around) class from every cell. Iterating the actual
    # grid dimensions generalizes the previous hard-coded 4x4 loops.
    for row in grid:
        for cell in row:
            del cell[-1]
    return np.array(grid)
# genetic algorithm
def model_crossover(parent1_weight: list, parent2_weight: list) -> list:
    """
    Make a single-point crossover of two models' weights.
    :param parent1_weight: the weights of the first model (list of numpy arrays)
    :param parent2_weight: the weights of the second model
    :return: new weights combining a prefix of parent 1 with a suffix of parent 2
    """
    new_weight: list = []
    # remember the per-layer shapes so the flat genome can be rebuilt
    shapes: [tuple] = [a.shape for a in parent1_weight]
    # flatten both parents into gene vectors
    genes1: np.array = np.concatenate([a.flatten() for a in parent1_weight])
    genes2: np.array = np.concatenate([a.flatten() for a in parent2_weight])
    # pick the single crossover point
    split = random.randint(0, len(genes1) - 1)
    # child = parent1 genes before the split, parent2 genes after it
    child1_genes = np.array(genes1[0:split].tolist() + genes2[split:].tolist())
    # rebuild the per-layer arrays with their original shapes
    index = 0
    for shape in shapes:
        # np.prod replaces np.product (deprecated, removed in NumPy 2.0)
        size = np.prod(shape)
        new_weight.append(child1_genes[index: index + size].reshape(shape))
        index += size
    return new_weight
def model_mutation(model_weight: list,
                   mutation_rate: float = MUTATION_RATE,
                   min_range_mutation: float = MIN_RANGE_MUTATION,
                   max_range_mutation: float = MAX_RANGE_MUTATION) -> list:
    """
    Randomly perturb individual weights ("genes") of a model.
    :param model_weight: model weights (list of numpy arrays) to mutate
    :param mutation_rate: 1 = 100% the probability of a mutation per gene
    :param min_range_mutation: the minimum value of a random mutation
    :param max_range_mutation: the maximum value of a random mutation
    :return: the mutated weights, reshaped like the input
    """
    # remember the per-layer shapes so the flat genome can be rebuilt
    shapes: [tuple] = [a.shape for a in model_weight]
    # flatten all weights into one gene vector
    genes: np.array = np.concatenate([a.flatten() for a in model_weight])
    # mutate each gene independently with probability mutation_rate
    for i in range(len(genes)):
        if random.uniform(0, 1) < mutation_rate:
            genes[i] = random.uniform(min_range_mutation, max_range_mutation)
    # rebuild the per-layer arrays from the flat genome
    new_weight: list = []
    index = 0
    for shape in shapes:
        # np.prod replaces np.product (deprecated, removed in NumPy 2.0)
        size = np.prod(shape)
        new_weight.append(genes[index: index + size].reshape(shape))
        # BUG FIX: the offset was never advanced, so every layer was rebuilt
        # from the *start* of the genome instead of its own slice.
        index += size
    return new_weight
def new_generation(all_gen_weight: list,
                   all_gen_score: [int],
                   generation_preset: dict = None) -> list:
    """
    Build a new generation of model weights from an older generation.
    :param all_gen_weight: a list that contains all models' weights (a list of
        lists of arrays); index i must correspond with all_gen_score[i]
    :param all_gen_score: a list that contains the score of each model
    :param generation_preset: the composition preset for the generation
        (defaults to GENERATION_PRESET)
    :return: a new generation from an older generation
    """
    # fall back to the module default preset
    if generation_preset is None:
        generation_preset = GENERATION_PRESET
    # sort the scores from the biggest to the smallest
    best_all_gen_score = sorted(all_gen_score, reverse=True)
    # create a list that stores the best models
    best_models: list = []
    # select the best models
    for i in range(ceil(len(all_gen_weight) * generation_preset["parent"])):
        # index of the i-th best score in the original (unsorted) list
        # NOTE(review): list.index returns the *first* match, so duplicate
        # scores select the same model repeatedly -- confirm this is intended.
        index_best: int = all_gen_score.index(best_all_gen_score[i])
        # add the best model to the list of best models
        best_models.append(all_gen_weight[index_best])
    # create children by crossing neighbouring best models
    children_models: list = []
    for i in range(ceil(len(all_gen_weight) * generation_preset["children"])):
        # NOTE(review): assumes children count <= parent count (see the
        # GENERATION_PRESET comment); for i == 0 the -1 index wraps around and
        # pairs the best model with the worst selected parent.
        children_models.append(
            model_crossover(best_models[i], best_models[i - 1])
        )
    # mutate parents and children alike
    parent_children_list: list = best_models + children_models
    for i in range(len(parent_children_list)):
        parent_children_list[i] = model_mutation(parent_children_list[i])
    # add brand-new random models to keep diversity
    random_models: list = []
    for i in range(ceil(len(all_gen_weight) * generation_preset["new"])):
        _temp_m = Model2048()
        _temp_w = _temp_m.get_weights()
        random_models.append(_temp_w)
    # assemble the full new generation
    new_gen: list = parent_children_list + random_models
    return new_gen
if __name__ == '__main__':
    # Smoke test: build the network and print its layer summary.
    m = Model2048()
    m.summary()
|
nilq/baby-python
|
python
|
from Templates.make_excel import make2exel
import Main.Tenderplan.participants2excel as p2ex
import openpyxl
# Export tender participants into an Excel sheet:
# column 1 = company name, column 3 = won tenders, column 4 = other tenders.
excel_name = r'E:\Лиды_экспорт.xlsx'
# Create the workbook with the (Russian) header row.
make2exel(
    ['Название компании', 'ИНН', 'Список выигранных тендеров', 'Список остальных тендеров c участием'], excel_name)
wb = openpyxl.load_workbook(excel_name)
sheet = wb.active
participants_dict_list = p2ex.get_participants()
curr_row = 1
for x in range(len(participants_dict_list)):
    curr_row += 1
    print(participants_dict_list[x]['company'])
    sheet.cell(row=curr_row, column=1).value = participants_dict_list[x]['company']
    # Both tender columns start at the company's row and advance independently.
    part_row = win_row = curr_row
    for t in range(len(participants_dict_list[x]['tender_name'])):
        # Each tender entry is a one-item dict {tender_name: 'par'|'win'}.
        if list(participants_dict_list[x]['tender_name'][t].values())[0] == 'par':
            sheet.cell(row=part_row, column=4).value = (list(participants_dict_list[x]['tender_name'][t].keys())[0])
            part_row += 1
        else:
            sheet.cell(row=win_row, column=3).value = (list(participants_dict_list[x]['tender_name'][t].keys())[0])
            win_row += 1
            # NOTE(review): only won tenders advance the company cursor, so a
            # long 'par' list can be overwritten by the next company -- verify.
            curr_row += 1
    # if t > len(participants_dict_list[x]['tender_name']):
    #     curr_row += 1
wb.save(excel_name)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from HTMLParser import HTMLParser
import re
import os
import sys
import string
class Html2MarkdownParser(HTMLParser):
    """Convert a subset of HTML into Markdown.

    Start/end handlers are dispatched by tag name from handle_starttag /
    handle_endtag; the text inside the current tag accumulates in
    _handled_tag_body_data. NOTE: Python 2 code (old-style except syntax).
    """
    def __init__(self):
        self._markdown = ''               # markdown output built so far
        self._tag_stack = []              # currently open tags, innermost last
        self._tag_attr_data = {}          # attributes of the tag being handled
        self._handled_tag_body_data = ''  # text collected inside the current tag
        self._convertible_tags = ['a',
                                  'b', 'blockquote',
                                  'em',
                                  'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr',
                                  'ol',
                                  'p', 'pre',
                                  'strong',
                                  'ul']
        # FIXME: special characters
        HTMLParser.__init__(self)

    def _append_to_markdown(self, new_markdown):
        # Insert a separating space unless the output already ends in whitespace.
        if len(self._markdown) > 1:
            if re.match('\s', self._markdown[-1:]):
                self._markdown += new_markdown
            else:
                self._markdown += ' ' + new_markdown
        else:
            self._markdown += new_markdown

    # <a />
    def handle_start_a(self, attrs):
        self._tag_attr_data = dict(attrs)

    def handle_end_a(self):
        # [text](href "optional title")
        a_tag = ''
        a_tag += '[' + self._handled_tag_body_data + ']'
        a_tag += '(' + self._tag_attr_data.get('href')
        title = self._tag_attr_data.get('title')
        if title:
            a_tag += ' "' + title + '") '
        else:
            a_tag += ') '
        self._append_to_markdown(a_tag)

    # <b />
    def handle_end_b(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('*' + self._handled_tag_body_data + '*')

    # <blockquote />
    def handle_end_blockquote(self):
        # Prefix every line of the quoted body with '> '.
        blockquote_body = self._handled_tag_body_data.split(os.linesep)
        for blockquote_line in blockquote_body:
            blockquote_line = blockquote_line.strip()
            self._append_to_markdown('> ' + blockquote_line + os.linesep)

    # <em />
    def handle_end_em(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('*' + self._handled_tag_body_data + '*')

    # <h1 />
    def handle_end_h1(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('# ' + self._handled_tag_body_data + ' #' + os.linesep)

    # <h2 />
    def handle_end_h2(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('## ' + self._handled_tag_body_data + ' ##' + os.linesep)

    # <h3 />
    def handle_end_h3(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('### ' + self._handled_tag_body_data + ' ###' + os.linesep)

    # <h4 />
    def handle_end_h4(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('#### ' + self._handled_tag_body_data + ' ####' + os.linesep)

    # <h5 />
    def handle_end_h5(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('##### ' + self._handled_tag_body_data + ' #####' + os.linesep)

    # <h6 />
    def handle_end_h6(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('###### ' + self._handled_tag_body_data + ' ######' + os.linesep)

    # <hr />
    def handle_start_hr(self, attrs):
        self._append_to_markdown('* * *' + os.linesep)

    # <li />
    def handle_end_li(self):
        # List style depends on the enclosing list tag.
        if len(self._tag_stack):
            if self._tag_stack[-1] == 'ol':
                self._append_to_markdown('1. ' + self._handled_tag_body_data + os.linesep)
            elif self._tag_stack[-1] == 'ul':
                self._append_to_markdown('* ' + self._handled_tag_body_data + os.linesep)

    # <p />
    def handle_start_p(self, attrs):
        # Ensure exactly one blank line before a new paragraph.
        if len(self._markdown) > 1:
            if self._markdown[-2:] == '%s%s' % (os.linesep, os.linesep):
                pass
            elif self._markdown[-1:] == os.linesep:
                self._markdown += os.linesep
            else:
                self._markdown += os.linesep + os.linesep

    def handle_end_p(self):
        self._markdown += '%s%s' % (os.linesep, os.linesep)

    # <pre />
    def handle_end_pre(self):
        # Indent every line by four spaces (markdown code block).
        code_lines = self._handled_tag_body_data.split(os.linesep)
        for code_line in code_lines:
            code_line = code_line.strip()
            self._append_to_markdown('    ' + code_line + os.linesep)

    # <strong />
    def handle_end_strong(self):
        self._handled_tag_body_data = self._handled_tag_body_data.replace(os.linesep, ' ')
        self._append_to_markdown('**' + self._handled_tag_body_data + '**')

    ## ###
    def handle_starttag(self, tag, attrs):
        self._tag_stack.append(tag)
        try:
            # NOTE(review): eval-based dispatch executes
            # 'self.handle_start_<tag>(attrs)'; with hostile tag names this is
            # an injection risk -- getattr() would be safer.
            eval('self.handle_start_' + tag + '(attrs)')
        except AttributeError, e:
            pass

    def handle_endtag(self, tag):
        self._tag_stack.pop()
        try:
            # NOTE(review): same eval-dispatch concern as handle_starttag.
            eval('self.handle_end_' + tag + '()')
            # Collapse three successive CRs into two before moving on
            while len(self._markdown) > 2 and \
                  self._markdown[-3:] == '%s%s%s' % (os.linesep, os.linesep, os.linesep):
                self._markdown = self._markdown[:-3] + '%s%s' % (os.linesep, os.linesep)
        except AttributeError, e:
            pass
        # Reset per-tag state for the next element.
        self._tag_attr_data = {}
        self._handled_tag_body_data = ''

    def handle_data(self, data):
        # Strip each line of the text node, preserving line structure.
        data = os.linesep.join(map(string.strip, data.strip().split(os.linesep)))
        if len(self._tag_stack) and self._tag_stack[-1] not in ['p']:
            self._handled_tag_body_data += data
        else:
            self._append_to_markdown(data)

    def get_markdown(self):
        """Return the accumulated markdown, terminated by a single newline."""
        return self._markdown.rstrip() + '\n'
def main():
    # Read HTML from stdin, convert it, and print the resulting markdown.
    p = Html2MarkdownParser()
    buf = sys.stdin.read()
    p.feed(buf)
    p.close()
    print p.get_markdown()
if __name__ == "__main__":
    # Exit with main()'s return value (None -> exit status 0).
    sys.exit(main())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
def pe5(n=20):
    """
    What is the smallest number divisible by each of the numbers 1 to 20?
    >>> pe5()
    232792560
    """
    if n < 2:
        return 1
    # Running product of the prime-power factors collected so far.
    lcm = 2
    factors = [2]
    for candidate in range(3, n + 1):
        # Strip one occurrence of every known factor from the candidate;
        # whatever remains is a new factor the LCM still needs.
        reduced = candidate
        for known in factors:
            if reduced % known == 0:
                reduced //= known
        if reduced > 1:
            factors.append(reduced)
            lcm *= reduced
    return lcm
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Simple REPL: read an integer and print pe5(n) until EOF/interrupt.
    # NOTE(review): catching SyntaxError/NameError here looks like a holdover
    # from Python 2's eval-ing input() -- int(input()) raises ValueError.
    try:
        while True:
            s = input('> ')
            n = int(s)
            print(pe5(n))
    except (SyntaxError, EOFError, KeyboardInterrupt, NameError):
        pass
|
nilq/baby-python
|
python
|
import aiohttp
import argparse
import asyncio
import ssl
from urllib.parse import urlsplit
from bs4 import BeautifulSoup
from sitemap.utils import write_text_sitemap, clean_link
# Have these at global scope so they remain shared.
urls = []     # frontier: links discovered but not yet crawled
results = []  # pages already crawled (doubles as the "seen" set)
def sitemap(url, verbose=False):
    """ Main mapping function.
    Clears old results, adds the starting url to the pool of urls,
    creates and runs an event loop, writes out if necessary.
    """
    # Reset module-level state left over from a previous run.
    if len(results) > 0:
        del results[:]
    urls.append(url)
    # NOTE(review): asyncio.get_event_loop() is deprecated outside a running
    # loop since Python 3.10 -- consider asyncio.run() here.
    loop = asyncio.get_event_loop()
    if loop.is_closed():
        loop = asyncio.new_event_loop()
    loop.run_until_complete(asyncio.ensure_future(crawler(urls, results, verbose)))
    return results
async def crawler(urls, results, verbose):
    """ Crawls urls that aren't already in the results list """
    # Crawl in waves: each gather() crawls the current frontier concurrently;
    # crawl() may append newly discovered links for the next wave.
    while len(urls) > 0:
        await asyncio.gather(*(asyncio.ensure_future(crawl(url, verbose)) for url in urls if url not in results))
async def crawl(url, verbose):
    """ Moves current url from urls pool to results,
    gets, cleans & parses html content for new urls,
    appends new urls to urls pool.
    """
    results.append(url)
    try:
        urls.remove(url)
    except ValueError:
        # another concurrent task may already have removed it
        pass
    try:
        async with aiohttp.ClientSession() as session:
            async with session.request(url=url, method='GET') as response:
                # only HTML responses are parsed for further links
                if response.content_type == 'text/html':
                    content = await response.read()
                    clean_content(content, url, verbose)
    except ssl.SSLError as e:
        # TLS failure: skip the page silently
        pass
    except aiohttp.ClientError as e:
        # network/HTTP error: likewise skip
        pass
def clean_content(content, url, verbose):
    """ Parse a webpage for links """
    soup = BeautifulSoup(content, 'html.parser')
    # scheme://netloc of the page, used to resolve relative links
    domain = "{0.scheme}://{0.netloc}".format(urlsplit(url))
    for link in [h.get('href') for h in soup.find_all('a')]:
        link = clean_link(link, domain)
        if link is not None:
            # enqueue only links that are neither queued nor already crawled
            if link not in urls and link not in results:
                urls.append(link)
                if verbose:
                    print(link)
def main():
    """CLI entry point: map the site given by --u, optionally writing to --w."""
    parser = argparse.ArgumentParser()  # pragma: no cover
    parser.add_argument(  # pragma: no cover
        "-u", "--u",  # pragma: no cover
        help="Base url of the site to be mapped",  # pragma: no cover
        dest="url"  # pragma: no cover
    )  # pragma: no cover
    parser.add_argument(  # pragma: no cover
        "--w",  # pragma: no cover
        help="Write output to file",  # pragma: no cover
        dest="output"  # pragma: no cover
    )  # pragma: no cover
    args = parser.parse_args()  # pragma: no cover
    if args.output:  # pragma: no cover
        out = sitemap(url=args.url)  # pragma: no cover
        write_text_sitemap(out, args.output)
    elif args.url:  # pragma: no cover
        sitemap(url=args.url, verbose=True)  # pragma: no cover
    else:  # pragma: no cover
        parser.print_help()  # pragma: no cover
if __name__ == '__main__':
    # Run the CLI when invoked as a script.
    main()
|
nilq/baby-python
|
python
|
import list_wifi_distances
import requests
def report(rogue_mac):
    """Measure the distance to *rogue_mac* and POST it to the reporting server."""
    pi_id = 8
    measured = list_wifi_distances.get_network(rogue_mac.upper())
    print(measured)
    payload = {'id': pi_id, 'dist': measured}
    requests.post("http://10.10.10.93:8000/report", data=payload)
|
nilq/baby-python
|
python
|
import logging
from cellfinder.figures import heatmap
def run(args, atlas, downsampled_shape):
    """Generate and save the cell-density heatmap figure.

    :param args: parsed CLI arguments carrying input/output paths and
        figure options (smoothing, masking)
    :param atlas: the atlas passed straight through to heatmap.run
    :param downsampled_shape: shape of the downsampled image space
    """
    logging.info("Generating heatmap")
    heatmap.run(
        args.paths.downsampled_points,
        atlas,
        downsampled_shape,
        args.brainreg_paths.registered_atlas,
        args.paths.heatmap,
        smoothing=args.heatmap_smooth,
        mask=args.mask_figures,
    )
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.2 on 2021-04-08 02:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Depends on the initial bpmn migration (which creates the Process model).
    dependencies = [
        ('bpmn', '0001_initial'),
    ]

    operations = [
        # Diagram stores a BPMN diagram's XML source and rendered SVG,
        # optionally linked one-to-one with a bpmn.Process.
        migrations.CreateModel(
            name='Diagram',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='', max_length=255)),
                # NOTE(review): max_length has no storage effect on TextField;
                # XML/SVG payloads will routinely exceed 255 characters.
                ('xml', models.TextField(default='', max_length=255)),
                ('svg', models.TextField(default='', max_length=255)),
                ('process', models.OneToOneField(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='bpmn.process')),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
"""Aramaic Bible Module tool"""
from pathlib import Path
import click
from abm_tools.sedra.bible import parse_sedra3_bible_db_file
from abm_tools.sedra.db import from_transliteration, parse_sedra3_words_db_file, sedra4_db_word_json
# Root click command group; subcommands register themselves via @tool.command().
@click.group()
def tool():
    """Tools for generating Aramaic bible software modules"""
@tool.command()
@click.argument("word_id", type=int)
def lookup(word_id: int):
    """Lookup a word in the SEDRA 4 DataBase"""
    # Print the word's raw JSON record from the SEDRA 4 database.
    print(sedra4_db_word_json(word_id))
@tool.command()
@click.argument("file_name", type=click.Path(exists=True))
def gen(file_name: Path) -> int:
    """Create Aramaic Sword modules"""
    # SEDRA 3 word table; currently unused by the active lookup below but
    # kept for the commented-out transliteration variant.
    words = parse_sedra3_words_db_file()
    for book, chapter, verse, word_num, word_id in parse_sedra3_bible_db_file(
        file_name=str(file_name)
    ):
        #word = from_transliteration(words.loc[word_id]["strVocalised"])
        # Resolve the word's western-script rendering via the SEDRA 4 DB.
        word = sedra4_db_word_json(word_id)["western"]
        print(
            book,
            chapter,
            verse,
            word_num,
            word_id,
            word,
        )
    return 0
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
>>> import snmp_helper
c. Create a script that connects to both routers (pynet-rtr1 and pynet-rtr2) and prints out both the MIB2 sysName and sysDescr.
|
nilq/baby-python
|
python
|
##
##
##
import queue
class BundGrammarQueue:
    """Registry of named FIFO/LIFO queues with a selectable default queue.

    push()/pull() operate on the "default" queue, which is the most recently
    created queue (initially the LIFO "__root__").
    """

    def __init__(self):
        # Maps queue name -> queue.Queue (FIFO) or queue.LifoQueue (LIFO).
        self.q = {}
        self.default_queue_name = "__root__"
        self.createLIFO("__root__")

    def queue(self, name, auto_create=True):
        """Return the queue *name*; optionally create a FIFO and select it as default."""
        if name in self.q:
            return self.q[name]
        if not auto_create:
            return None
        self.default_queue_name = name
        return self.createQueue(name)

    def lifo(self, name, auto_create=True):
        """Return the queue *name*; optionally create a LIFO and select it as default."""
        if name in self.q:
            return self.q[name]
        if not auto_create:
            return None
        self.default_queue_name = name
        return self.createLIFO(name)

    def push(self, data):
        """Put *data* on the default queue; False if that queue does not exist."""
        target = self.q.get(self.default_queue_name)
        if target is None:
            return False
        target.put_nowait(data)
        return True

    def pull(self):
        """Take one item from the default queue; None when empty or missing."""
        target = self.q.get(self.default_queue_name)
        if target is None:
            return None
        try:
            return target.get_nowait()
        except queue.Empty:
            return None

    def createQueue(self, name):
        """Register a new FIFO queue under *name*; False if the name is taken."""
        if name in self.q:
            return False
        self.q[name] = queue.Queue()
        return self.q[name]

    def createLIFO(self, name):
        """Register a new LIFO queue under *name*; False if the name is taken."""
        if name in self.q:
            return False
        self.q[name] = queue.LifoQueue()
        return self.q[name]
|
nilq/baby-python
|
python
|
import vcr
from umm.cli.client import umm_request
from umm.server.utils import setup_folder
# HTTP interaction is replayed from a vcr cassette, so the test is offline.
@vcr.use_cassette()
def test_umm_request():
    # A fresh setup should report an empty command list.
    setup_folder()
    resp = umm_request([])
    assert resp == {"commands": []}
|
nilq/baby-python
|
python
|
"""
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
"""Fabric commands for packaging our external software.
External meaning non-python software.
"""
from fabric.api import cd, env, run
from fabric.contrib.files import exists
from fabric.operations import get, put
def package_freeswitch(fs_version='1.6.16~33~e6d643b-1~jessie+1'):
    """Builds freeswitch with our patches.

    This will build the package based on what is currently checked out in the
    local freeswitc repo. Be sure that the tag that is checked out matches the
    version string that is needed. The tag we have used in the past is v1.6.9 and
    the FS repo itself is at https://stash.freeswitch.org/scm/fs/freeswitch.git
        git clone --branch v1.6.9 https://stash.freeswitch.org/scm/fs/freeswitch.git

    :param fs_version: debian version string stamped into the changelog/package
    """
    path = '/home/vagrant/freeswitch'
    if not exists(path):
        print('path %s does not exist on the VM, cannot package' % path)
        return
    with cd(path):
        # Apply our python3 header patch, then stamp the tree with fs_version.
        run('cp ../client/packaging/py3.h src/mod/languages/mod_python')
        run('./build/set-fs-version.sh %s' % fs_version)
        run('dch -b -m -v "%s" --force-distribution -D unstable "Endaga build."' % fs_version)
        run('./bootstrap.sh', warn_only=True)
        # Extend the generated modules.conf with the modules we need enabled.
        get(remote_path='modules.conf', local_path='/tmp/modules.conf')
        o = open('/tmp/modules.conf', 'a')
        o.write("event_handlers/mod_smpp\n")
        o.write("languages/mod_python\n")
        o.write("applications/mod_esl\n")
        o.close()
        with cd('debian/'):
            # Upload the amended modules.conf and regenerate debian metadata.
            put(remote_path='modules.conf', local_path='/tmp/modules.conf')
            run('./bootstrap.sh -c jessie')
        run('./configure --with-python=`which python3`')
        run('sudo mk-build-deps -i -t "apt-get -y --no-install-recommends" debian/control')
        run('dpkg-buildpackage -b -nc -us')
        run('mkdir -p ~/endaga-packages')
        run('mv ../*.deb ~/endaga-packages/')
def package_sipauthserve(make_clean='no'):
    """Create a deb for sipauthserve (subscriberRegistry).

    The subscriberRegistry repo has its own build script.

    :param make_clean: 'yes' to run a clean build (passed to _package_external)
    """
    _package_external('/home/vagrant/subscriberRegistry', 'sipauthserve-public', make_clean)
def package_smqueue(make_clean='no'):
    """Create a deb for smqueue.

    The smqueue repo has its own build script which itself calls FPM.

    :param make_clean: 'yes' to run a clean build (passed to _package_external)
    """
    _package_external('/home/vagrant/smqueue', 'smqueue-public', make_clean)
def package_openbts(make_clean='no'):
    """Create a deb for openbts-public.

    Args:
        make_clean: 'yes' to run the clean target before building
            (see _package_external).
    """
    _package_external('/home/vagrant/openbts', 'openbts-public', make_clean)
def package_liba53(make_clean='no'):
    """Create a deb for liba53.

    Args:
        make_clean: 'yes' to run the clean target before building
            (see _package_external).
    """
    _package_external('/home/vagrant/liba53', 'liba53', make_clean)
def _package_external(directory, package_name, make_clean):
    """Builds packages with mk-build-deps and dpkg-buildpackage.

    Args:
        directory: the path to a repo synced on the VM via vagrant
        package_name: the name of the debian package that will be created
        make_clean: 'yes' to run the package's clean target before building;
            any other value passes -nc (no-clean) to dpkg-buildpackage
    """
    if env.pkgfmt != "deb":
        print("External packages only support deb, not building.")
        return
    if not exists(directory):
        print('path %s does not exist, cannot package' % directory)
        return
    print('packaging %s as %s' % (directory, package_name))
    run('mkdir -p ~/endaga-packages')
    with cd('/home/vagrant/'):
        with cd(directory):
            # install the package's declared build dependencies, then remove
            # the generated -build-deps meta package again
            run('echo y | sudo mk-build-deps')
            run('sudo gdebi --n %s-build-deps*.deb' % package_name)
            run('rm -f %s-build-deps*.deb' % package_name)
            clean_arg = '' if make_clean == 'yes' else '-nc'
            run('dpkg-buildpackage -b -uc -us %s' % clean_arg)
        # the built .deb lands in the parent directory (/home/vagrant)
        run('mv %s_*.deb ~/endaga-packages/.' % package_name)
        run('rm %s_*' % package_name)
|
nilq/baby-python
|
python
|
def fun(x: int) -> str:
    """Return the string representation of *x*."""
    converted = str(x)
    return converted
class SomeClass:
    """Example class exposing a single int-to-string conversion method."""

    def meth(self, x: int) -> str:
        """Return the string representation of *x*."""
        converted = str(x)
        return converted
|
nilq/baby-python
|
python
|
from typing import (
Any,
Callable,
Dict,
Iterable,
Iterator,
NamedTuple,
Optional,
Set,
)
from contextlib import contextmanager
import pendulum
import prefect
from prefect.core import Edge, Flow, Task
from prefect.engine.result import Result
from prefect.engine.results import ConstantResult
from prefect.engine.runner import ENDRUN, Runner, call_state_handlers
from prefect.engine.state import (
Failed,
Mapped,
Pending,
Running,
Scheduled,
State,
Success,
)
from prefect.utilities import executors
from prefect.utilities.collections import flatten_seq
class FlowRunnerInitializeResult(NamedTuple):
    """Return type of `FlowRunner.initialize_run`: the initialized flow state
    plus the per-task states and contexts the run will proceed with."""

    state: State
    task_states: Dict[Task, State]
    context: Dict[str, Any]
    task_contexts: Dict[Task, Dict[str, Any]]
class FlowRunner(Runner):
"""
FlowRunners handle the execution of Flows and determine the State of a Flow
before, during and after the Flow is run.
In particular, through the FlowRunner you can specify which tasks should be
the first tasks to run, which tasks should be returned after the Flow is finished,
and what states each task should be initialized with.
Args:
- flow (Flow): the `Flow` to be run
- task_runner_cls (TaskRunner, optional): The class used for running
individual Tasks. Defaults to [TaskRunner](task_runner.html)
- state_handlers (Iterable[Callable], optional): A list of state change handlers
that will be called whenever the flow changes state, providing an
opportunity to inspect or modify the new state. The handler
will be passed the flow runner instance, the old (prior) state, and the new
(current) state, with the following signature:
`state_handler(fr: FlowRunner, old_state: State, new_state: State) -> Optional[State]`
If multiple functions are passed, then the `new_state` argument will be the
result of the previous handler.
Note: new FlowRunners are initialized within the call to `Flow.run()` and in general,
this is the endpoint through which FlowRunners will be interacted with most frequently.
Example:
```python
@task
def say_hello():
print('hello')
with Flow("My Flow") as f:
say_hello()
fr = FlowRunner(flow=f)
flow_state = fr.run()
```
"""
def __init__(
self,
flow: Flow,
task_runner_cls: type = None,
state_handlers: Iterable[Callable] = None,
):
self.flow = flow
if task_runner_cls is None:
task_runner_cls = prefect.engine.get_default_task_runner_class()
self.task_runner_cls = task_runner_cls
super().__init__(state_handlers=state_handlers)
def __repr__(self) -> str:
return "<{}: {}>".format(type(self).__name__, self.flow.name)
def call_runner_target_handlers(self, old_state: State, new_state: State) -> State:
"""
A special state handler that the FlowRunner uses to call its flow's state handlers.
This method is called as part of the base Runner's `handle_state_change()` method.
Args:
- old_state (State): the old (previous) state
- new_state (State): the new (current) state
Returns:
- State: the new state
"""
self.logger.debug(
"Flow '{name}': Handling state change from {old} to {new}".format(
name=self.flow.name,
old=type(old_state).__name__,
new=type(new_state).__name__,
)
)
for handler in self.flow.state_handlers:
new_state = handler(self.flow, old_state, new_state) or new_state
return new_state
    def initialize_run(  # type: ignore
        self,
        state: Optional[State],
        task_states: Dict[Task, State],
        context: Dict[str, Any],
        task_contexts: Dict[Task, Dict[str, Any]],
        parameters: Dict[str, Any],
    ) -> FlowRunnerInitializeResult:
        """
        Initializes the Task run by initializing state and context appropriately.

        If the provided state is a Submitted state, the state it wraps is extracted.

        Args:
            - state (Optional[State]): the initial state of the run
            - task_states (Dict[Task, State]): a dictionary of any initial task states
            - context (Dict[str, Any], optional): prefect.Context to use for execution
                to use for each Task run
            - task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
                provided to each task
            - parameters(dict): the parameter values for the run

        Returns:
            - NamedTuple: a tuple of initialized objects:
                `(state, task_states, context, task_contexts)`
        """

        # overwrite context parameters one-by-one
        # (flow defaults are seeded first; explicitly supplied run parameters
        # then take precedence over them)
        context_params = context.setdefault("parameters", {})
        for p in self.flow.parameters():
            if not p.required:
                context_params.setdefault(p.name, p.default)
        for param, value in (parameters or {}).items():
            context_params[param] = value

        context.update(flow_name=self.flow.name)
        context.setdefault("scheduled_start_time", pendulum.now("utc"))

        # add various formatted dates to context
        now = pendulum.now("utc")
        dates = {
            "date": now,
            "today": now.strftime("%Y-%m-%d"),
            "yesterday": now.add(days=-1).strftime("%Y-%m-%d"),
            "tomorrow": now.add(days=1).strftime("%Y-%m-%d"),
            "today_nodash": now.strftime("%Y%m%d"),
            "yesterday_nodash": now.add(days=-1).strftime("%Y%m%d"),
            "tomorrow_nodash": now.add(days=1).strftime("%Y%m%d"),
        }
        for key, val in dates.items():
            # setdefault: never clobber values the caller already placed in context
            context.setdefault(key, val)

        # seed each task's context with its display name and slug
        for task in self.flow.tasks:
            task_contexts.setdefault(task, {}).update(
                task_name=task.name, task_slug=self.flow.slugs[task],
            )
        state, context = super().initialize_run(state=state, context=context)
        return FlowRunnerInitializeResult(
            state=state,
            task_states=task_states,
            context=context,
            task_contexts=task_contexts,
        )
    def run(
        self,
        state: State = None,
        task_states: Dict[Task, State] = None,
        return_tasks: Iterable[Task] = None,
        parameters: Dict[str, Any] = None,
        task_runner_state_handlers: Iterable[Callable] = None,
        executor: "prefect.engine.executors.Executor" = None,
        context: Dict[str, Any] = None,
        task_contexts: Dict[Task, Dict[str, Any]] = None,
    ) -> State:
        """
        The main endpoint for FlowRunners.  Calling this method will perform all
        computations contained within the Flow and return the final state of the Flow.

        Args:
            - state (State, optional): starting state for the Flow. Defaults to
                `Pending`
            - task_states (dict, optional): dictionary of task states to begin
                computation with, with keys being Tasks and values their corresponding state
            - return_tasks ([Task], optional): list of Tasks to include in the
                final returned Flow state. Defaults to `None`
            - parameters (dict, optional): dictionary of any needed Parameter
                values, with keys being strings representing Parameter names and values being
                their corresponding values
            - task_runner_state_handlers (Iterable[Callable], optional): A list of state change
                handlers that will be provided to the task_runner, and called whenever a task
                changes state.
            - executor (Executor, optional): executor to use when performing
                computation; defaults to the executor specified in your prefect configuration
            - context (Dict[str, Any], optional): prefect.Context to use for execution
                to use for each Task run
            - task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
                provided to each task

        Returns:
            - State: `State` representing the final post-run state of the `Flow`.
        """
        self.logger.info("Beginning Flow run for '{}'".format(self.flow.name))

        # make copies to avoid modifying user inputs
        task_states = dict(task_states or {})
        context = dict(context or {})
        task_contexts = dict(task_contexts or {})
        parameters = dict(parameters or {})
        if executor is None:
            # instantiate the executor class configured in prefect's settings
            executor = prefect.engine.get_default_executor_class()()

        try:
            state, task_states, context, task_contexts = self.initialize_run(
                state=state,
                task_states=task_states,
                context=context,
                task_contexts=task_contexts,
                parameters=parameters,
            )

            with prefect.context(context):
                # pipeline: validate state -> wait for start time -> mark
                # Running -> execute the tasks
                state = self.check_flow_is_pending_or_running(state)
                state = self.check_flow_reached_start_time(state)
                state = self.set_flow_to_running(state)
                state = self.get_flow_run_state(
                    state,
                    task_states=task_states,
                    task_contexts=task_contexts,
                    return_tasks=return_tasks,
                    task_runner_state_handlers=task_runner_state_handlers,
                    executor=executor,
                )

        except ENDRUN as exc:
            # ENDRUN is the control-flow signal used by the pipeline steps to
            # short-circuit the run with a final state
            state = exc.state

        # All other exceptions are trapped and turned into Failed states
        except Exception as exc:
            self.logger.exception(
                "Unexpected error while running flow: {}".format(repr(exc))
            )
            if prefect.context.get("raise_on_exception"):
                raise exc
            new_state = Failed(
                message="Unexpected error while running flow: {}".format(repr(exc)),
                result=exc,
            )
            state = self.handle_state_change(state or Pending(), new_state)

        return state
@contextmanager
def check_for_cancellation(self) -> Iterator:
"""Contextmanager used to wrap a cancellable section of a flow run.
No-op for the default `FlowRunner` class.
"""
yield
@call_state_handlers
def check_flow_reached_start_time(self, state: State) -> State:
"""
Checks if the Flow is in a Scheduled state and, if it is, ensures that the scheduled
time has been reached.
Args:
- state (State): the current state of this Flow
Returns:
- State: the state of the flow after performing the check
Raises:
- ENDRUN: if the flow is Scheduled with a future scheduled time
"""
if isinstance(state, Scheduled):
if state.start_time and state.start_time > pendulum.now("utc"):
self.logger.debug(
"Flow '{name}': start_time has not been reached; ending run.".format(
name=self.flow.name
)
)
raise ENDRUN(state)
return state
@call_state_handlers
def check_flow_is_pending_or_running(self, state: State) -> State:
"""
Checks if the flow is in either a Pending state or Running state. Either are valid
starting points (because we allow simultaneous runs of the same flow run).
Args:
- state (State): the current state of this flow
Returns:
- State: the state of the flow after running the check
Raises:
- ENDRUN: if the flow is not pending or running
"""
# the flow run is already finished
if state.is_finished() is True:
self.logger.info("Flow run has already finished.")
raise ENDRUN(state)
# the flow run must be either pending or running (possibly redundant with above)
elif not (state.is_pending() or state.is_running()):
self.logger.info("Flow is not ready to run.")
raise ENDRUN(state)
return state
@call_state_handlers
def set_flow_to_running(self, state: State) -> State:
"""
Puts Pending flows in a Running state; leaves Running flows Running.
Args:
- state (State): the current state of this flow
Returns:
- State: the state of the flow after running the check
Raises:
- ENDRUN: if the flow is not pending or running
"""
if state.is_pending():
self.logger.info("Starting flow run.")
return Running(message="Running flow.")
elif state.is_running():
return state
else:
raise ENDRUN(state)
    @executors.run_with_heartbeat
    @call_state_handlers
    def get_flow_run_state(
        self,
        state: State,
        task_states: Dict[Task, State],
        task_contexts: Dict[Task, Dict[str, Any]],
        return_tasks: Set[Task],
        task_runner_state_handlers: Iterable[Callable],
        executor: "prefect.engine.executors.base.Executor",
    ) -> State:
        """
        Runs the flow.

        Args:
            - state (State): starting state for the Flow. Defaults to
                `Pending`
            - task_states (dict): dictionary of task states to begin
                computation with, with keys being Tasks and values their corresponding state
            - task_contexts (Dict[Task, Dict[str, Any]]): contexts that will be provided to
                each task
            - return_tasks ([Task], optional): list of Tasks to include in the
                final returned Flow state. Defaults to `None`
            - task_runner_state_handlers (Iterable[Callable]): A list of state change handlers
                that will be provided to the task_runner, and called whenever a task changes
                state.
            - executor (Executor): executor to use when performing computation; defaults to the
                executor provided in your prefect configuration

        Returns:
            - State: `State` representing the final post-run state of the `Flow`.
        """
        # this dictionary is used for tracking the states of "children" mapped tasks;
        # when running on Dask, we want to avoid serializing futures, so instead
        # of storing child task states in the `map_states` attribute we instead store
        # in this dictionary and only after they are resolved do we attach them to the Mapped state
        mapped_children = dict()  # type: Dict[Task, list]

        if not state.is_running():
            self.logger.info("Flow is not in a Running state.")
            raise ENDRUN(state)

        if return_tasks is None:
            return_tasks = set()
        if set(return_tasks).difference(self.flow.tasks):
            raise ValueError("Some tasks in return_tasks were not found in the flow.")

        def extra_context(task: Task, task_index: int = None) -> dict:
            # per-task context additions used to identify the task run
            return {
                "task_name": task.name,
                "task_tags": task.tags,
                "task_index": task_index,
            }

        # -- process each task in order
        with self.check_for_cancellation(), executor.start():

            for task in self.flow.sorted_tasks():
                task_state = task_states.get(task)

                # if a task is a constant task, we already know its return value
                # no need to use up resources by running it through a task runner
                if task_state is None and isinstance(
                    task, prefect.tasks.core.constants.Constant
                ):
                    task_states[task] = task_state = Success(result=task.value)

                # if the state is finished, don't run the task, just use the provided state if
                # the state is cached / mapped, we still want to run the task runner pipeline
                # steps to either ensure the cache is still valid / or to recreate the mapped
                # pipeline for possible retries
                if (
                    isinstance(task_state, State)
                    and task_state.is_finished()
                    and not task_state.is_cached()
                    and not task_state.is_mapped()
                ):
                    continue

                upstream_states = {}  # type: Dict[Edge, State]

                # this dictionary is used exclusively for "reduce" tasks in particular we store
                # the states / futures corresponding to the upstream children, and if running
                # on Dask, let Dask resolve them at the appropriate time.
                # Note: this is an optimization that allows Dask to resolve the mapped
                # dependencies by "elevating" them to a function argument.
                upstream_mapped_states = {}  # type: Dict[Edge, list]

                # -- process each edge to the task
                for edge in self.flow.edges_to(task):

                    # load the upstream task states (supplying Pending as a default)
                    upstream_states[edge] = task_states.get(
                        edge.upstream_task, Pending(message="Task state not available.")
                    )

                    # if the edge is flattened and not the result of a map, then we
                    # preprocess the upstream states. If it IS the result of a
                    # map, it will be handled in `prepare_upstream_states_for_mapping`
                    if edge.flattened:
                        if not isinstance(upstream_states[edge], Mapped):
                            upstream_states[edge] = executor.submit(
                                executors.flatten_upstream_state, upstream_states[edge]
                            )

                    # this checks whether the task is a "reduce" task for a mapped pipeline
                    # and if so, collects the appropriate upstream children
                    if not edge.mapped and isinstance(upstream_states[edge], Mapped):
                        children = mapped_children.get(edge.upstream_task, [])

                        # if the edge is flattened, then we need to wait for the mapped children
                        # to complete and then flatten them
                        if edge.flattened:
                            children = executors.flatten_mapped_children(
                                mapped_children=children, executor=executor,
                            )

                        upstream_mapped_states[edge] = children

                # augment edges with upstream constants
                for key, val in self.flow.constants[task].items():
                    edge = Edge(
                        upstream_task=prefect.tasks.core.constants.Constant(val),
                        downstream_task=task,
                        key=key,
                    )
                    upstream_states[edge] = Success(
                        "Auto-generated constant value",
                        result=ConstantResult(value=val),
                    )

                # handle mapped tasks
                if any([edge.mapped for edge in upstream_states.keys()]):

                    # wait on upstream states to determine the width of the pipeline
                    # this is the key to depth-first execution
                    upstream_states = executor.wait(
                        {e: state for e, state in upstream_states.items()}
                    )
                    # we submit the task to the task runner to determine if
                    # we can proceed with mapping - if the new task state is not a Mapped
                    # state then we don't proceed
                    task_states[task] = executor.wait(
                        executor.submit(
                            run_task,
                            task=task,
                            state=task_state,  # original state
                            upstream_states=upstream_states,
                            context=dict(
                                prefect.context, **task_contexts.get(task, {})
                            ),
                            flow_result=self.flow.result,
                            task_runner_cls=self.task_runner_cls,
                            task_runner_state_handlers=task_runner_state_handlers,
                            upstream_mapped_states=upstream_mapped_states,
                            is_mapped_parent=True,
                            extra_context=extra_context(task),
                        )
                    )

                    # either way, we should now have enough resolved states to restructure
                    # the upstream states into a list of upstream state dictionaries to iterate over
                    list_of_upstream_states = executors.prepare_upstream_states_for_mapping(
                        task_states[task],
                        upstream_states,
                        mapped_children,
                        executor=executor,
                    )

                    submitted_states = []

                    for idx, states in enumerate(list_of_upstream_states):
                        # if we are on a future rerun of a partially complete flow run,
                        # there might be mapped children in a retrying state; this check
                        # looks into the current task state's map_states for such info
                        if (
                            isinstance(task_state, Mapped)
                            and len(task_state.map_states) >= idx + 1
                        ):
                            current_state = task_state.map_states[
                                idx
                            ]  # type: Optional[State]
                        elif isinstance(task_state, Mapped):
                            current_state = None
                        else:
                            current_state = task_state

                        # this is where each child is submitted for actual work
                        submitted_states.append(
                            executor.submit(
                                run_task,
                                task=task,
                                state=current_state,
                                upstream_states=states,
                                context=dict(
                                    prefect.context,
                                    **task_contexts.get(task, {}),
                                    map_index=idx,
                                ),
                                flow_result=self.flow.result,
                                task_runner_cls=self.task_runner_cls,
                                task_runner_state_handlers=task_runner_state_handlers,
                                upstream_mapped_states=upstream_mapped_states,
                                extra_context=extra_context(task, task_index=idx),
                            )
                        )
                    if isinstance(task_states.get(task), Mapped):
                        mapped_children[task] = submitted_states  # type: ignore

                else:
                    # non-mapped task: submit a single task run
                    task_states[task] = executor.submit(
                        run_task,
                        task=task,
                        state=task_state,
                        upstream_states=upstream_states,
                        context=dict(prefect.context, **task_contexts.get(task, {})),
                        flow_result=self.flow.result,
                        task_runner_cls=self.task_runner_cls,
                        task_runner_state_handlers=task_runner_state_handlers,
                        upstream_mapped_states=upstream_mapped_states,
                        extra_context=extra_context(task),
                    )

            # ---------------------------------------------
            # Collect results
            # ---------------------------------------------

            # terminal tasks determine if the flow is finished
            terminal_tasks = self.flow.terminal_tasks()

            # reference tasks determine flow state
            reference_tasks = self.flow.reference_tasks()

            # wait until all terminal tasks are finished
            final_tasks = terminal_tasks.union(reference_tasks).union(return_tasks)
            final_states = executor.wait(
                {
                    t: task_states.get(t, Pending("Task not evaluated by FlowRunner."))
                    for t in final_tasks
                }
            )

            # also wait for any children of Mapped tasks to finish, and add them
            # to the dictionary to determine flow state
            all_final_states = final_states.copy()
            for t, s in list(final_states.items()):
                if s.is_mapped():
                    # ensure we wait for any mapped children to complete
                    if t in mapped_children:
                        s.map_states = executor.wait(mapped_children[t])
                    s.result = [ms.result for ms in s.map_states]
                    all_final_states[t] = s.map_states

            assert isinstance(final_states, dict)

        key_states = set(flatten_seq([all_final_states[t] for t in reference_tasks]))
        terminal_states = set(
            flatten_seq([all_final_states[t] for t in terminal_tasks])
        )
        return_states = {t: final_states[t] for t in return_tasks}

        state = self.determine_final_state(
            state=state,
            key_states=key_states,
            return_states=return_states,
            terminal_states=terminal_states,
        )

        return state
def determine_final_state(
self,
state: State,
key_states: Set[State],
return_states: Dict[Task, State],
terminal_states: Set[State],
) -> State:
"""
Implements the logic for determining the final state of the flow run.
Args:
- state (State): the current state of the Flow
- key_states (Set[State]): the states which will determine the success / failure of
the flow run
- return_states (Dict[Task, State]): states to return as results
- terminal_states (Set[State]): the states of the terminal tasks for this flow
Returns:
- State: the final state of the flow run
"""
# check that the flow is finished
if not all(s.is_finished() for s in terminal_states):
self.logger.info("Flow run RUNNING: terminal tasks are incomplete.")
state.result = return_states
# check if any key task failed
elif any(s.is_failed() for s in key_states):
self.logger.info("Flow run FAILED: some reference tasks failed.")
state = Failed(message="Some reference tasks failed.", result=return_states)
# check if all reference tasks succeeded
elif all(s.is_successful() for s in key_states):
self.logger.info("Flow run SUCCESS: all reference tasks succeeded")
state = Success(
message="All reference tasks succeeded.", result=return_states
)
# check for any unanticipated state that is finished but neither success nor failed
else:
self.logger.info("Flow run SUCCESS: no reference tasks failed")
state = Success(message="No reference tasks failed.", result=return_states)
return state
def run_task(
    task: Task,
    state: State,
    upstream_states: Dict[Edge, State],
    context: Dict[str, Any],
    flow_result: Result,
    task_runner_cls: Callable,
    task_runner_state_handlers: Iterable[Callable],
    upstream_mapped_states: Dict[Edge, list],
    is_mapped_parent: bool = False,
) -> State:
    """
    Runs a specific task. This method is intended to be called by submitting it to
    an executor.

    Args:
        - task (Task): the task to run
        - state (State): starting state for the Flow. Defaults to `Pending`
        - task_runner_cls (Callable): the `TaskRunner` class to use
        - upstream_states (Dict[Edge, State]): dictionary of upstream states
        - context (Dict[str, Any]): a context dictionary for the task run
        - flow_result (Result): the `Result` associated with the flow (if any)
        - task_runner_state_handlers (Iterable[Callable]): A list of state change
            handlers that will be provided to the task_runner, and called
            whenever a task changes state.
        - upstream_mapped_states (Dict[Edge, list]): dictionary of upstream states
            corresponding to mapped children dependencies
        - is_mapped_parent (bool): a boolean indicating whether this task run is the
            run of a parent mapped task

    Returns:
        - State: `State` representing the final post-run state of the `Flow`.
    """
    with prefect.context(context):
        # Update upstream_states with info from upstream_mapped_states:
        # mapped children resolved by the executor are attached to their
        # "reduce" edges here so the task runner sees concrete results
        for edge, upstream_state in upstream_states.items():
            if not edge.mapped and upstream_state.is_mapped():
                assert isinstance(upstream_state, Mapped)  # mypy assert
                upstream_state.map_states = upstream_mapped_states.get(
                    edge, upstream_state.map_states
                )
                upstream_state.result = [s.result for s in upstream_state.map_states]
        task_runner = task_runner_cls(
            task=task,
            state_handlers=task_runner_state_handlers,
            flow_result=flow_result,
        )
        return task_runner.run(
            state=state,
            upstream_states=upstream_states,
            is_mapped_parent=is_mapped_parent,
            context=context,
        )
|
nilq/baby-python
|
python
|
''' Menu Module
Module to deal with menus and buttons. Used initially for start menu.
Can be extended if required to create pause and other menues throughout
the game.
@author: Robert (unless stated otherwise)
'''
import pygame
from classes.text import Text
from classes.sfxbox import SFXBox
button_location = 'graphics/menu/button.png'
SFX = SFXBox()
class Menu:
    ''' Menu

    Class which generates and contains menu functionality.
    '''
    def __init__(self, screen, title_obj, background_location, *buttons):
        ''' Menu

        Stores the display surface, title object and buttons; loads the
        optional background image.

        Args:
            screen: pygame display surface to draw on
            title_obj: object with a display() method, drawn above the buttons
            background_location: image path for a background, or False for none
            *buttons: any number of Button instances
        '''
        self.screen = screen
        self.title_obj = title_obj
        self.unpackButtons(buttons)
        self.background_location = background_location
        if background_location != False:
            self.background = pygame.image.load(background_location)
            self.background_rect = pygame.Rect((0, 0, 1, 1))
        # Quitting Bool to determine whether to quit screen
        self.playing = True

    def display(self):
        '''
        Displays the background (if any), the title and all buttons
        on the screen.
        '''
        if self.background_location != False:
            self.screen.blit(self.background, self.background_rect)
        self.title_obj.display()
        for button in self.buttons:
            button.display()

    def do(self, event):
        ''' do function

        Actions whatever is input by user. Receives events from game
        loop and if applicable actions them.

        The buttons have a record of whether they have been
        'button-downed' yet. If they have, then if they are also
        'button-upped' their function will be called.

        If a button is pressed, returns any output that the button's
        function may give. This allows the menu to be used in any
        scenario such as the pause button, where we want it to return a
        string, to tell the pause screen what to do.
        '''
        if event.type == pygame.QUIT:
            # Detecting user pressing quit button, if X pressed,
            # break loop and quit screen.
            self.playing = False
        if event.type == pygame.MOUSEBUTTONDOWN:
            for button in self.buttons:
                if self.checkPress(button, event.pos):
                    button.mouse_down = True
        if event.type == pygame.MOUSEBUTTONUP:
            # Find the first button that was both pressed down and released
            # over itself, clearing every button's mouse_down flag as we go.
            # FIX: the previous version returned from inside the loop, which
            # skipped resetting mouse_down on buttons later in the list and
            # left stale press state for the next click.
            pressed = None
            for button in self.buttons:
                if pressed is None and button.mouse_down \
                        and self.checkPress(button, event.pos):
                    pressed = button
                button.mouse_down = False
            if pressed is not None:
                SFX.click()
                return pressed.press()

    def checkPress(self, button, pos):
        '''
        Checks whether a position hits the given button on the menu.
        '''
        x0, x1, y0, y1 = button.coords
        if (x0 < pos[0] < x1) and (y0 < pos[1] < y1):
            return True
        return False

    def unpackButtons(self, buttons):
        ''' Unpacks buttons from tuple to list
        '''
        self.buttons = list(buttons)
class Button:
    ''' Button Class

    Creates an automatically highlighted button using the Text class
    from the text module.

    The buttons display function checks whether cursor is covering it.
    If the button is being covered, it highlights the text (yellow by
    default) and if clicked it calls the given function.

    I have used property decorators to deal with button position, as in
    the Text class so that the button can easily be moved on screen if
    required. For this reason, __position is private, so that it cannot
    be edited from outside the function.
    '''
    def __init__(self, screen, text, position, func,
                 font_size = 35, size = (128, 64),
                 text_colour = 'white', highlight = 'yellow'):
        # Storing attributes
        self.screen = screen
        self.text = text  # str here; replaced by a Text object in makeText()
        self.__position = position  # centre of the button, (x, y)
        self.func = func  # callable invoked by press()
        self.size = size  # (width, height) in pixels
        self.text_colour = text_colour
        self.highlight = highlight  # colour used while the cursor hovers
        self.font_size = font_size
        self.highlighted = False  # True while the cursor is over the button
        self.mouse_down = False  # set by Menu.do on MOUSEBUTTONDOWN
        # Make edges attributes
        self.setEdgesAttributes()
        # Making text and images
        self.makeText()
        self.makeImage()

    def press(self):
        ''' Call button function when pressed, and return any output
        '''
        return self.func()

    def makeText(self):
        ''' Create text object

        Note: replaces self.text (the label string) with a Text instance.
        '''
        self.text = Text(self.screen, self.position, self.font_size,
                         self.text, self.text_colour)

    def makeImage(self):
        ''' Make image object from image to be loaded

        Loads the shared button sprite, scales it to self.size and
        centres its rect on the button position.
        '''
        self.image = pygame.transform.scale(
            pygame.image.load(button_location),
            self.size
        )
        self.rect = self.image.get_rect()
        self.rect.center = self.position

    def update(self):
        ''' Updates highlighting if cursor hovering over button
        '''
        pos_x, pos_y = pygame.mouse.get_pos()
        over_button = (self.left < pos_x < self.right) \
            and (self.top < pos_y < self.bottom)
        if over_button:
            self.highlighted = True
            self.text.colour = self.highlight
        elif self.highlighted:
            # cursor just left the button: restore the normal colour
            self.text.colour = self.text_colour
            self.highlighted = False

    def display(self):
        ''' Displays all button components on screen
        '''
        self.update()
        self.screen.blit(self.image, self.rect)
        self.text.display()

    def setEdgesAttributes(self):
        ''' Sets left/right/top/bottom attributes from position
        '''
        self.left = self.position[0] - (self.size[0] // 2)
        self.right = self.position[0] + (self.size[0] // 2)
        self.top = self.position[1] - (self.size[1] // 2)
        self.bottom = self.position[1] + (self.size[1] // 2)

    # The following decorated functions deal with position and
    # coordinates of our button. The position gives the centre
    # position, x and y give the corresponding components of the centre,
    # and coords give the corner positions. These are all updated by
    # updating the position, and the position setter cascades the
    # changes to all attributes.
    @property
    def position(self):
        # centre of the button, (x, y)
        return self.__position

    @position.setter
    def position(self, new_pos):
        # moving the button cascades to edges, text and image rect
        self.__position = new_pos
        self.setEdgesAttributes()
        self.text.position = self.position
        self.rect.center = self.position

    @property
    def x(self):
        return self.__position[0]

    @x.setter
    def x(self, new_x):
        self.position = [new_x, self.position[1]]

    @property
    def y(self):
        return self.__position[1]

    @y.setter
    def y(self, new_y):
        self.position = [self.position[0], new_y]

    @property
    def coords(self):
        # corner coordinates as (left, right, top, bottom)
        return (self.left, self.right, self.top, self.bottom)
|
nilq/baby-python
|
python
|
from loguru import logger
import cv2
import os
import pickle
import typing
import numpy as np
from sklearn.svm import LinearSVC
from stagesepx.classifier.base import BaseModelClassifier
from stagesepx import toolbox
from stagesepx.video import VideoFrame
from stagesepx import constants
class SVMClassifier(BaseModelClassifier):
FEATURE_DICT = {
"hog": toolbox.turn_hog_desc,
"lbp": toolbox.turn_lbp_desc,
# do not use feature transform
"raw": lambda x: x,
}
UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG
def __init__(
self, feature_type: str = None, score_threshold: float = None, *args, **kwargs
):
"""
init classifier
:param feature_type:
before training, classifier will convert pictures into feature, for better classification.
eg: 'hog', 'lbp' or 'raw'
:param score_threshold:
float, 0 - 1.0, under this value, label -> UNKNOWN_STAGE_NAME
default value is 0 (None)
"""
super().__init__(*args, **kwargs)
# feature settings
if not feature_type:
feature_type = "hog"
if feature_type not in self.FEATURE_DICT:
raise AttributeError(f"no feature func named {feature_type}")
self.feature_func: typing.Callable = self.FEATURE_DICT[feature_type]
logger.debug(f"feature function: {feature_type}")
# model settings
self._model: typing.Optional[LinearSVC] = None
self.score_threshold: float = score_threshold or 0.0
logger.debug(f"score threshold: {self.score_threshold}")
    def clean_model(self):
        # Drop the in-memory model; train() builds a fresh LinearSVC when
        # self._model is None.
        self._model = None
def save_model(self, model_path: str, overwrite: bool = None):
"""
save trained model
:param model_path:
:param overwrite:
:return:
"""
logger.debug(f"save model to {model_path}")
# assert model file
if os.path.isfile(model_path) and not overwrite:
raise FileExistsError(
f"model file {model_path} already existed, you can set `overwrite` True to cover it"
)
# assert model data is not empty
assert self._model, "model is empty"
with open(model_path, "wb") as f:
pickle.dump(self._model, f)
def load_model(self, model_path: str, overwrite: bool = None):
"""
load trained model
:param model_path:
:param overwrite:
:return:
"""
logger.debug(f"load model from {model_path}")
# assert model file
assert os.path.isfile(model_path), f"model file {model_path} not existed"
# assert model data is empty
if self._model and not overwrite:
raise RuntimeError(
f"model is not empty, you can set `overwrite` True to cover it"
)
# joblib raise an error ( i have no idea about how to fix it ) here, so use pickle instead
with open(model_path, "rb") as f:
self._model = pickle.load(f)
def train(self):
    """
    train your classifier with data. must be called before prediction

    Builds a LinearSVC (or continues with an existing one), converts every
    labeled picture into a flattened feature vector, and fits the model.

    :raises AssertionError: if the training data contains fewer than two
        distinct classes (a linear SVC cannot be fit on one class)
    :return:
    """
    if not self._model:
        logger.debug("no model can be used. build a new one.")
        self._model = LinearSVC()
    else:
        logger.debug("already have a trained model. train on this model.")
    train_data = list()
    train_label = list()
    for each_label, each_label_pic_list in self.read():
        for each_pic_object in each_label_pic_list:
            logger.debug(f"training label: {each_label}")
            # apply hook (wrapped in a dummy VideoFrame: no real id/timestamp)
            each_pic_object = self._apply_hook(
                VideoFrame(-1, -1.0, each_pic_object)
            )
            each_pic_object = each_pic_object.data
            each_pic_object = self.feature_func(each_pic_object).flatten()
            train_data.append(each_pic_object)
            train_label.append(each_label)
    logger.debug("data ready")
    # FIX: count distinct classes, not samples. The old check
    # `len(train_label) > 1` passed for two samples of the same class,
    # which then made LinearSVC.fit fail with a less helpful error --
    # the message below already states that two *classes* are required.
    assert (
        len(set(train_label)) > 1
    ), f"seems only one class in the training dataset, at least two classes are required: {train_label}"
    self._model.fit(train_data, train_label)
    logger.debug("train finished")
def predict(self, pic_path: str) -> str:
    """
    predict a single picture

    :param pic_path: path of the picture file on disk
    :return: predicted stage name (see ``predict_with_object``)
    """
    # toolbox.imread is a project helper; presumably wraps cv2.imread -- TODO confirm
    pic_object = toolbox.imread(pic_path)
    return self.predict_with_object(pic_object)
def predict_with_object(self, frame: np.ndarray) -> str:
    """
    predict a single object

    :param frame: raw image array to classify
    :return: predicted stage name, or UNKNOWN_STAGE_NAME when every score
        falls below ``score_threshold`` (multi-class case only)
    """
    pic_object = self.feature_func(frame)
    pic_object = pic_object.reshape(1, -1)

    # scores for each stages
    # IMPORTANT:
    # these scores are not always precise
    # at the most of time, we used a tiny train data set for training
    # which may causes 'liblinear failed to converge'
    # actually, it can know which one is the target class
    # but the calculated value may becomes weird
    scores = self._model.decision_function(pic_object)[0]
    logger.debug(f"scores: {scores}")

    # in the binary case, return type is different (wtf ...)
    # for more effective i think
    # NOTE(review): this branch bypasses ``score_threshold`` entirely, so a
    # two-class model never returns UNKNOWN_STAGE_NAME -- confirm intended.
    if len(self._model.classes_) == 2:
        # scores is a float
        # confidence score for self.classes_[1] where >0 means this
        # class would be predicted
        return self._model.classes_[1 if scores > 0 else 0]

    # unknown: no class scored above the configured threshold
    if max(scores) < self.score_threshold:
        logger.warning(
            f"max score is lower than {self.score_threshold}, unknown class"
        )
        return self.UNKNOWN_STAGE_NAME

    return self._model.classes_[np.argmax(scores)]
def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:
    """Classify one video frame; internal dispatch hook for the base class."""
    return self.predict_with_object(frame.data)
|
nilq/baby-python
|
python
|
from fastapi import APIRouter

from client.api.api_v1.endpoints import twitter, disk_space

# Top-level v1 API router: aggregates the endpoint routers under their prefixes.
router = APIRouter()
router.include_router(disk_space.router, prefix="/diskspace", tags=["diskspace"])
router.include_router(twitter.router, prefix="/twitter", tags=["twitter"])
|
nilq/baby-python
|
python
|
import socket
import imagezmq
import cv2
import time

# Minimal imagezmq smoke test: send one local image to a receiver on
# localhost:5555 and report how long the send took.
sender = imagezmq.ImageSender(connect_to='tcp://localhost:5555')
sender_name = socket.gethostname()  # send your hostname with each image

# image = open("C:/Users/H S/PycharmProjects/Kivy/Untitled.png", 'rb')
# NOTE(review): cv2.imread returns None (no exception) when the path is
# missing or unreadable -- the print below would then show 'None'.
image = cv2.imread("C:/Users/H S/PycharmProjects/Kivy/Untitled.png")
print(image)
print(sender_name)

# time the send; presumably blocks until the receiver acknowledges -- TODO confirm transport mode
s = time.time()
sender.send_image(sender_name, image)
e = time.time()
print('it took- ', e-s, ' sec')
|
nilq/baby-python
|
python
|
import zerorpc

# Minimal zerorpc client demo: connect to a local server and invoke its
# remote 'double' method.
client = zerorpc.Client()
client.connect("tcp://127.0.0.1:4242")

num = 7
result = client.double(num)  # remote procedure call on the server
print("Double", num, "is", result)
|
nilq/baby-python
|
python
|
from Components.Converter.Converter import Converter
from Components.config import config
from Components.Element import cached
from Poll import Poll
from enigma import eDVBVolumecontrol
class ArcticVolume(Poll, Converter):
    """Enigma2 skin converter exposing the current system volume,
    refreshed by polling twice a second."""

    def __init__(self, val):
        Converter.__init__(self, val)
        Poll.__init__(self)
        # poll every 500 ms so the displayed volume tracks changes quickly
        self.poll_interval = 500
        self.poll_enabled = True
        self.volctrl = eDVBVolumecontrol.getInstance()

    def doSuspend(self, suspended):
        # stop polling while the widget is hidden; force a refresh on resume
        if suspended:
            self.poll_enabled = False
        else:
            self.downstream_elements.changed((self.CHANGED_POLL,))
            self.poll_enabled = True

    @cached
    def getText(self):
        # current volume as text
        return str(self.volctrl.getVolume())

    @cached
    def getValue(self):
        # NOTE(review): returns a string, same as getText -- skins reading
        # 'value' may expect an int; confirm against skin usage.
        return str(self.volctrl.getVolume())

    text = property(getText)
    value = property(getValue)
|
nilq/baby-python
|
python
|
###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
###############################################################################
"""
Data model for electrolyte database.
Usage to get configuration for IDAES::
base = <query database for Base config of interest>
c_list = <get Components from database>
# add all the components to the base
for c in c_list:
base.add(c)
# get the merged configuration for IDAES functions
config = base.idaes_config
Class diagram::
┌────────────────────────────────┐
│ ConfigGenerator <<abstract>> │
uses ├────────────────────────────────┤
┌─────►│+ConfigGenerator(data) │
│ │ │
│ ├────────────────────────────────┤
│ │+config │
│ │_transform(data) │
│ └────────────┬───────────────────┘
│ │
│ ├───────────┬───────────────────────┐
│ │ │ │
│ ┌──────────────┴┐ ┌──┴──────────┐ ┌─────┴─────┐
│ │ ReactionConfig│ │ ThermoConfig│ │ BaseConfig│
│ └─────▲─────────┘ └─▲───────────┘ └───────▲───┘
│ │ │ │
│ │ │ │
│ │ │ │
│ │uses │uses │uses
│ │ │ │
│ │ │ │
│ ┌───────┼───────────────────┼──────────────────────────┼────────────┐
│ │ │ │ │ │
│ │ ┌────┴─────┐ ┌─────────┴───┐ ┌─────────────────┴─────────┐ │
│ │ │ Reaction │ │ Component │ │ Base │ │
│ │ └─────┬────┘ └──────┬──────┘ │ │ │
│ │ │ │ │ +add(item:DataWrapper) │ │
│ │ │ │ └─────────┬─────────────────┘ │
│ │ │ │ │ │
│ │ │ │ │ │
│ │ ├───────────────┴─────────────────────┘ │
│ │ │ │
│ │ │ │
│ └────────┼──────────────────────────────────────────────────┬───────┘
│ │ │
│ │ │
│ │ ┌────────┴─────────────┐
│ │ subclass │ │
│ ┌───────▼────────────────────────────┐ │ Public interface to │
│ │DataWrapper <<abstract>> │ │ the rest of │
│ ├────────────────────────────────────┤ │ WaterTAP │
│ │+DataWrapper(data, config_gen_class)│ │ │
└───┼────────────────────────────────────┤ └──────────────────────┘
│+idaes_config: dict │
│+merge_keys: tuple[str] │
└────────────────────────────────────┘
"""
__author__ = "Dan Gunter"
# stdlib
import collections
from contextlib import contextmanager
import copy
from fnmatch import fnmatchcase
import logging
from pprint import pformat
import re
from typing import Dict, Type, List, Union, Tuple
# 3rd party
from pyomo.environ import units as pyunits
# IDAES methods and constants
from idaes.core import AqueousPhase, LiquidPhase, SolidPhase, VaporPhase
from idaes.core.base.phases import PhaseType
from idaes.core import Component as IComponent
from idaes.models.properties.modular_properties.eos.ideal import Ideal
from idaes.models.properties.modular_properties.base.generic_reaction import (
ConcentrationForm,
)
from idaes.models.properties.modular_properties.phase_equil.forms import fugacity
from idaes.models.properties.modular_properties.pure import Perrys
from idaes.models.properties.modular_properties.pure.ConstantProperties import Constant
from idaes.models.properties.modular_properties.pure.NIST import NIST
from idaes.models.properties.modular_properties.reactions.dh_rxn import constant_dh_rxn
from idaes.models.properties.modular_properties.pure.electrolyte import (
relative_permittivity_constant,
)
from idaes.models.properties.modular_properties.reactions.equilibrium_constant import (
van_t_hoff,
)
from idaes.models.properties.modular_properties.reactions.equilibrium_forms import (
power_law_equil,
log_power_law_equil,
solubility_product,
log_solubility_product,
)
from idaes.models.properties.modular_properties.state_definitions import FTPx, FpcTP
from idaes.core.base.components import Solvent, Solute, Cation, Anion
from idaes.models.properties.modular_properties.phase_equil import SmoothVLE
from idaes.models.properties.modular_properties.phase_equil.bubble_dew import (
IdealBubbleDew,
)
from .error import ConfigGeneratorError, BadConfiguration
_log = logging.getLogger(__name__)
@contextmanager
def field(f):
    """Yield *f* unchanged, purely to allow readable ``with field(...) as fld:``
    scoping of work on one field name (used throughout this module)."""
    current = f
    yield current
class ConfigGenerator:
    """Interface for getting an IDAES 'idaes_config' dict."""

    #: top-level keys merged into a Base config (set by subclasses)
    merge_keys = ()
    #: mapping of <dotted.path> -> {string: object} substitutions (set by subclasses)
    substitute_values = {}
    #: sentinel: substitute by building pyomo units from the string value
    SUBST_UNITS = "units"

    def __init__(self, data: Dict, name=None):
        """Constructor.

        Args:
            data: Input data
            name: Name of the component, e.g. "H2O"
        """
        # work on a deep copy so the caller's data is never mutated
        data_copy = copy.deepcopy(data)
        _log.info(f"transform to IDAES config.start: name={name}")
        self._transform(data_copy)
        _log.info(f"transform to IDAES config.end: name={name}")
        self.config = data_copy

    @classmethod
    def _transform(cls, data):
        pass  # subclasses should implement

    @staticmethod
    def _build_units(x: str = None):
        """Build a pyomo units object by evaluating the unit expression *x*.

        NOTE: uses eval() on DB-provided unit strings; the namespace is
        restricted to pyomo's `units`, but the DB content is still trusted.
        """
        if not x:
            _log.info("setting dimensionless unit")
            x = "dimensionless"
        s = re.sub(r"([A-Za-z]+)", r"U.\1", x).replace("U.None", "U.dimensionless")
        try:
            units = eval(s, {"U": pyunits})
        # Syntax/NameError are just general badness, AttributeError is an unknown unit
        except (SyntaxError, NameError, AttributeError) as err:
            _log.error(f"while evaluating unit {s}: {err}")
            raise
        return units

    # shared

    @classmethod
    def _transform_parameter_data(cls, comp):
        """Convert parameter values from EDB storage form ({'v','u','i'} objects)
        into IDAES form ((value, units) tuples, possibly indexed)."""
        debugging, comp_name = _log.isEnabledFor(logging.DEBUG), comp.get("name", "?")
        params = comp.get(DataWrapperNames.param, None)
        if not params:
            _log.warning(f"No parameter data found in data name={comp_name}")
            return
        for param_key in params:
            val = params[param_key]
            if param_key == Reaction.NAMES.reaction_order:
                # change form of reaction_order, just like stoichiometry
                reaction_order_table = {}
                for phase in val:
                    for species, num in val[phase].items():
                        reaction_order_table[(phase, species)] = num
                params[param_key] = reaction_order_table
            elif len(val) > 1:
                # List of objects with 'v', 'u', and maybe 'i' keys
                # -> transform into dict of tuples with key `i` and
                #    value (<value>, built(<units>))
                coeff_table = {}
                if debugging:
                    _log.debug(f"start: transform parameter list key={param_key}")
                for item in val:
                    try:
                        index = item.get("i", 0)
                        built_units = cls._build_units(item["u"])
                    except (AttributeError, TypeError, ValueError) as err:
                        raise ConfigGeneratorError(
                            f"Cannot extract parameter. name='{comp_name}', "
                            f"item='{item}': {err}"
                        )
                    coeff_table[index] = (item["v"], built_units)
                params[param_key] = coeff_table
                if debugging:
                    _log.debug(f"done: transform parameter list key={param_key}")
            else:
                # Single object with 'v', 'u' keys
                # -> transform into single tuple (<value>, built(<units>))
                if debugging:
                    _log.debug(f"start: transform single parameter key={param_key}")
                item = val[0]
                built_units = cls._build_units(item["u"])
                params[param_key] = (item["v"], built_units)
                if debugging:
                    _log.debug(f"done: transform single parameter key={param_key}")

    @staticmethod
    def _iterate_dict_or_list(value):
        """Yield (key, item) pairs from a dict, or ("1".."N", item) from a list.

        FIX: this is a generator function (it contains ``yield``), so the old
        ``return value.items()`` silently produced an EMPTY iterator for dicts
        (a ``return <value>`` inside a generator only sets StopIteration.value).
        FIX: the list branch never incremented ``num``, numbering every item "1".
        """
        # if value is a dict, use dict keys as indexes, so really just do `.items()`
        if hasattr(value, "keys"):
            yield from value.items()
        # otherwise number from 1..N
        elif hasattr(value, "append"):
            for num, item in enumerate(value, start=1):
                yield str(num), item

    @classmethod
    def _wrap_section(cls, section: str, data: Dict):
        """Put all `data` inside {<section>: <name>: { /data/ }}.

        The `<name>` is taken from `data["name"]`.
        Also removes keys 'name' and special keys starting with underscore
        like _id from the `data`.

        Changes input argument.

        Section will be, e.g., "components" or "equilibrium_reactions"
        """
        comp_name = data["name"]
        # create new location for component data
        if section not in data:
            data[section] = {}
        assert comp_name not in data[section], "trying to add existing component"
        data[section][comp_name] = {}
        # copy existing to new location
        to_delete = set()  # cannot delete while iterating, so store keys to delete here
        for key, value in data.items():
            # if this is not a special field, add it to the component
            if key not in (
                "name",
                "base_units",
                "reaction_type",
                "components",
                "reactant_elements",
                section,
                "_id",
            ):
                data[section][comp_name][key] = value
            # mark field for deletion, if not top-level field
            if key not in ("base_units", section):
                to_delete.add(key)
        # remove copied fields from old location
        for key in to_delete:
            del data[key]
        # remove special
        cls._remove_special(data)

    @classmethod
    def _remove_special(cls, data):
        """Remove 'special' keys starting with an underscore (e.g. _id) as well as 'name'."""
        for key in list(data.keys()):
            if key.startswith("_") or key == "name":
                del data[key]

    @classmethod
    def _substitute(cls, data):
        """Walk `cls.substitute_values` and replace matching string values in
        `data` with the mapped IDAES objects (or built pyomo units)."""
        debugging = _log.isEnabledFor(logging.DEBUG)

        def dicty(d):
            return hasattr(d, "keys")

        def substitute_value(d, subst, key):
            """Find string value(s) at 'd[key]' in mapping 'subst' and substitute mapped value.

            Return True if found, False otherwise.
            """
            if debugging:
                _log.debug(f"substitute value: d={d} subst={subst} key={key}")
            # make a scalar into a list of length 1, but remember whether
            # it's a list or not
            if (
                isinstance(d[key], str)
                or isinstance(d[key], int)
                or isinstance(d[key], float)
            ):
                str_values = [d[key]]
                is_list = False
            else:
                try:
                    str_values = list(d[key])
                except TypeError:
                    # NOTE(review): a non-iterable scalar lands here and is then
                    # treated as a list below -- confirm this case ever occurs
                    str_values = [str(d[key])]
                is_list = True
            # substitute all values in the list, with the result in `new_list`
            num_subst, new_list = 0, []
            for str_value in str_values:
                new_value = None
                if dicty(subst):
                    if str_value in subst:
                        new_value = subst[str_value]
                    # add case-insensitivity
                    elif str_value.lower() in subst:
                        new_value = subst[str_value.lower()]
                elif subst == cls.SUBST_UNITS:
                    if isinstance(
                        str_value, str
                    ):  # make sure it's not already evaluated
                        _log.debug(
                            f"Substituting units: set {{'{key}': units('{str_value}')}} in {d}"
                        )
                        new_value = cls._build_units(str_value)
                if new_value is None:
                    new_list.append(str_value)  # unsubstituted value
                else:
                    new_list.append(new_value)
                    num_subst += 1
            # change input to substituted list (or single value)
            d[key] = new_list if is_list else new_list[0]
            # return True only if all values were substituted
            return num_subst == len(new_list)

        def stringish(x):
            """String or list/tuple of strings?"""
            if isinstance(x, str):
                return True
            if isinstance(x, list) or isinstance(x, tuple):
                for item in x:
                    # FIX: test each item, not the container; the old
                    # `isinstance(x, str)` made every non-empty list report False
                    if not isinstance(item, str):
                        return False
                return True
            return False

        sv = cls.substitute_values
        for sv_section in sv:
            if debugging:
                _log.debug(f"start: substitute section {sv_section}")
            # get parent dict at dotted path given by 'sv_section'
            key_list = sv_section.split(".")
            data_section = data
            # walk down the dotted path to the terminal dict
            while dicty(data_section) and len(key_list) > 1:
                subsection = key_list.pop(0)
                if subsection in data_section:
                    data_section = data_section[subsection]
                else:
                    data_section = None  # not present
            # if found, perform substitution(s)
            if dicty(data_section):
                sv_key = key_list.pop()
                _log.debug(
                    f"perform substitutions in data={data_section} for key='{sv_key}'"
                )
                # if it is a wildcard, allow multiple substitutions
                if "*" in sv_key:
                    matches = [k for k in data_section if fnmatchcase(k, sv_key)]
                    for match_key in matches:
                        if not stringish(data_section[match_key]):
                            continue  # don't try to substitute non strings/string-lists
                        did_subst = substitute_value(
                            data_section, sv[sv_section], match_key
                        )
                        if not did_subst:
                            _log.warning(
                                f"Could not find substitution: section={sv_section} match={match_key} "
                                f"value={data_section[match_key]}"
                            )
                # if not a wildcard, do zero or one substitutions
                elif sv_key in data_section:
                    did_subst = substitute_value(data_section, sv[sv_section], sv_key)
                    if not did_subst:
                        _log.warning(
                            f"Could not find substitution: section={sv_section} "
                            f"value={data_section[sv_key]}"
                        )
            if debugging:
                _log.debug(f"done: substitute section {sv_section}")
class ThermoConfig(ConfigGenerator):
    """Generate the IDAES thermo ('components') configuration from EDB component data."""

    # Map string values stored in the DB to the IDAES objects/methods they name.
    substitute_values = {
        "valid_phase_types": {
            "pt.liquidphase": PhaseType.liquidPhase,
            "pt.solidphase": PhaseType.solidPhase,
            "pt.vaporphase": PhaseType.vaporPhase,
            "pt.aqueousphase": PhaseType.aqueousPhase,
        },
        "*_comp": {
            "perrys": Perrys,
            "constant": Constant,
            "nist": NIST,
            "relative_permittivity_constant": relative_permittivity_constant,
        },
        "phase_equilibrium_form.*": {
            "fugacity": fugacity,
        },
        "type": {
            "solvent": Solvent,
            "solute": Solute,
            "cation": Cation,
            "anion": Anion,
            "component": IComponent,
        },
    }

    def __init__(self, data, name="unknown", validation=True):
        """Constructor.

        Args:
            data: Input data
            name: Name of the component, e.g. "H2O"
            validation: If True, perform schema validation against input.

        Raises:
            ValidationError: If the input is bad.
        """
        super().__init__(data, name=name)
        if validation:
            from .validate import validate  # put here to avoid circular import

            if _log.isEnabledFor(logging.DEBUG):
                _log.debug(f"Validating Component:\n{pformat(data)}")
            validate(data, obj_type="component")

    @classmethod
    def _transform(cls, data):
        """Convert parameters, substitute IDAES objects, and wrap under 'components'."""
        cls._transform_parameter_data(data)
        cls._substitute(data)

        # collapse a 1-element list/tuple of phase types to a scalar
        with field("valid_phase_types") as fld:
            if isinstance(data.get(fld, None), (list, tuple)) and len(data[fld]) == 1:
                data[fld] = data[fld][0]
        # 'elements' is EDB metadata only, not part of the IDAES config
        # (schema validation presumably guarantees the key exists -- else KeyError)
        del data["elements"]
        cls._wrap_section("components", data)
        for name in data["components"]:
            cls._key_to_tuple(data["components"][name], "phase_equilibrium_form")

    @classmethod
    def _key_to_tuple(cls, data, section):
        """Change all key values separated by '-' in the given section to tuples of those values."""
        if section not in data:
            return
        temp = {}
        for key in data[section]:
            item_list = key.split("-")
            if len(item_list) != 2:
                raise BadConfiguration(
                    "ThermoConfig._key_to_tuple",
                    data,
                    missing=None,
                    why="\n" + section + " tuple key must be only 2 items\n",
                )
            temp[tuple(item_list)] = data[section][key]
        data[section] = temp
class ReactionConfig(ConfigGenerator):
    """Generate the IDAES reaction configuration from EDB reaction data."""

    # Map string values stored in the DB to the IDAES objects/methods they name.
    substitute_values = {
        "heat_of_reaction": {"constant_dh_rxn": constant_dh_rxn},
        "*_form": {
            "log_power_law_equil": log_power_law_equil,
            "power_law_equil": power_law_equil,
            "log_solubility_product": log_solubility_product,
            "solubility_product": solubility_product,
            "concentrationform.molarity": ConcentrationForm.molarity,
            "concentrationform.molefraction": ConcentrationForm.moleFraction,
            "concentrationform.activity": ConcentrationForm.activity,
        },
        "*_constant": {
            "van_t_hoff": van_t_hoff,
        },
    }

    def __init__(self, data, name="unknown", validation=True):
        """Constructor.

        Args:
            data: Input data
            name: Name of the component, e.g. "H2O"
            validation: If True, perform schema validation against input.

        Raises:
            ValidationError: If the input is bad.
        """
        super().__init__(data, name=name)
        if validation:
            from .validate import validate  # put here to avoid circular import

            if _log.isEnabledFor(logging.DEBUG):
                _log.debug(f"Validating Reaction:\n{pformat(data)}")
            validate(data, obj_type="reaction")

    @classmethod
    def _transform(cls, data):
        """In-place data transformation from standard storage format to
        format expected by IDAES idaes_config methods
        """
        cls._transform_parameter_data(data)

        for key, value in data.items():
            # reformat stoichiometry to have tuple keys (phase, component)
            if key == "stoichiometry":
                stoich = value
                stoich_table = {}
                for phase in stoich:
                    for component_name, num in stoich[phase].items():
                        skey = (phase, component_name)
                        stoich_table[skey] = num
                data[key] = stoich_table

        cls._substitute(data)

        # wrap the reaction under "<type>_reactions", e.g. "equilibrium_reactions"
        reaction_type = data["type"]
        reaction_section = f"{reaction_type}_reactions"
        # The section should match a merge-key for the Reaction class
        if reaction_section not in Reaction.merge_keys:
            raise RuntimeError(
                f"Unexpected reaction type while generating config: "
                f"type={reaction_type} data={data}"
            )
        del data["type"]  # remove from output
        cls._wrap_section(reaction_section, data)
class BaseConfig(ConfigGenerator):
    """Generate the top-level (base) IDAES configuration shared by all components."""

    # Map string values stored in the DB to the IDAES objects/methods they name.
    substitute_values = {
        "state_definition": {"FTPx": FTPx, "FpcTP": FpcTP},
        "phases.Liq.type": {"LiquidPhase": LiquidPhase, "AqueousPhase": AqueousPhase},
        "phases.Sol.type": {"SolidPhase": SolidPhase},
        "phases.Vap.type": {"VaporPhase": VaporPhase},
        "phases.Liq.equation_of_state": {"Ideal": Ideal},
        "phases.Sol.equation_of_state": {"Ideal": Ideal},
        "phases.Vap.equation_of_state": {"Ideal": Ideal},
        "bubble_dew_method": {"IdealBubbleDew": IdealBubbleDew},
        "phase_equilibrium_state.*": {
            "SmoothVLE": SmoothVLE,
        },
        # unit strings are evaluated into pyomo unit objects
        "base_units.*": ConfigGenerator.SUBST_UNITS,
    }

    @classmethod
    def _transform(cls, data):
        # IDAES expects tuples (not JSON lists) in the sections below
        cls._substitute(data)
        cls._remove_special(data)
        cls._list_to_tuple(data, "state_bounds")
        cls._list_of_lists_to_tuple(data, "phases_in_equilibrium")
        cls._key_to_tuple(data, "phase_equilibrium_state")

    @classmethod
    def _list_to_tuple(cls, data, section):
        """Change all list values in the given section to tuples."""
        if section not in data:
            return
        for key in data[section]:
            if isinstance(data[section][key], list):
                data[section][key] = tuple(data[section][key])

    @classmethod
    def _list_of_lists_to_tuple(cls, data, section):
        """Change all list of list values in the given section to tuples.

        NOTE(review): non-list items are silently dropped from the section
        here -- confirm the input can only contain lists.
        """
        if section not in data:
            return
        temp = []
        for item in data[section]:
            if isinstance(item, list):
                temp.append(tuple(item))
        data[section] = temp

    @classmethod
    def _key_to_tuple(cls, data, section):
        """Change all key values separated by '-' in the given section to tuples of those values."""
        if section not in data:
            return
        temp = {}
        for key in data[section]:
            item_list = key.split("-")
            if len(item_list) != 2:
                raise BadConfiguration(
                    "BaseConfig._key_to_tuple",
                    data,
                    missing=None,
                    why="\n" + section + " tuple key must be only 2 items\n",
                )
            temp[tuple(item_list)] = data[section][key]
        data[section] = temp
class DataWrapperNames:
    """Key names shared by all DataWrapper data dictionaries."""

    # key under which parameter values are stored
    param = "parameter_data"
    # key for the reaction-order table (inside parameter_data)
    reaction_order = "reaction_order"
class DataWrapper:
    """Interface to wrap data from DB in convenient ways for consumption by the rest of the library.

    Do not use this class directly.

    Derived classes will feed the data (from the database) and the appropriate subclass of GenerateConfig to the
    constructor. Then the IDAES config will be available from the `idaes_config` attribute.
    Note that no conversion work is done before the first access, and the converted result is cached to
    avoid extra work on repeated accesses.
    """

    #: Subclasses should set this to the list of top-level keys that should be added,
    #  i.e. merged, into the result when an instance is added to the base data wrapper.
    merge_keys = ()

    NAMES = DataWrapperNames

    def __init__(
        self,
        data: Dict,
        config_gen_class: Type[ConfigGenerator] = None,
        validate_as_type=None,
    ):
        """Constructor.

        Args:
            data: Data from the DB
            config_gen_class: Used to transform DB data to IDAES idaes_config
            validate_as_type: If given, schema type to validate `data` against
        """
        self._data, self._config_gen, self._config = data, config_gen_class, None
        self.name = self._data.get("name", "")
        # strip the database identifier; it is not part of the record proper
        if "_id" in self._data:
            del self._data["_id"]
        self._preprocess()  # additional subclass-specific preprocessing
        if validate_as_type:
            from .validate import validate  # deferred to avoid circular import

            validate(self._data, obj_type=validate_as_type)

    def remove(self, key):
        """Remove a top-level key from the data, if present."""
        if key in self.data:
            del self.data[key]

    def remove_parameter(self, key):
        """Remove a parameter value and invalidate the cached IDAES config."""
        param = self.NAMES.param  # alias
        if param in self.data and key in self.data[param]:
            del self.data[param][key]
            self._config = None

    def set_parameter(self, key: str, value, units: str = "dimensionless", index=0):
        """Add to existing parameters or create a new parameter value.

        Args:
            key: Name of parameter
            value: New value
            units: Units for value
            index: If parameter is a list of values, index to set. Otherwise.
                a list of length of 1 will be created with an index of 0.

        Returns:
            None

        Raises:
            KeyError: If the data structure doesn't have a spot for parameters.
                This is likely a more basic problem with the current instance.
        """
        param = self.NAMES.param  # alias
        if param not in self.data:
            raise KeyError(f"Missing section {param}, so cannot set a parameter")
        entry = {"v": value, "u": units, "i": index}
        # check if there are already value(s)
        if key in self.data[param]:
            # if existing values, replace matching index or add new
            new_param, replaced = [], False
            for item in self.data[param][key]:
                if item["i"] == index:
                    # replace entry at this index with the new entry
                    new_param.append(entry)
                    replaced = True
                else:
                    # keep current entry for this index
                    new_param.append(item)
            if not replaced:
                new_param.append(entry)
        else:
            # if no existing param, create new list of size 1
            new_param = [entry]
        self.data[param][key] = new_param
        self._config = None  # force regeneration

    def _preprocess(self):
        # subclass hook, invoked by __init__ before schema validation
        pass  # define in subclasses

    @property
    def data(self):
        # the raw (EDB-form) data dictionary
        return self._data

    @property
    def idaes_config(self) -> Dict:
        """Get the data as an IDAES config dict.

        Returns:
            Python dict that can be passed to the IDAES as a config.
        """
        # built lazily and cached; mutators reset self._config to None
        if self._config is None:
            # the config_gen() call will copy its input, so get the result from
            # the .config attr
            self._config = self._config_gen(self._data, name=self.name).config
        return self._config

    @property
    def json_data(self) -> Dict:
        """Get the data in its "natural" form as a dict that can be serialized to JSON."""
        copy = self._data.copy()  # shallow copy is fine
        if "_id" in copy:
            del copy["_id"]
        return copy

    @classmethod
    def from_idaes_config(cls, config: Dict) -> List["DataWrapper"]:
        """The inverse of the `idaes_config` property, this method constructs a new
        instance of the wrapped data from the IDAES config information.

        Args:
            config: Valid IDAES configuration dictionary

        Raises:
            BadConfiguration: If the configuration can't be transformed into the EDB form due
                to missing/invalid fields.
        """
        pass  # subclasses need to define this, using helper functions in this class

    @classmethod
    def _method_to_str(
        cls, fld, src, tgt, subst, required=False, default=None, caller: str = None
    ):
        """Convert a method object to a string representation.

        Looks up ``src[fld]`` in the inverse-substitution map ``subst`` and
        stores the string under ``tgt[fld]``.

        Raises:
            BadConfiguration: if field is missing and required, or unrecognized without a default
        """
        if fld in src:
            value = src[fld]
            try:
                str_value = subst[value]
            except KeyError:
                if default is not None:
                    str_value = default
                else:
                    raise BadConfiguration(
                        caller, config=src, why=f"Unknown value for {fld}"
                    )
            tgt[fld] = str_value
        elif required:
            raise BadConfiguration(caller, config=src, missing=fld)

    @classmethod
    def _convert_parameter_data(cls, src, tgt, caller="unknown"):
        """Convert IDAES-form parameter_data back into EDB storage form."""
        if cls.NAMES.param not in src:
            raise BadConfiguration(caller, src, missing=cls.NAMES.param)
        pd, data = src[cls.NAMES.param], {}
        for param, value in pd.items():
            if isinstance(value, tuple):
                # single (value, units) pair
                data[param] = [{"v": value[0], "u": str(value[1])}]
            elif isinstance(value, dict) and len(value) > 0:
                key0 = list(value.keys())[0]
                if isinstance(key0, tuple):
                    # process dict with tuple keys
                    if param == "reaction_order":
                        # skip, not something we need to store in EDB
                        _log.debug(f"skip 'reaction_order' in parameters from {caller}")
                    else:
                        # NOTE(review): other tuple-keyed parameters are silently
                        # dropped here -- confirm no such values exist in practice
                        pass  # not implemented -- no other known values
                else:
                    # process dict with scalar keys
                    param_list = []
                    for i, value2 in value.items():
                        try:
                            i = int(i)
                        except ValueError:
                            pass
                        except TypeError as err:
                            raise BadConfiguration(
                                caller,
                                src,
                                why=f"Unexpected key type in parameter_data: "
                                f"key='{i}' param={value}",
                            )
                        param_list.append({"i": i, "v": value2[0], "u": str(value2[1])})
                    data[param] = param_list
            else:
                raise BadConfiguration(
                    caller,
                    src,
                    why=f"Unexpected value type for '{cls.NAMES.param}': key='{param}', "
                    f"value='{value}'",
                )
        tgt[cls.NAMES.param] = data
class ComponentNames(DataWrapperNames):
    """Key names for Component data (nothing beyond the shared DataWrapper names)."""

    pass
class Component(DataWrapper):
    """Wrapper for an EDB component record (solvent, solute, cation, or anion)."""

    merge_keys = ("components",)

    NAMES = ComponentNames

    def __init__(self, data: Dict, validation=True):
        """Constructor.

        Args:
            data: Data from the DB
            validation: If true, do schema validation of input
        """
        vtype = "component" if validation else None
        super().__init__(data, ThermoConfig, validate_as_type=vtype)

    def _preprocess(self):
        """Infer the 'type' field (anion/cation/solvent/solute) and the ion
        charge from the component name, if 'type' is not already present."""
        if "type" in self._data:
            return  # already present

        name, elements = None, None
        try:
            name = self._data["name"]
            elements = self._data["elements"]
        except KeyError:
            missing = "name" if name is None else "elements"
            raise BadConfiguration("Component._preprocess", self._data, missing=missing)

        if name.endswith("-"):  # negatively charged
            component_type = "anion"
            match = re.match(r".*(\d+)-$", name)
            # no explicit digit means a single charge, e.g. "Cl-" -> -1
            charge = -1 if match is None else -int(match.group(1))
            self._data["charge"] = charge
        elif name.endswith("+"):  # positively charged
            component_type = "cation"
            match = re.match(r".*(\d+)\+$", name)
            charge = 1 if match is None else int(match.group(1))
            self._data["charge"] = charge
        elif name == "H2O":  # water is always "H2O"
            component_type = "solvent"
        else:  # anything else neutral
            component_type = "solute"

        self._data["type"] = component_type

    @classmethod
    def from_idaes_config(cls, config: Dict) -> List["Component"]:
        """See documentation on parent class."""
        whoami = "Component.from_idaes_config"
        # get inverse mapping of strings and values from ThermoConfig.substitute_values,
        # used for calls to _method_to_str()
        subst_strings = {}
        for _, mapping in ThermoConfig.substitute_values.items():
            for k, v in mapping.items():
                subst_strings[v] = k

        if "components" not in config:
            # FIX: BadConfiguration takes the caller name as its first positional
            # argument (see every other call site, e.g. the one just below);
            # the old `whoami=whoami` keyword would itself raise a TypeError.
            raise BadConfiguration(whoami, config, missing="components")

        result = []
        for name, c in config["components"].items():
            d = {"name": name}
            with field("type") as fld:
                if fld not in c:
                    raise BadConfiguration(whoami, config, missing=fld)
                possible = {Solvent, Solute, Cation, Anion, IComponent}
                if c[fld] not in possible:
                    possible_list = ", ".join([str(t) for t in possible])
                    raise BadConfiguration(
                        whoami,
                        config,
                        why=f"Bad value for '{fld}': expected one of: {possible_list}; "
                        f"got='{c[fld]}'",
                    )
            cls._method_to_str("valid_phase_types", c, d, subst_strings, caller=whoami)
            for fld in c:
                if fld.endswith("_comp"):
                    cls._method_to_str(fld, c, d, subst_strings, caller=whoami)
            with field("phase_equilibrium_form") as fld:
                if fld in c:
                    d[fld] = {}
                    # NOTE(review): removed a dead `for key, value in c[fld].items(): break`
                    # loop that bound names never used. The conversion below looks
                    # incomplete (it searches for 'phase_equilibrium_form' inside
                    # c[fld], whose keys are phase-pair tuples) -- confirm intent.
                    cls._method_to_str(fld, c[fld], d, subst_strings, caller=whoami)
            # extract elements from name
            d["elements"] = re.findall(r"[A-Z][a-z]?", name)
            cls._convert_parameter_data(c, d)
            result.append(Component(d))
        return result
class ReactionNames(DataWrapperNames):
    """Key names specific to Reaction data dictionaries."""

    stoich = "stoichiometry"
    hor = "heat_of_reaction"
    eq_const = "equilibrium_constant"
    eq_form = "equilibrium_form"
    conc_form = "concentration_form"
class Reaction(DataWrapper):
merge_keys = ("equilibrium_reactions", "rate_reactions", "inherent_reactions")
NAMES = ReactionNames
PHASES = ("Liq", "Vap", "Sol")
def __init__(self, data: Dict, validation=True):
"""Constructor.
Args:
data: Data from the DB
validation: If true, do schema validation of input
"""
vtype = "reaction" if validation else None
super().__init__(data, ReactionConfig, validate_as_type=vtype)
@property
def reaction_type(self):
return self.data.get("type", "")
def set_reaction_order(
self,
phase: str,
order: Union[List[Tuple[str, float]], Dict[str, float]],
require_all: bool = False,
) -> None:
"""Set the reaction order for the given phase.
Args:
phase: a value from self.PHASES
order: Either a dict or list of (element, value) pairs
require_all: If True, require that all components in the reaction be
given an order. If False, it is OK if some components are missing.
Returns:
None. Reaction order is modified in place.
Raises:
KeyError: something is missing in the data structure, or unknown
component provided
ValueError: Wrong or incomplete components provided
"""
if bool(order) is False:
raise ValueError("No components provided for reaction order")
# schema validation should guarantee this structure
# If 'reaction_order' key does not exist, then create one as a copy of stoich
if self.NAMES.reaction_order in self.data[self.NAMES.param]:
ro = self.data[self.NAMES.param][self.NAMES.reaction_order]
else:
self.data[self.NAMES.param][self.NAMES.reaction_order] = self.data[
self.NAMES.stoich
].copy()
ro = self.data[self.NAMES.param][self.NAMES.reaction_order]
if phase not in self.PHASES:
raise ValueError(
f"Invalid phase '{phase}'. Valid values: " f"{', '.join(self.PHASES)}"
)
if phase not in ro:
raise KeyError(f"Phase '{phase}' not found")
ro = ro[phase]
# normalize input to dict form
if not hasattr(order, "keys"):
order = dict(order)
# additional checks for 'require_all' flag
if require_all:
if len(order) != len(ro):
why = "not enough" if len(order) < len(ro) else "too many"
raise ValueError(
f"{why.title()} components provided for new reaction "
f"order, with 'require_all' flag set to True"
)
if set(order.keys()) != set(ro.keys()):
raise ValueError(
"Components in new reaction order do not match "
"components in reaction, with 'require_all' flag "
"set to True"
)
# Replace one component at a time, raising a KeyError if unknown component
# Ensure that the instance is not modified if there are any errors.
ro_tmp = ro.copy()
for key, value in order.items():
if key not in ro:
raise KeyError(f"Component '{key}' not found in reaction")
ro_tmp[key] = value
# Update reaction order in this object
self.data[self.NAMES.param][self.NAMES.reaction_order][phase] = ro_tmp
    @classmethod
    def from_idaes_config(cls, config: Dict) -> List["Reaction"]:
        """See documentation on parent class.

        Builds one :class:`Reaction` wrapper per entry of every
        ``*_reactions`` section of the IDAES config that is also one of this
        class's merge keys.
        """
        whoami = "Reaction.from_idaes_config"  # for logging
        # get inverse mapping of strings and values from
        # ReactionConfig.substitute_values, used for calls to _method_to_str()
        subst_strings = {}
        for _, mapping in ReactionConfig.substitute_values.items():
            for k, v in mapping.items():
                subst_strings[v] = k
        result = []
        # XXX: base units?
        # Iterate only the reaction sections (e.g. 'equilibrium_reactions')
        # that appear both in cls.merge_keys and in the given config.
        for reaction_type in (
            k for k in cls.merge_keys if (k.endswith("_reactions") and k in config)
        ):
            for name, r in config[reaction_type].items():
                # 'type' is the section name minus the '_reactions' suffix
                d = {"name": name, "type": reaction_type.split("_reactions")[0]}
                # convert all non-dictionary-valued fields into equivalent string values
                for fld, val in r.items():
                    if isinstance(val, str):  # leave string values as-is
                        d[fld] = val
                    elif not isinstance(val, dict):  # convert all other non-dict values
                        cls._method_to_str(fld, r, d, subst_strings, caller=whoami)
                cls._convert_parameter_data(r, d)
                # NOTE(review): `field` appears to be a context manager defined
                # elsewhere in this module that yields its argument — confirm.
                with field("stoichiometry") as fld:
                    if fld in r:
                        cls._convert_stoichiometry(r[fld], d)
                result.append(Reaction(d))
        return result
@classmethod
def _convert_stoichiometry(cls, src, tgt):
data, component_list = {}, []
for key, value in src.items():
phase, species = key
if phase in data:
data[phase][species] = value # set species & quantity
else:
data[phase] = {species: value} # create new dictionary
component_list.append(species)
tgt["stoichiometry"] = data
tgt["components"] = component_list
class Base(DataWrapper):
    """Wrapper for 'base' information to which a component or reaction is added."""

    def __init__(self, data: Dict):
        super().__init__(data, BaseConfig)
        self._to_merge = []            # wrappers added but not yet merged
        self._component_names = set()  # names of Components seen by add()
        self._dirty = True             # True when a re-merge is needed
        self._idaes_config = None      # cached merged configuration

    def add(self, item: DataWrapper):
        """Add wrapped data to this base object."""
        self._to_merge.append(item)
        if isinstance(item, Component):
            self._component_names.add(item.name)
        self._dirty = True

    @property
    def component_names(self):
        """Names of all components added so far, as a list."""
        return list(self._component_names)

    @property
    def idaes_config(self):
        """Merged IDAES config, recomputed lazily only after add() calls."""
        if self._dirty:
            # create the base config on first access only
            if self._idaes_config is None:
                self._idaes_config = super().idaes_config
            # fold in every item queued by add()
            for pending in self._to_merge:
                self._merge(self._idaes_config, pending)
            # reset for more calls to add() or this property
            self._dirty, self._to_merge = False, []
        return self._idaes_config

    @staticmethod
    def _merge(dst, src: DataWrapper) -> Dict:
        """Merge on defined configuration keys."""
        src_config = src.idaes_config
        for key in src.merge_keys:
            if key in src_config:
                if key in dst:
                    dst[key].update(src_config[key])
                else:
                    dst[key] = src_config[key]
        return dst
class Result:
    """Encapsulate one or more JSON objects in the appropriate :class:`DataWrapper` subclass.

    Users won't need to instantiate this directly, just iterate over it to retrieve the result of
    a database query or other operation that returns EDB data objects.

    For example::

        result = db.get_reactions(..search-params...)
        for reaction_obj in result:
            # ..work with instance of class Reaction..
            print(reaction_obj.name)
    """

    def __init__(self, iterator=None, item_class=None):
        """Wrap `iterator`, converting each raw record with `item_class`.

        Args:
            iterator: iterable of raw JSON/dict records; None means empty.
            item_class: DataWrapper subclass applied to each record
                (required when `iterator` is given).
        """
        # Bug fix: previously, Result() without an iterator left `_it` unset,
        # so iterating raised AttributeError. Default to an empty iterator so
        # an argument-less Result simply behaves as an empty sequence.
        self._it = iter(())
        self._it_class = None
        if iterator is not None:
            assert issubclass(item_class, DataWrapper)
            self._it = iterator
            self._it_class = item_class

    def __iter__(self):
        return self

    def __next__(self):
        datum = next(self._it)
        obj = self._it_class(datum)
        return obj
|
nilq/baby-python
|
python
|
import os
import csv
import timeit
from datetime import datetime
import numpy
import logging
import coloredlogs
import numpy as np
import argparse
import copy
import json
import re
import subprocess  # used throughout for find/ldd/trtexec invocations
import sys
import onnx  # onnx.TensorProto is used when loading test data
import onnxruntime
from onnx import numpy_helper
from perf_utils import *
import pprint
import time
from float16 import *
# import torch
# Global debug flag; enables verbose operator-map dumps at the end of a run.
debug = False

sys.path.append('.')
logger = logging.getLogger('')

# Maps a benchmark EP label to the ordered provider list handed to
# onnxruntime.InferenceSession; TRT falls back to CUDA for unsupported ops.
ep_to_provider_list = {
    "CPUExecutionProvider": ["CPUExecutionProvider"],
    "CUDAExecutionProvider": ["CUDAExecutionProvider"],
    "CUDAExecutionProvider_fp16": ["CUDAExecutionProvider"],
    "TensorrtExecutionProvider": ["TensorrtExecutionProvider", "CUDAExecutionProvider"],
    "TensorrtExecutionProvider_fp16": ["TensorrtExecutionProvider", "CUDAExecutionProvider"],
}
def run_trt_standalone(trtexec, model_path, ort_inputs, all_inputs_shape, fp16):
    """Benchmark a model with the standalone `trtexec` binary.

    Args:
        trtexec: path to the trtexec executable.
        model_path: path to the .onnx model.
        ort_inputs: session input metadata (objects with a .name attribute).
        all_inputs_shape: shapes matching ort_inputs, one per input.
        fp16: whether to pass --fp16 to trtexec.

    Returns:
        dict with 'average_latency_ms' and 'latency_90_percentile' (string
        values, in ms) parsed from trtexec output, or None on failure.
    """
    model_path = "--onnx=" + model_path
    input_shape = []
    print(all_inputs_shape)
    for i in range(len(ort_inputs)):
        # trtexec expects optShapes entries as name:dim0xdim1x...
        dims = "x".join(str(d) for d in all_inputs_shape[i])
        input_shape.append(ort_inputs[i].name + ':' + dims)
    shapes_arg = '--optShapes=' + ','.join(input_shape)
    print(shapes_arg)

    # Build the command once instead of duplicating the Popen call per mode.
    command = [trtexec, model_path, "--percentile=90", "--explicitBatch", shapes_arg]
    if fp16:
        command.insert(2, "--fp16")

    result = {}
    try:
        p1 = subprocess.Popen(command, stdout=subprocess.PIPE)
        stdout, sterr = p1.communicate()
        print(stdout)
        stdout = stdout.decode("ascii").strip()

        # Collect all 'mean:' / 'percentile:' lines in output order; entries
        # 2 and 3 correspond to the GPU compute statistics we want.
        target_list = []
        for t in stdout.split("\n"):
            if 'mean:' in t:
                target_list.append(t)
            if 'percentile:' in t:
                target_list.append(t)

        target = target_list[2]
        result["average_latency_ms"] = target[target.find('mean:') + 6:target.find('ms')]
        target = target_list[3]
        result["latency_90_percentile"] = target[target.find('percentile:') + 12:target.find('ms')]
        print(result)
        return result
    except Exception as e:
        # Previously the exception was dropped silently; log it for triage.
        logger.error("trtexec failed: %s", e)
        return None
def get_latency_result(runtimes, batch_size):
    """Summarize a list of per-run latencies (seconds) into a stats dict.

    Returns string-formatted mean/variance/percentiles in milliseconds plus
    throughput (QPS) derived from `batch_size`.
    """
    run_count = len(runtimes)
    mean_ms = sum(runtimes) / float(run_count) * 1000.0
    variance_ms = numpy.var(runtimes, dtype=numpy.float64) * 1000.0

    def pct_ms(p):
        # percentile of the raw (seconds) samples, converted to ms
        return numpy.percentile(runtimes, p) * 1000.0

    return {
        "test_times": run_count,
        "latency_variance": "{:.2f}".format(variance_ms),
        "latency_90_percentile": "{:.2f}".format(pct_ms(90)),
        "latency_95_percentile": "{:.2f}".format(pct_ms(95)),
        "latency_99_percentile": "{:.2f}".format(pct_ms(99)),
        "average_latency_ms": "{:.2f}".format(mean_ms),
        "QPS": "{:.2f}".format(batch_size * (1000.0 / mean_ms)),
    }
def get_ort_session_inputs_and_outptus(name, session, ort_input):
    """Build the (input-feed dict, output-name list) pair for one model run.

    A handful of model-zoo models need hand-written input/output name
    mappings; everything else is fed positionally from the session metadata.
    (The function name keeps the historical 'outptus' typo because callers
    in this module reference it.)
    """
    if name == 'BERT-Squad':
        # four inputs; tensors 1-3 are sliced down to batch size 1
        feed = {
            "unique_ids_raw_output___9:0": ort_input[0],
            "input_ids:0": ort_input[1][0:1],
            "input_mask:0": ort_input[2][0:1],
            "segment_ids:0": ort_input[3][0:1],
        }
        fetch = ["unique_ids:0", "unstack:0", "unstack:1"]
    elif name == 'BiDAF':
        # note the word/char inputs arrive interleaved in ort_input
        feed = {
            "context_word": ort_input[0],
            "context_char": ort_input[2],
            "query_word": ort_input[1],
            "query_char": ort_input[3],
        }
        fetch = ["start_pos", "end_pos"]
    elif name == 'Yolov4':
        feed = {session.get_inputs()[0].name: ort_input[0]}
        fetch = ['Identity:0']
    elif name == 'Shufflenet-v2':
        # the whole ort_input is a single tensor here, not a list of tensors
        feed = {session.get_inputs()[0].name: ort_input}
        fetch = None
    else:
        feed = {meta.name: ort_input[i] for i, meta in enumerate(session.get_inputs())}
        fetch = None
    return (feed, fetch)
def inference_ort(args, name, session, ep, ort_inputs, result_template, repeat_times, batch_size):
    """Time session.run over each prepared input set and summarize latency.

    Returns the result_template merged with latency statistics, or None if
    any run raises.
    """
    runtimes = []
    for ort_input in ort_inputs:
        sess_inputs, sess_outputs = get_ort_session_inputs_and_outptus(name, session, ort_input)
        print("sess_inputs:")
        print(sess_inputs)
        print("sess_outputs:")
        print(sess_outputs)

        try:
            if args.input_data == "random":
                repeat_times = 1  # warm-up run is included in ort_inputs
            else:
                repeat_times += 1  # add a warm-up run
            runtimes.extend(
                timeit.repeat(lambda: session.run(sess_outputs, sess_inputs), number=1, repeat=repeat_times)
            )
        except Exception as e:
            logger.error(e)
            return None

    print(runtimes)
    runtimes[:] = runtimes[1:]  # discard the first (warm-up) measurement
    print(runtimes)

    summary = {}
    summary.update(result_template)
    summary.update({"io_binding": False})
    summary.update(get_latency_result(runtimes, batch_size))
    return summary
def inference_ort_and_get_prediction(name, session, ort_inputs):
    """Run the session once per input set and collect the predictions.

    Returns a list of per-input outputs (nesting normalized per model so
    validate() can iterate uniformly), or None if any run raises.
    """
    predictions = []
    for ort_input in ort_inputs:
        sess_inputs, sess_outputs = get_ort_session_inputs_and_outptus(name, session, ort_input)
        print("sess_inputs:")
        print(sess_inputs)
        print("sess_outputs:")
        print(sess_outputs)
        try:
            output = session.run(sess_outputs, sess_inputs)
            # handle shape of output differently per model
            if name == 'BERT-Squad':
                predictions.append([output])
            elif name == 'Shufflenet-v2':
                predictions.append(output[0])
            else:
                predictions.append(output)
        except Exception as e:
            logger.error(e)
            return None
    return predictions
# not use for this script yet
def inference_ort_with_io_binding(model, ort_inputs, result_template, repeat_times, batch_size, device='cuda'):
    """Benchmark with ORT I/O binding: inputs are copied to `device` up front.

    NOTE(review): this function is currently unused by the script, and it
    relies on `torch`, whose import at the top of the file is commented out —
    calling it as-is would raise NameError. Confirm before enabling.
    """
    runtimes = []
    session = model.get_session()

    # Bind inputs and outputs to onnxruntime session
    io_binding = session.io_binding()

    for ort_input in ort_inputs:
        # Bind inputs to device
        if model.get_model_name() == 'BERT-Squad':
            # BERT-Squad: four int64 inputs; tensors 1-3 are sliced to batch 1
            name = session.get_inputs()[0].name
            print(name)
            np_input = torch.from_numpy(ort_input[0]).to(device)
            io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
            name = session.get_inputs()[1].name
            print(name)
            np_input = torch.from_numpy(ort_input[1][0:1]).to(device)
            io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
            name = session.get_inputs()[2].name
            print(name)
            np_input = torch.from_numpy(ort_input[2][0:1]).to(device)
            io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
            name = session.get_inputs()[3].name
            print(name)
            np_input = torch.from_numpy(ort_input[3][0:1]).to(device)
            io_binding.bind_input(name, np_input.device.type, 0, numpy.longlong, np_input.shape, np_input.data_ptr())
        else:
            # single-input models; assumed float32 — TODO confirm per model
            name = session.get_inputs()[0].name
            print(ort_input[0])
            np_input = torch.from_numpy(ort_input[0]).to(device)
            io_binding.bind_input(name, np_input.device.type, 0, numpy.float32, np_input.shape, np_input.data_ptr())
        name_o = session.get_outputs()[0].name
        io_binding.bind_output(name_o)

    # name = session.get_inputs()[0].name
    # np_input = torch.from_numpy(numpy.asarray(ort_inputs[0][0])).to(device)
    # io_binding.bind_input(name, np_input.device.type, 0, numpy.float32, np_input.shape, np_input.data_ptr())
    # name_o = session.get_outputs()[0].name
    # io_binding.bind_output(name_o, 'cpu', 0, numpy.float32, session.get_outputs()[0].shape, None)

    try:
        runtimes = runtimes + timeit.repeat(lambda: session.run_with_iobinding(io_binding), number=1, repeat=repeat_times)
    except Exception as e:
        logger.error(e)
        return None

    print(runtimes)

    result = {}
    result.update(result_template)
    result.update({"io_binding": True})
    result.update(get_latency_result(runtimes, batch_size))
    return result
def get_cuda_version():
    """Return the libcudart.so dependency line of the installed ORT pybind module.

    Locates onnxruntime_pybind11_state.so under ~/.local/lib via `find`,
    then greps its `ldd` output for libcudart.so; '' when not found.
    """
    from pathlib import Path
    home = str(Path.home())

    finder = subprocess.Popen(
        ["find", home + "/.local/lib/", "-name", "onnxruntime_pybind11_state.so"],
        stdout=subprocess.PIPE)
    out, _ = finder.communicate()
    so_path = out.decode("ascii").strip()

    ldd = subprocess.Popen(["ldd", so_path], stdout=subprocess.PIPE)
    grep = subprocess.Popen(["grep", "libcudart.so"], stdin=ldd.stdout, stdout=subprocess.PIPE)
    out, _ = grep.communicate()
    return out.decode("ascii").strip()
def get_trt_version():
    """Return the libnvinfer.so dependency line for the installed onnxruntime.

    Checks the pybind module first; newer wheel layouts split TensorRT into
    libonnxruntime_providers_tensorrt.so, so that is tried as a fallback.
    Returns '' when neither shared object links libnvinfer.so.
    """
    from pathlib import Path
    home = str(Path.home())

    def _nvinfer_dep(so_name):
        # locate the shared object, then grep its ldd output for libnvinfer.so
        p1 = subprocess.Popen(["find", home + "/.local/lib/", "-name", so_name],
                              stdout=subprocess.PIPE)
        out, _ = p1.communicate()
        so_path = out.decode("ascii").strip()
        p1 = subprocess.Popen(["ldd", so_path], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(["grep", "libnvinfer.so"], stdin=p1.stdout, stdout=subprocess.PIPE)
        out, _ = p2.communicate()
        return out.decode("ascii").strip()

    result = _nvinfer_dep("onnxruntime_pybind11_state.so")
    if result == "":
        result = _nvinfer_dep("libonnxruntime_providers_tensorrt.so")
    return result
# not use for this script temporarily
def tmp_get_trt_version():
    """Best-effort TensorRT version lookup (not used by this script currently).

    First asks dpkg for the TensorRT runtime package line; failing that,
    reads the version symbol out of libnvinfer.so via readelf. Returns ''
    when nothing is found.
    """
    p1 = subprocess.Popen(["dpkg", "-l"], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["grep", "TensorRT runtime libraries"], stdin=p1.stdout, stdout=subprocess.PIPE)
    stdout, sterr = p2.communicate()
    stdout = stdout.decode("ascii").strip()
    if stdout != "":
        # raw string for the regex avoids an invalid-escape-sequence warning
        return re.sub(r'\s+', ' ', stdout)

    def _readelf_version(lib_path):
        # last whitespace-separated token of the grepped symbol line
        r1 = subprocess.Popen(["readelf", "-s", lib_path], stdout=subprocess.PIPE)
        r2 = subprocess.Popen(["grep", "version"], stdin=r1.stdout, stdout=subprocess.PIPE)
        out, _ = r2.communicate()
        return out.decode("ascii").strip().split(" ")[-1]

    # x86_64 first, then aarch64 (Jetson-style) library locations
    for lib in ("/usr/lib/x86_64-linux-gnu/libnvinfer.so",
                "/usr/lib/aarch64-linux-gnu/libnvinfer.so"):
        if os.path.exists(lib):
            return _readelf_version(lib)
    return ""
#
# The following two lists will be generated.
#
# inputs: [[test_data_0_input_0.pb, test_data_0_input_1.pb ...], [test_data_1_input_0.pb, test_data_1_input_1.pb ...] ...]
# outputs: [[test_data_0_output_0.pb, test_data_0_output_1.pb ...], [test_data_1_output_0.pb, test_data_1_output_1.pb ...] ...]
#
def load_onnx_model_zoo_test_data(path, all_inputs_shape, data_type="fp32"):
    """Load every test_data* protobuf input/output set found under `path`.

    Args:
        path: directory containing test_data* subdirectories of .pb tensors.
        all_inputs_shape: list mutated in place — filled with each input's
            shape the first time this is called (left untouched if already
            populated by an earlier call).
        data_type: "fp32" (default) or "fp16"; fp16 casts float32 tensors.

    Returns:
        (inputs, outputs): lists of per-test-set tensor lists.

    Note: temporarily chdir()s into each test-data directory; the original
    cwd is restored at the end of each iteration.
    """
    print("Parsing test data in {} ...".format(path))
    # p1 = subprocess.Popen(["find", path, "-name", "test_data_set*", "-type", "d"], stdout=subprocess.PIPE)
    p1 = subprocess.Popen(["find", path, "-name", "test_data*", "-type", "d"], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["sort"], stdin=p1.stdout, stdout=subprocess.PIPE)
    stdout, sterr = p2.communicate()
    stdout = stdout.decode("ascii").strip()
    test_data_set_dir = stdout.split("\n")
    print(test_data_set_dir)

    inputs = []
    outputs = []

    shape_flag = False
    # if not empty means input shape has been parsed before.
    if len(all_inputs_shape) > 0:
        shape_flag = True

    # find test data path
    for test_data_dir in test_data_set_dir:
        pwd = os.getcwd()
        os.chdir(test_data_dir)

        # load inputs (sorted so input_0, input_1, ... keep positional order)
        p1 = subprocess.Popen(["find", ".", "-name", "input*"], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(["sort"], stdin=p1.stdout, stdout=subprocess.PIPE)
        stdout, sterr = p2.communicate()
        stdout = stdout.decode("ascii").strip()
        input_data = stdout.split("\n")
        print(input_data)

        input_data_pb = []
        for data in input_data:
            tensor = onnx.TensorProto()
            with open(data, 'rb') as f:
                tensor.ParseFromString(f.read())
                tensor_to_array = numpy_helper.to_array(tensor)
                # fp16 mode: downcast float32 reference tensors
                if data_type == "fp16" and tensor_to_array.dtype == np.dtype(np.float32):
                    tensor_to_array = tensor_to_array.astype(np.float16)
                input_data_pb.append(tensor_to_array)
                # print(np.array(input_data_pb[-1]).shape)
                if not shape_flag:
                    all_inputs_shape.append(input_data_pb[-1].shape)
                print(all_inputs_shape[-1])
        inputs.append(input_data_pb)
        print('Loaded {} inputs successfully.'.format(len(inputs)))

        # load outputs (may be absent for some test sets)
        p1 = subprocess.Popen(["find", ".", "-name", "output*"], stdout=subprocess.PIPE)
        p2 = subprocess.Popen(["sort"], stdin=p1.stdout, stdout=subprocess.PIPE)
        stdout, sterr = p2.communicate()
        stdout = stdout.decode("ascii").strip()
        output_data = stdout.split("\n")
        print(output_data)

        if len(output_data) > 0 and output_data[0] != '':
            output_data_pb = []
            for data in output_data:
                tensor = onnx.TensorProto()
                with open(data, 'rb') as f:
                    tensor.ParseFromString(f.read())
                    tensor_to_array = numpy_helper.to_array(tensor)
                    if data_type == "fp16" and tensor_to_array.dtype == np.dtype(np.float32):
                        tensor_to_array = tensor_to_array.astype(np.float16)
                    output_data_pb.append(tensor_to_array)
                    print(np.array(output_data_pb[-1]).shape)
            outputs.append(output_data_pb)
            print('Loaded {} outputs successfully.'.format(len(outputs)))

        os.chdir(pwd)

    return inputs, outputs
def generate_onnx_model_random_input(test_times, ref_input):
inputs = []
for i in range(test_times):
input_data = []
for tensor in ref_input:
shape = tensor.shape
dtype = tensor.dtype
if dtype == np.int8 or \
dtype == np.uint8 or \
dtype == np.int16 or \
dtype == np.uint16 or \
dtype == np.int32 or \
dtype == np.uint32 or \
dtype == np.int64 or \
dtype == np.uint64:
new_tensor = np.random.randint(0, np.max(tensor)+1, shape, dtype)
else:
new_tensor = np.random.random_sample(shape).astype(dtype)
print("original tensor:")
print(tensor)
print("new random tensor:")
print(new_tensor)
print("\n")
input_data.append(new_tensor)
inputs.append(input_data)
return inputs
def validate(all_ref_outputs, all_outputs, decimal):
    """Compare predictions against reference outputs to `decimal` places.

    Returns (True, None) on a full match, or (False, exception) on the
    first mismatch / indexing error (e.g. fewer references than outputs).
    """
    print('Reference {} results.'.format(len(all_ref_outputs)))
    print('Predicted {} results.'.format(len(all_outputs)))
    print('decimal {}'.format(decimal))
    try:
        for i, outputs in enumerate(all_outputs):
            ref_outputs = all_ref_outputs[i]  # IndexError here is caught below
            for j, output in enumerate(outputs):
                ref_output = ref_outputs[j]
                # elementwise: abs(desired - actual) < 1.5 * 10**(-decimal)
                for expected, actual in zip(ref_output, output):
                    np.testing.assert_almost_equal(expected, actual, decimal)
    except Exception as e:
        logger.error(e)
        return False, e
    print('ONNX Runtime outputs are similar to reference outputs!')
    return True, None
# not use for this script
def cleanup_files():
    """Delete downloaded test-data dirs, .onnx and .gz files under cwd.

    Anything under a custom_test_data path is printed and preserved.
    """
    doomed = []
    for find_args in (["find", ".", "-name", "test_data_set*", "-type", "d"],
                      ["find", ".", "-name", "*.onnx"],
                      ["find", ".", "-name", "*.gz"]):
        proc = subprocess.Popen(find_args, stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        doomed += out.decode("ascii").strip().split("\n")

    for target in doomed:
        if "custom_test_data" in target:
            print(target)
            continue
        subprocess.Popen(["rm", "-rf", target], stdout=subprocess.PIPE)
def remove_profiling_files(path):
    """Remove onnxruntime_profile* files under `path`, sparing custom_test_data."""
    proc = subprocess.Popen(["find", path, "-name", "onnxruntime_profile*"],
                            stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    for profile_path in out.decode("ascii").strip().split("\n"):
        if "custom_test_data" in profile_path:
            continue
        subprocess.Popen(["rm", "-rf", profile_path], stdout=subprocess.PIPE)
def update_fail_report(fail_results, args, model, ep, e_type, e):
    """Append one failure record (model / EP / error) to `fail_results`."""
    # strip a single leading newline so CSV cells stay tidy
    message = re.sub(r'^\n', '', str(e))
    fail_results.append({
        "model": model,
        "ep": ep,
        "error type": e_type,
        "error message": message,
    })
def update_fail_model(model_ep_fail_map, fail_results, args, model_name, ep, e_type, e):
    """Record that `ep` failed for `model_name`, and report the failure."""
    failed_eps = model_ep_fail_map.setdefault(model_name, [])
    if ep not in failed_eps:
        failed_eps.append(ep)

    update_fail_report(fail_results, args, model_name, ep, e_type, e)

    # A TRT failure implies the TRT FP16 run would fail too; record up front.
    if ep == 'TensorrtExecutionProvider':
        fp16_ep = "TensorrtExecutionProvider_fp16"
        note = "Not benchmarking TRT FP16 since TRT failed already."
        update_fail_report(fail_results, args, model_name, fp16_ep, e_type, note)
        failed_eps.append(fp16_ep)
def skip_ep(model_name, ep, model_ep_fail_map):
    """Return True when this EP should not be run for `model_name`.

    Skips fp16 runs for two known-bad models, and any EP already recorded
    as failing for the model in a previous pass.
    """
    # fp16 is known to be unusable for these two models
    if "fp16" in ep and model_name in ('vision-yolov3', 'speech'):
        return True
    # skip EPs that already failed for this model
    return ep in model_ep_fail_map.get(model_name, [])
def read_model_ep_fail_map_from_file(map_file):
    """Load the model -> failing-EP map from a JSON file; None if unparsable."""
    with open(map_file) as f:
        try:
            return json.load(f)
        except Exception:
            return None
def write_model_ep_fail_map_to_file(model_ep_fail_map):
    """Persist the model -> failing-EP map as JSON (read back with json.load)."""
    with open('.model_ep_fail_map.json', 'w') as out:
        json.dump(model_ep_fail_map, out)
def get_system_info(info):
    """Populate `info` (in place) with CUDA/TRT, OS, CPU, GPU and memory details."""

    def run(cmd):
        # run a command and return its decoded, stripped stdout
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        return out.decode("ascii").strip()

    info["cuda"] = get_cuda_version()
    info["trt"] = get_trt_version()

    # first two lines of /etc/os-release (NAME / VERSION), reformatted
    os_rows = run(["cat", "/etc/os-release"]).split("\n")[:2]
    info["linux_distro"] = [re.sub('"', '', re.sub('=', ': ', row)) for row in os_rows]

    cpu_rows = []
    for row in run(["lscpu"]).split("\n"):
        if "mode" in row or "Arch" in row or "name" in row:
            cpu_rows.append(re.sub(': +', ': ', row))
    info["cpu_info"] = cpu_rows

    # NVIDIA devices listed by lspci
    p1 = subprocess.Popen(["lspci", "-v"], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(["grep", "NVIDIA"], stdin=p1.stdout, stdout=subprocess.PIPE)
    out, _ = p2.communicate()
    gpu_rows = [re.sub('.*:', '', row) for row in out.decode("ascii").strip().split("\n")]
    info["gpu_info"] = gpu_rows

    mem_rows = []
    for row in run(["cat", "/proc/meminfo"]).split("\n"):
        if "Mem" in row:
            mem_rows.append(re.sub(': +', ': ', row))
    info["memory"] = mem_rows
def parse_models_info(path):
    """Parse models_info.json into a per-model configuration dict.

    Args:
        path: path to a JSON file containing a list of rows, each with
            model_name, working_directory, model_path, test_data_path.

    Returns:
        {model_name: {working_directory, model_path, test_data_path}}

    Raises:
        ValueError: when a required field is missing from a row.
            (The original code did a bare `raise` with no active exception,
            which itself crashed with "RuntimeError: No active exception to
            re-raise" — and used the wrong message for working_directory.)
    """
    required_fields = ("working_directory", "model_path", "test_data_path")
    models = {}
    with open(path) as f:
        data = json.load(f)
        for row in data:
            if 'model_name' not in row:
                logger.error('Model name must be provided in models_info.json')
                raise ValueError('Model name must be provided in models_info.json')
            models[row['model_name']] = {}
            model = models[row['model_name']]
            for key in required_fields:
                if key not in row:
                    logger.error('%s must be provided in models_info.json', key)
                    raise ValueError('{} must be provided in models_info.json'.format(key))
                model[key] = row[key]
    return models
def convert_model_from_float_to_float16(model_path):
    """Convert an FP32 ONNX model to FP16; return the new model's absolute path.

    The converted model is always written to new_fp16_model.onnx in the
    current working directory.
    """
    from onnxmltools.utils import load_model, save_model
    from float16 import convert_float_to_float16

    fp32_model = load_model(model_path)
    fp16_model = convert_float_to_float16(fp32_model)
    save_model(fp16_model, 'new_fp16_model.onnx')
    return os.path.join(os.getcwd(), "new_fp16_model.onnx")
def create_session(model_path, providers, session_options):
    """Create an ORT InferenceSession, retrying after symbolic shape inference.

    If direct construction fails (often missing shape info for TRT), runs
    ../symbolic_shape_infer.py to produce <model>_new.onnx and retries.

    Raises:
        Whatever the fallback session construction or subprocess raises.
    """
    logger.info(model_path)
    try:
        return onnxruntime.InferenceSession(model_path, providers=providers, sess_options=session_options)
    except Exception:  # was a bare `except:` — don't swallow SystemExit/KeyboardInterrupt
        logger.info("Use symbolic_shape_infer.py")
        try:
            new_model_path = model_path[:].replace(".onnx", "_new.onnx")
            if not os.path.exists(new_model_path):
                # argument-list form (shell=False) avoids shell-quoting issues in paths
                subprocess.run(
                    ["python3", "../symbolic_shape_infer.py",
                     "--input", model_path,
                     "--output", new_model_path,
                     "--auto_merge"],
                    check=True)
            return onnxruntime.InferenceSession(new_model_path, providers=providers, sess_options=session_options)
        except Exception as e:
            print(e)
            raise
def run_onnxruntime(args, models):
    """Benchmark or validate every model in `models` across the selected EPs.

    Args:
        args: parsed CLI namespace; fields used here: running_mode
            ('benchmark' or 'validate'), fp16, input_data, test_times, trtexec.
        models: {model_name: {working_directory, model_path, test_data_path}}
            as produced by parse_models_info().

    Returns:
        Tuple (success_results, fail_results, latency_comparison_map,
        model_ep_fail_map, profile_metrics_map).

    Note: chdir()s into each model's working directory and sets the
    ORT_TENSORRT_FP16_ENABLE environment variable per EP.
    """
    success_results = []
    fail_results = []
    latency_comparison_map = {}  # model -> CUDA/TRT latency
    profile_metrics_map = {}  # model -> metrics from profiling file
    model_ep_fail_map = {}  # model -> failing ep

    # read failing ep information if file exists
    if args.running_mode == 'benchmark':
        if os.path.exists('.model_ep_fail_map.json'):
            model_ep_fail_map = read_model_ep_fail_map_from_file('.model_ep_fail_map.json')

    if args.fp16:
        ep_list = ["CUDAExecutionProvider", "TensorrtExecutionProvider", "CUDAExecutionProvider_fp16", "TensorrtExecutionProvider_fp16"]
    else:
        ep_list = ["CUDAExecutionProvider", "TensorrtExecutionProvider"]

    # EPs whose numerical output is intentionally not validated
    validation_exemption = ["TensorrtExecutionProvider_fp16"]

    #######################
    # iterate model
    #######################
    for name, info in models.items():
        latency_result = {}
        path = info["working_directory"]

        pwd = os.getcwd()
        if not os.path.exists(path):
            os.mkdir(path)
        os.chdir(path)
        path = os.getcwd()

        # cleanup files before running a new inference
        if args.running_mode == "validate":
            remove_profiling_files(path)

        inputs = []
        ref_outputs = []
        inputs_fp32 = []
        ref_outputs_fp32 = []
        inputs_fp16 = []
        ref_outputs_fp16 = []
        all_inputs_shape = []  # use for standalone trt
        ep_to_ep_op_map = {}  # ep -> { ep -> operator }
        profile_already_parsed = set()

        #######################
        # iterate ep
        #######################
        for ep in ep_list:
            if skip_ep(name, ep, model_ep_fail_map):
                continue

            ep_ = ep_to_provider_list[ep][0]
            if (ep_ not in onnxruntime.get_available_providers()):
                logger.error("No {} support".format(ep_))
                continue

            model_path = info["model_path"]

            if "fp16" in ep:
                fp16 = True
                os.environ["ORT_TENSORRT_FP16_ENABLE"] = "1"
                # CUDA fp16 needs an actual fp16-converted model file;
                # TRT fp16 only needs the environment flag above.
                if ep == "CUDAExecutionProvider_fp16":
                    model_path = convert_model_from_float_to_float16(model_path)
                logger.info("\nInitializing {} with float16 enabled to run on {} ...".format(name, ep))
            else:
                fp16 = False
                os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
                logger.info("\nInitializing {} to run on {} ...".format(name, ep))

            test_data_dir = info["test_data_path"]
            # read input/output of test data (cached per precision across EPs)
            if fp16 and ep == "CUDAExecutionProvider_fp16":
                if not inputs_fp16 or not ref_outputs_fp16:
                    inputs_fp16, ref_outputs_fp16 = load_onnx_model_zoo_test_data(test_data_dir, all_inputs_shape, "fp16")
                inputs = inputs_fp16
                ref_outputs = ref_outputs_fp16
            else:
                if not inputs_fp32 or not ref_outputs_fp32:
                    inputs_fp32, ref_outputs_fp32 = load_onnx_model_zoo_test_data(test_data_dir, all_inputs_shape)
                inputs = inputs_fp32
                ref_outputs = ref_outputs_fp32

            if args.input_data == "random":
                inputs = generate_onnx_model_random_input(args.test_times+1, inputs[0])

            #######################################
            # benchmark or validation
            #######################################
            if args.running_mode == 'benchmark':
                logger.info("===========================")
                logger.info("======== benchmark ========")
                logger.info("===========================")

                options = onnxruntime.SessionOptions()
                options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL

                # create onnxruntime inference session
                try:
                    sess = create_session(model_path, ep_to_provider_list[ep], options)
                except Exception as e:
                    logger.error(e)
                    # update_fail_model(model_ep_fail_map, fail_results, args, name, ep, e)
                    continue

                logger.info("[start] Begin to inference {} with {} ...".format(name, ep))
                logger.info(sess.get_providers())

                if sess:
                    logger.info("Model inputs nodes:")
                    for input_meta in sess.get_inputs():
                        logger.info(input_meta)
                    logger.info("Model outputs nodes:")
                    for output_meta in sess.get_outputs():
                        logger.info(output_meta)

                batch_size = 1
                result_template = {
                    "engine": "onnxruntime",
                    "version": onnxruntime.__version__,
                    "device": ep,
                    "fp16": fp16,
                    "io_binding": False,
                    "model_name": name,
                    "inputs": len(sess.get_inputs()),
                    "batch_size": batch_size,
                    "sequence_length": 1,
                    "datetime": str(datetime.now()),}

                result = inference_ort(args, name, sess, ep, inputs, result_template, args.test_times, batch_size)
                if result:
                    success_results.append(result)
                    logger.info(result)

                    latency_result[ep] = {}
                    latency_result[ep]["average_latency_ms"] = result["average_latency_ms"]
                    latency_result[ep]["latency_90_percentile"] = result["latency_90_percentile"]

                    # get standalone TensorRT perf
                    if "TensorrtExecutionProvider" in ep and args.trtexec:
                        result = run_trt_standalone(args.trtexec, model_path, sess.get_inputs(), all_inputs_shape, fp16)
                        if result and len(result) > 0:
                            if fp16:
                                latency_result["Standalone_TRT_fp16"] = result
                            else:
                                latency_result["Standalone_TRT"] = result

                    latency_comparison_map[name] = copy.deepcopy(latency_result)

            elif args.running_mode == 'validate':
                logger.info("==========================")
                logger.info("======== validate ========")
                logger.info("==========================")

                # enable profiling to generate profiling file for analysis
                options = onnxruntime.SessionOptions()
                options.enable_profiling = True
                options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
                time.sleep(1)  # avoid to generate same profile file name

                # create onnxruntime inference session
                try:
                    sess = create_session(model_path, ep_to_provider_list[ep], options)
                except Exception as e:
                    logger.error(e)
                    update_fail_model(model_ep_fail_map, fail_results, args, name, ep, 'runtime error', e)
                    continue

                sess.disable_fallback()

                logger.info("Start to inference {} with {} ...".format(name, ep))
                logger.info(sess.get_providers())

                if sess:
                    logger.info("Model inputs nodes:")
                    for input_meta in sess.get_inputs():
                        logger.info(input_meta)
                    logger.info("Model outputs nodes:")
                    for output_meta in sess.get_outputs():
                        logger.info(output_meta)

                # run inference and validate the result
                #
                # currently skip TensorRT float16 validation intentionally
                if ep not in validation_exemption:
                    try:
                        ort_outputs = inference_ort_and_get_prediction(name, sess, inputs)

                        decimal = 0
                        status = validate(ref_outputs, ort_outputs, decimal)
                        if not status[0]:
                            update_fail_model(model_ep_fail_map, fail_results, args, name, ep, 'result accuracy issue', status[1])
                            continue
                    except Exception as e:
                        logger.error(e)
                        update_fail_model(model_ep_fail_map, fail_results, args, name, ep, 'runtime error', e)
                        continue

                    # Run inference again. the reason is that some ep like tensorrt
                    # it takes much longer time to generate graph on first run and
                    # we need to skip the perf result of that expensive run.
                    inference_ort_and_get_prediction(name, sess, inputs)
                else:
                    inference_ort_and_get_prediction(name, sess, inputs)
                    inference_ort_and_get_prediction(name, sess, inputs)

                sess.end_profiling()

                # get metrics from profiling file
                metrics = get_profile_metrics(path, profile_already_parsed)
                if metrics:
                    print(ep)
                    ep_to_ep_op_map[ep] = metrics

        ####################
        # end of iterate ep
        ####################

        # get percentage of execution time and operators in TRT
        if len(ep_to_ep_op_map) > 0:
            trt_op_map = None
            trt_fp16_op_map = None
            cuda_op_map = None
            cuda_fp16_op_map = None

            for ep, op_map in ep_to_ep_op_map.items():
                if ep == "CUDAExecutionProvider":
                    cuda_op_map = op_map
                elif ep == "CUDAExecutionProvider_fp16":
                    cuda_fp16_op_map = op_map
                elif ep == "TensorrtExecutionProvider":
                    trt_op_map = op_map
                elif ep == "TensorrtExecutionProvider_fp16":
                    trt_fp16_op_map = op_map

            profile_metrics_map[name] = {}

            if cuda_op_map:
                profile_metrics_map[name]['ratio_of_ops_in_cuda_not_fallback_cpu'] = calculate_cuda_op_percentage(cuda_op_map)

            if trt_op_map:
                total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(trt_op_map)
                profile_metrics_map[name]['total_trt_execution_time'] = total_trt_execution_time
                profile_metrics_map[name]['total_execution_time'] = total_execution_time
                profile_metrics_map[name]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt

                if cuda_op_map:
                    total_ops_in_trt, total_ops, ratio_of_ops_in_trt = calculate_trt_op_percentage(trt_op_map, cuda_op_map)
                    profile_metrics_map[name]['total_ops_in_trt'] = total_ops_in_trt
                    profile_metrics_map[name]['total_ops'] = total_ops
                    profile_metrics_map[name]['ratio_of_ops_in_trt'] = ratio_of_ops_in_trt

            if trt_fp16_op_map:
                total_trt_execution_time, total_execution_time, ratio_of_execution_time_in_trt = calculate_trt_latency_percentage(trt_fp16_op_map)
                name_ = name + " (FP16)"
                profile_metrics_map[name_] = {}
                profile_metrics_map[name_]['total_trt_execution_time'] = total_trt_execution_time
                profile_metrics_map[name_]['total_execution_time'] = total_execution_time
                profile_metrics_map[name_]['ratio_of_execution_time_in_trt'] = ratio_of_execution_time_in_trt

                if cuda_fp16_op_map:
                    # NOTE(review): compares against cuda_op_map (fp32), not
                    # cuda_fp16_op_map — possibly intentional; confirm.
                    total_ops_in_trt, total_ops, ratio_of_ops_in_trt = calculate_trt_op_percentage(trt_fp16_op_map, cuda_op_map)
                    profile_metrics_map[name_]['total_ops_in_trt'] = total_ops_in_trt
                    profile_metrics_map[name_]['total_ops'] = total_ops
                    profile_metrics_map[name_]['ratio_of_ops_in_trt'] = ratio_of_ops_in_trt

            if debug:
                pp = pprint.PrettyPrinter(indent=4)
                print('CUDA operator map:')
                pp.pprint(cuda_op_map)
                print('TRT operator map:')
                pp.pprint(trt_op_map)
                print('CUDA FP16 operator map:')
                pp.pprint(cuda_fp16_op_map)
                print('TRT FP16 operator map:')
                pp.pprint(trt_fp16_op_map)

        # cleanup_files()
        os.chdir(pwd)

        # end of model
    return success_results, fail_results, latency_comparison_map, model_ep_fail_map, profile_metrics_map
def add_improvement_information(latency_comparison_map):
    """Annotate each model entry with the TensorRT latency gain over CUDA.

    For every model that has both CUDA and TensorRT results, adds a
    "Tensorrt_gain(%)" entry (and "Tensorrt_fp16_gain(%)" when both fp16
    results are present) expressing how much faster TensorRT is relative
    to CUDA. Mutates *latency_comparison_map* in place.

    Args:
        latency_comparison_map: dict mapping model name -> per-provider
            metric dicts whose 'average_latency_ms' values are numeric strings.
    """
    for key, value in latency_comparison_map.items():
        if not ('TensorrtExecutionProvider' in value and 'CUDAExecutionProvider' in value):
            continue
        trt_latency = float(value['TensorrtExecutionProvider']['average_latency_ms'])
        cuda_latency = float(value['CUDAExecutionProvider']['average_latency_ms'])
        if cuda_latency == 0:
            # Guard against ZeroDivisionError on a degenerate (zero) CUDA latency.
            continue
        gain = (cuda_latency - trt_latency) * 100 / cuda_latency
        value["Tensorrt_gain(%)"] = "{:.2f} %".format(gain)
        if "TensorrtExecutionProvider_fp16" in value and "CUDAExecutionProvider_fp16" in value:
            trt_fp16_latency = float(value['TensorrtExecutionProvider_fp16']['average_latency_ms'])
            cuda_fp16_latency = float(value['CUDAExecutionProvider_fp16']['average_latency_ms'])
            if cuda_fp16_latency == 0:
                continue
            gain = (cuda_fp16_latency - trt_fp16_latency) * 100 / cuda_fp16_latency
            value["Tensorrt_fp16_gain(%)"] = "{:.2f} %".format(gain)
def output_details(results, csv_filename):
    """Append one CSV row per benchmark result dict to *csv_filename*.

    Writes a header line first; the file is opened in append mode so
    repeated calls accumulate sections.
    """
    fieldnames = [
        "engine", "version", "device", "fp16", "io_binding", "model_name", "inputs", "batch_size",
        "sequence_length", "datetime", "test_times", "QPS", "average_latency_ms", "latency_variance",
        "latency_90_percentile", "latency_95_percentile", "latency_99_percentile"
    ]
    with open(csv_filename, mode="a", newline='') as out_file:
        writer = csv.DictWriter(out_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(results)
    logger.info(f"Detail results are saved to csv file: {csv_filename}")
def output_fail(results, csv_filename):
    """Append one CSV row per failed model/EP combination to *csv_filename*."""
    fieldnames = [
        "model", "ep", "error type", "error message"
    ]
    with open(csv_filename, mode="a", newline='') as out_file:
        writer = csv.DictWriter(out_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(results)
    logger.info(f"Failing results are saved to csv file: {csv_filename}")
def output_latency(results, csv_filename):
    """Append the CUDA vs TensorRT latency comparison table to *csv_filename*.

    Args:
        results: dict mapping model name -> {provider key -> metric dict},
            as built by the benchmark run (providers such as
            'CUDAExecutionProvider', 'TensorrtExecutionProvider_fp16', ...).
        csv_filename: output CSV path (opened in append mode).
    """
    def _metric(value, provider, key):
        # Return the requested metric, or an empty cell when the provider
        # was not run / the metric is missing for this model.
        if provider in value and key in value[provider]:
            return value[provider][key]
        return ""

    with open(csv_filename, mode="a", newline='') as csv_file:
        column_names = ["Model",
                        "CUDA \nmean (ms)",
                        "CUDA \n90th percentile (ms)",
                        "TRT EP \nmean (ms)",
                        "TRT EP \n90th percentile (ms)",
                        "Standalone TRT \nmean (ms)",
                        "Standalone TRT \n90th percentile (ms)",
                        "CUDA fp16 \nmean (ms)",
                        "CUDA fp16 \n90th percentile (ms)",
                        "TRT EP fp16 \nmean (ms)",
                        "TRT EP fp16 \n90 percentile (ms)",
                        "Standalone TRT fp16 \nmean (ms)",
                        "Standalone TRT fp16 \n90th percentile (ms)",
                        "TRT EP \ngain (mean) (%)",
                        "TRT EP fp16 \ngain (mean) (%)"]
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(column_names)
        for key, value in results.items():
            # One row per model; column order must match column_names above.
            row = [key,
                   _metric(value, 'CUDAExecutionProvider', 'average_latency_ms'),
                   _metric(value, 'CUDAExecutionProvider', 'latency_90_percentile'),
                   _metric(value, 'TensorrtExecutionProvider', 'average_latency_ms'),
                   _metric(value, 'TensorrtExecutionProvider', 'latency_90_percentile'),
                   _metric(value, 'Standalone_TRT', 'average_latency_ms'),
                   _metric(value, 'Standalone_TRT', 'latency_90_percentile'),
                   _metric(value, 'CUDAExecutionProvider_fp16', 'average_latency_ms'),
                   _metric(value, 'CUDAExecutionProvider_fp16', 'latency_90_percentile'),
                   _metric(value, 'TensorrtExecutionProvider_fp16', 'average_latency_ms'),
                   _metric(value, 'TensorrtExecutionProvider_fp16', 'latency_90_percentile'),
                   _metric(value, 'Standalone_TRT_fp16', 'average_latency_ms'),
                   _metric(value, 'Standalone_TRT_fp16', 'latency_90_percentile'),
                   value.get('Tensorrt_gain(%)', " "),
                   value.get('Tensorrt_fp16_gain(%)', " ")]
            csv_writer.writerow(row)
    logger.info(f"CUDA/TRT latency comparison are saved to csv file: {csv_filename}")
def output_ratio(results, csv_filename):
    """Append per-model TensorRT operator/latency ratio metrics to *csv_filename*."""
    header = ["Model",
              "% CUDA operators (not fall back to CPU)",
              "Total TRT operators",
              "Total operators",
              "% TRT operator",
              "Total TRT execution time",
              "Total execution time",
              "% TRT execution time"]
    # Metric keys in the same order as the columns after "Model".
    metric_keys = ['ratio_of_ops_in_cuda_not_fallback_cpu',
                   'total_ops_in_trt',
                   'total_ops',
                   'ratio_of_ops_in_trt',
                   'total_trt_execution_time',
                   'total_execution_time',
                   'ratio_of_execution_time_in_trt']
    with open(csv_filename, mode="a", newline='') as out_file:
        writer = csv.writer(out_file)
        writer.writerow(header)
        for model_name, metrics in results.items():
            # Missing metrics render as a single-space cell, matching the
            # original output format.
            writer.writerow([model_name] + [metrics.get(k, " ") for k in metric_keys])
    logger.info(f"Tensorrt ratio metrics are saved to csv file: {csv_filename}")
def output_system_info(result, csv_filename):
    """Append a one-row summary of host system information to *csv_filename*."""
    fieldnames = [
        "cpu_info", "cuda", "gpu_info", "linux_distro", "memory", "trt"
    ]
    with open(csv_filename, mode="a", newline='') as out_file:
        writer = csv.DictWriter(out_file, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerow(result)
    logger.info(f"System information are saved to csv file: {csv_filename}")
def parse_arguments():
    """Parse command-line options for the benchmark script.

    Returns:
        argparse.Namespace with attributes model_list_file, running_mode,
        input_data, fp16, trtexec and test_times.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model_list_file", required=False, default="model_list.json", help="Model list file.")
    parser.add_argument("-r", "--running_mode", required=False, default="benchmark", choices=["validate", "benchmark"], help="Testing mode.")
    parser.add_argument("-i", "--input_data", required=False, default="zoo", choices=["zoo", "random"], help="source of input data.")
    # NOTE(review): default=True combined with action="store_true" means this
    # flag is always True and cannot be disabled from the command line. Kept
    # as-is for backward compatibility; confirm whether a --no-fp16 switch is
    # actually wanted.
    parser.add_argument("--fp16", required=False, default=True, action="store_true", help="Include Float16 into benchmarking.")
    parser.add_argument("--trtexec", required=False, default=None, help="trtexec executable path.")
    parser.add_argument("-t",
                        "--test_times",
                        required=False,
                        default=1,
                        type=int,
                        help="Number of repeat times to get average inference latency.")
    args = parser.parse_args()
    return args
def setup_logger(verbose):
    """Install colored console logging; DEBUG with file/line detail when *verbose*."""
    if verbose:
        coloredlogs.install(level='DEBUG', fmt='[%(filename)s:%(lineno)s - %(funcName)20s()] %(message)s')
    else:
        coloredlogs.install(fmt='%(message)s')
    # Silence noisy INFO-level output from the transformers library.
    logging.getLogger("transformers").setLevel(logging.WARNING)
def main():
    """Entry point: benchmark/validate all listed models and write CSV reports."""
    args = parse_arguments()
    setup_logger(False)
    pp = pprint.PrettyPrinter(indent=4)
    # Load the model list and run every model through the configured EPs.
    models = parse_models_info(args.model_list_file)
    perf_start_time = datetime.now()
    success_results, fail_results, latency_comparison_map, failing_models, profile_metrics_map = run_onnxruntime(args, models)
    perf_end_time = datetime.now()
    logger.info("\nTotal time for running/profiling all models: {}".format(perf_end_time - perf_start_time))
    logger.info(list(models.keys()))
    logger.info("\nTotal models: {}".format(len(models)))
    logger.info("Fail models: {}".format(len(failing_models)))
    logger.info("Models FAIL/SUCCESS: {}/{}".format(len(failing_models), len(models) - len(failing_models)))
    # All reports go under ./result (created on demand).
    path = "result"
    if not os.path.exists(path):
        os.mkdir(path)
    path = os.path.join(os.getcwd(), path)
    if not os.path.exists(path):
        os.mkdir(path)
    # One shared timestamp so all CSVs from this run sort together.
    time_stamp = datetime.now().strftime("%Y%m%d-%H%M%S")
    if len(failing_models) > 0:
        logger.info("\n============================================")
        logger.info("========== Failing Models/EPs ==============")
        logger.info("============================================")
        logger.info(failing_models)
        write_model_ep_fail_map_to_file(failing_models)
    if latency_comparison_map:
        logger.info("\n=========================================")
        logger.info("=========== CUDA/TRT latency ===========")
        logger.info("=========================================")
        # Derive TRT-vs-CUDA gain percentages before printing/writing.
        add_improvement_information(latency_comparison_map)
        pp.pprint(latency_comparison_map)
        csv_filename = f"benchmark_latency_{time_stamp}.csv"
        csv_filename = os.path.join(path, csv_filename)
        output_latency(latency_comparison_map, csv_filename)
    if len(profile_metrics_map) > 0:
        logger.info("\n========================================")
        logger.info("========== TRT detail metrics ==========")
        logger.info("========================================")
        pp.pprint(profile_metrics_map)
        csv_filename = f"benchmark_ratio_{time_stamp}.csv"
        csv_filename = os.path.join(path, csv_filename)
        output_ratio(profile_metrics_map, csv_filename)
    logger.info("\n===========================================")
    logger.info("=========== System information ===========")
    logger.info("===========================================")
    info = {}
    get_system_info(info)
    pp.pprint(info)
    csv_filename = os.path.join(path, f"system_info_{time_stamp}.csv")
    output_system_info(info, csv_filename)
    if fail_results:
        csv_filename = f"benchmark_fail_{time_stamp}.csv"
        csv_filename = os.path.join(path, csv_filename)
        output_fail(fail_results, csv_filename)
    if success_results:
        csv_filename = f"benchmark_success_{time_stamp}.csv"
        csv_filename = os.path.join(path, csv_filename)
        output_details(success_results, csv_filename)
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
class Person(models.Model):
    """A person record with a name, surname and profile image."""
    name = models.CharField(max_length=255)
    surname = models.CharField(max_length=255)
    # Uploaded images are stored under MEDIA_ROOT/person_images.
    image = models.ImageField(upload_to='person_images')
|
nilq/baby-python
|
python
|
import os
import click
from flask import current_app
from sqlalchemy import text
from app import db
def register(app):
    """Attach custom Flask CLI command groups (translate, sqlite, test, doc) to *app*."""
    @app.cli.group()
    def translate():
        """Translation and localization commands."""
        pass

    @translate.command()
    @click.argument('lang')
    def init(lang):
        """Initialize a new language."""
        if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
            raise RuntimeError('extract command failed')
        if os.system(
                'pybabel init -i messages.pot -d app/translations -l ' + lang):
            raise RuntimeError('init command failed')
        os.remove('messages.pot')

    @translate.command()
    def update():
        """Update all languages."""
        if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
            raise RuntimeError('extract command failed')
        if os.system('pybabel update -i messages.pot -d app/translations'):
            raise RuntimeError('update command failed')
        os.remove('messages.pot')

    @translate.command()
    def compile():
        """Compile all languages."""
        if os.system('pybabel compile -d app/translations'):
            raise RuntimeError('compile command failed')

    @app.cli.group()
    def sqlite():
        """Run SQLite commands."""
        pass

    @sqlite.command()
    def create():
        """Create the initial database."""
        db.drop_all()
        db.create_all()
        scripts = [
            './docs/scripts/country.sql',
            './docs/scripts/folder.sql',
            './docs/scripts/preference.sql',
            './docs/scripts/profile.sql'
        ]
        for script in scripts:
            with open(script) as f:
                script_file = f.read()
            # split(';') leaves a trailing empty fragment; skip blanks and wrap
            # each raw statement in text() as SQLAlchemy expects (the `text`
            # import was previously unused).
            for statement in script_file.split(';'):
                if statement.strip():
                    db.session.execute(text(statement))

    @app.cli.group()
    def test():
        """Unit testing framework commands."""
        pass

    @test.command()
    def run():
        """Run unit testing framework."""
        if os.system('coverage run -m unittest discover'):
            raise RuntimeError('')

    @test.command()
    def report():
        """Report unit testing framework."""
        if os.system('coverage report -m'):
            raise RuntimeError('')

    @app.cli.group()
    def doc():
        """Build documentation."""
        pass

    @doc.command()
    def generate():
        """Generate entity relationship diagram."""
        if os.system('./schemaspy/schemaspy'):
            raise RuntimeError('')
|
nilq/baby-python
|
python
|
from __future__ import annotations
import attr
__all__ = ("AllowedMentions",)
@attr.s(kw_only=True)
class AllowedMentions:
    """Represents an allowed mentions object.

    This is used to determine the allowed mentions of any messages being sent
    from the client's user.

    Parameters
    ----------
    everyone: :class:`bool`
        If mentioning everyone is allowed. By default True.
    roles: :class:`bool` | :class:`list`
        Either a list of role IDs, or a boolean value. Determines the allowed
        roles to be mentioned.
    users: :class:`bool` | :class:`list`
        Either a list of user IDs, or a boolean value. Determines the allowed
        users to be mentioned.
    replied_user: :class:`bool`
        If mentioning the replied user to the message is allowed.
    """
    everyone: bool = attr.field(default=True)
    roles: bool | list[int] = attr.field(default=True)
    users: bool | list[int] = attr.field(default=True)
    replied_user: bool = attr.field(default=True)
    @classmethod
    def none(cls: type[AllowedMentions]) -> AllowedMentions:
        """Creates a :class:`.AllowedMentions` instance that has no
        allowed mentions set.

        Returns
        -------
        :class:`.AllowedMentions`
            The created instance.
        """
        return cls(everyone=False, roles=False, users=False, replied_user=False)
    def to_dict(self) -> dict[str, bool | list[int] | list[str]]:
        """Turns the AllowedMentions instance into a usable dict.

        A field set to True is expressed through the "parse" list (allow all),
        while an explicit ID list is passed through unchanged.

        Returns
        -------
        :class:`dict`
            The created dict from the AllowedMentions instance.
        """
        payload: dict[str, bool | list[int] | list[str]] = {
            "everyone": self.everyone,
            "replied_user": self.replied_user,
        }
        parse: list[str] = []
        # True -> blanket permission via "parse"; a list -> explicit ID filter.
        if self.roles is True:
            parse.append("roles")
        if isinstance(self.roles, list):
            payload["roles"] = self.roles
        if self.users is True:
            parse.append("users")
        if isinstance(self.users, list):
            payload["users"] = self.users
        payload["parse"] = parse
        return payload
|
nilq/baby-python
|
python
|
import csv
class PlayerThumbnails:
    """Reads player thumbnail definitions from the game's CSV assets."""

    @staticmethod
    def getThumbnailsID(path='Logic/Files/assets/csv_logic/player_thumbnails.csv'):
        """Return zero-based IDs of thumbnails whose 9th column is 'true'.

        The first two CSV lines are treated as headers. IDs count data rows
        from zero; rows with an empty first column do not advance the counter
        (preserving the original asset-file numbering scheme).

        Args:
            path: CSV file to read; defaults to the original hard-coded
                game asset location for backward compatibility.

        Returns:
            list[int]: IDs of enabled thumbnails.
        """
        ThumbnailsID = []
        with open(path) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            for row in csv_reader:
                if line_count == 0 or line_count == 1:
                    # Skip the two header rows.
                    line_count += 1
                else:
                    if row[8].lower() == 'true':
                        ThumbnailsID.append(line_count - 2)
                    if row[0] != "":
                        line_count += 1
        return ThumbnailsID
|
nilq/baby-python
|
python
|
"""
This file contains all the HTTP routes for basic pages (usually HTML)
"""
from flask import Blueprint, render_template, request
import _config as config
pages = Blueprint('controller', __name__)
@pages.route('/')
def index():
    """
    A basic landing page for this web service
    :return: HTTP Response (HTML page only)
    """
    # Pass the configured API endpoint so the page can link to the
    # machine-readable interface; `request` is forwarded for template use.
    return render_template(
        'page_index.html',
        api_endpoint=config.API_ENDPOINT,
        request=request
    )
@pages.route('/about')
def about():
    """Render the static About page (HTML only)."""
    return render_template(
        'page_about.html'
    )
|
nilq/baby-python
|
python
|
"""The Snooty documentation writer's tool."""
__version__ = "0.9.6.dev"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from subprocess import Popen
from subprocess import PIPE
class ChromeHtmlToPdf():
    """Render a URL to PDF with headless Google Chrome.

    Fixed: the original used Python-2-only syntax (`print` statement and
    `except OSError, exc`), which is a SyntaxError under Python 3.
    """

    def __init__(self, url, output_path=None, verbose=False):
        """Assemble the chrome command line.

        Args:
            url: Page to render.
            output_path: Target PDF path; Chrome's default name when omitted.
            verbose: Print the assembled command when True.
        """
        # Base command
        self.command = 'google-chrome --headless --disable-gpu'
        # Set output path
        self.command += ' --print-to-pdf'
        if output_path:
            self.command += '=' + output_path
        # Set url
        self.command += ' ' + url
        if verbose:
            print(self.command)

    def render(self):
        """Actually render html to pdf; raise on non-zero exit or signal."""
        try:
            p = Popen(self.command, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
            stdout, stderr = p.communicate()
            retcode = p.returncode
            if retcode == 0:
                # call was successful
                return
            elif retcode < 0:
                # Negative return code means the process died from a signal.
                raise Exception("Terminated by signal: ", -retcode)
            else:
                raise Exception(stderr)
        except OSError as exc:
            raise exc
|
nilq/baby-python
|
python
|
from streaming.app import app
from streaming.config import config
from streaming.phishtank.api import Reported
# Kafka topic carrying PhishTank feed-state notifications.
reports_topic = app.topic('phishtank-reports')
# Persistent table remembering the last seen etag/size of the PhishTank feed.
states_table = app.Table('phishtank-state', default=str)
@app.agent(reports_topic)
async def get_phishtank_reports(states):
    # For each incoming state, fetch reports added between the stored feed
    # size and the new one, then record the new state.
    # NOTE(review): 'API_KEY' looks like a placeholder string — confirm it is
    # replaced with a real PhishTank key via configuration.
    async for state in states:
        try:
            phishtank_reports = await Reported('API_KEY').get(states_table['size'], state['size'])
            for report in phishtank_reports:
                print(report)
            # Do things
            await update_etag.send(state)
        except Exception as err:
            # Best-effort: errors are printed and the state update is skipped.
            print(err)
            pass
@app.agent()
async def update_etag(states):
    # Persist the latest etag and feed size after a successful fetch.
    async for state in states:
        states_table['etag'] = state['etag']
        states_table['size'] = state['size']
@app.task
async def hallo():
    # On startup: if the feed's etag changed since the stored one, trigger a fetch.
    phishtank_state = await Reported('API_KEY').latest()
    if not states_table['etag'] or states_table['etag'] != phishtank_state['etag']:
        await get_phishtank_reports.send(value=phishtank_state)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*
from common.common_time import get_system_datetime
from db.base import DbBase
from db.connection_pool import MysqlConn
import copy
import datetime
from utils.status_code import response_code
from config import configuration
import traceback
import json
import os
from config import config
import logging
logger = logging.getLogger("main." + __name__)
config_name = os.getenv('FLASK_CONFIG') or 'default'
Config = config[config_name]
class DbOrgMgr(DbBase):
"""
User related db operation
"""
'''
0. Default there is an admin account
1. Use default admin account for first login
2. Setup the org_name in the UI portal
if org_name is empty:
then go to org setup
-- fill in the org info
-- setup ldap login
-- setup smtp and airflow url
'''
    def __delete_admin(self):
        """Delete the bootstrap 'TorroAdmin' account (ID=1) from userTable.

        Returns:
            response_code.SUCCESS on success, DELETE_DATA_FAIL on any error.
        """
        conn = MysqlConn()
        try:
            db_name = configuration.get_database_name()
            # NOTE(review): values are interpolated with %-formatting rather
            # than bound parameters — safe only because both are hard-coded;
            # the string value appears unquoted, confirm create_delete_sql
            # handles quoting.
            condition = "ID=%s and ACCOUNT_NAME =%s" % ('1', 'TorroAdmin')
            delete_table_sql = self.create_delete_sql(db_name, "userTable", condition)
            self.delete_exec(conn, delete_table_sql)
            return response_code.SUCCESS
        except Exception as e:
            logger.error("FN:__delete_admin error:{}".format(traceback.format_exc()))
            return response_code.DELETE_DATA_FAIL
        finally:
            conn.close()
    def __set_ldap(self, ldap_info):
        """Insert one LDAP configuration row (built from *ldap_info*) into ldapTable.

        Args:
            ldap_info: dict holding connection settings (host/port/TLS),
                bind credentials, user/group search settings and timestamps.

        Returns:
            response_code.SUCCESS with *ldap_info* (augmented with the new
            row id under 'id') in ['data'], or GET_DATA_FAIL on error.
        """
        conn = MysqlConn()
        try:
            host = ldap_info['host']
            port = ldap_info['port']
            cer_path = ldap_info['cer_path']
            use_ssl = ldap_info['use_ssl']
            admin = ldap_info['admin_dn']
            admin_pwd = ldap_info['admin_pwd']
            user_search_base = ldap_info['user_search_base']
            user_search_filter = ldap_info['user_search_filter']
            display_name_attribute = ldap_info['display_name_attribute']
            email_address_attribute = ldap_info['email_address_attribute']
            adgroup_attribute = ldap_info['adgroup_attribute']
            group_search_base = ldap_info['group_search_base']
            group_search_filter = ldap_info['group_search_filter']
            group_member_attribute = ldap_info['group_member_attribute']
            email_suffix = ldap_info['email_suffix']
            create_time = ldap_info['create_time']
            time_modify = ldap_info['time_modify']
            db_name = configuration.get_database_name()
            # insert form — field order must match the values tuple below.
            fields = ('HOST', 'PORT', 'CER_PATH', 'USE_SSL', 'ADMIN_DN', 'ADMIN_PWD',
                      'USER_SEARCH_BASE', 'USER_SERACH_FILTER', 'DISPLAY_NAME_LDAP_ATTRIBUTE', 'EMAIL_ADDRESS_LDAP_ATTRIBUTE', 'USER_ADGROUP_ATTRIBUTE',
                      'GROUP_SEARCH_BASE', 'GROUP_SERACH_FILTER', 'GROUP_MEMBER_ATTRIBUTE', 'GROUP_EMAIL_SUFFIX',
                      'CREATE_TIME', 'TIME_MODIFY')
            values = (host, port, cer_path, use_ssl, admin, admin_pwd,
                      user_search_base, user_search_filter, display_name_attribute, email_address_attribute, adgroup_attribute,
                      group_search_base, group_search_filter, group_member_attribute, email_suffix,
                      create_time, time_modify)
            sql = self.create_insert_sql(db_name, 'ldapTable', '({})'.format(', '.join(fields)), values)
            logger.debug('FN:__set_ldap ldapTable_sql:{}'.format(sql))
            ldap_id = self.insert_exec(conn, sql, return_insert_id=True)
            # Echo the generated primary key back to the caller.
            ldap_info['id'] = ldap_id
            data = response_code.SUCCESS
            data['data'] = ldap_info
            return data
        except Exception as e:
            logger.error("FN:__set_ldap error:{}".format(traceback.format_exc()))
            return response_code.GET_DATA_FAIL
        finally:
            conn.close()
    def __set_smtp(self, smtp_info):
        """Insert one SMTP configuration row (built from *smtp_info*) into smtpTable.

        Returns:
            response_code.SUCCESS with *smtp_info* (plus the new row id under
            'id') in ['data'], or GET_DATA_FAIL on error.
        """
        conn = MysqlConn()
        try:
            smtp_host = smtp_info['smtp_host']
            smtp_account = smtp_info['smtp_account']
            smtp_mail_box = smtp_info['Smtp_mail_box']
            smtp_pwd = smtp_info['smtp_pwd']
            smtp_port = smtp_info['smtp_port']
            smtp_tls = smtp_info['smtp_tls']
            create_time = smtp_info['create_time']
            db_name = configuration.get_database_name()
            # insert form — create_time is reused as the initial modify time.
            fields = ('MAIL_HOST', 'MAIL_USER', 'MAIL_BOX', 'MAIL_PASS', 'PORT', 'USE_TLS', 'CREATE_TIME',
                      'TIME_MODIFY')
            values = (smtp_host, smtp_account, smtp_mail_box, smtp_pwd, smtp_port, smtp_tls, create_time, create_time)
            sql = self.create_insert_sql(db_name, 'smtpTable', '({})'.format(', '.join(fields)), values)
            logger.debug('FN:__set_smtp smtpTable_sql:{}'.format(sql))
            smtp_id = self.insert_exec(conn, sql, return_insert_id=True)
            smtp_info['id'] = smtp_id
            data = response_code.SUCCESS
            data['data'] = smtp_info
            return data
        except Exception as e:
            logger.error("FN:__set_smtp error:{}".format(traceback.format_exc()))
            return response_code.GET_DATA_FAIL
        finally:
            conn.close()
def __delete_ldap(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "1=1"
delete_table_sql = self.create_delete_sql(db_name, "ldapTable", condition)
logger.debug('FN:__delete_ldap delete_ldapTable_sql:{}'.format(sql))
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_ldap error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def __delete_smtp(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "1=1"
delete_table_sql = self.create_delete_sql(db_name, "smtpTable", condition)
logger.debug('FN:__delete_smtp delete_smtpTable_sql:{}'.format(sql))
self.delete_exec(conn, delete_table_sql)
return response_code.SUCCESS
except Exception as e:
logger.error("FN:__delete_smtp error:{}".format(traceback.format_exc()))
return response_code.DELETE_DATA_FAIL
finally:
conn.close()
def __set_org(self, org_info):
conn = MysqlConn()
try:
admin_group = org_info['admin_group']
visitor_group = org_info['base_group']
org_name = org_info['org_name']
airflow_url = org_info['airflow_url']
create_time = org_info['create_time']
des = org_info['des']
db_name = configuration.get_database_name()
# insert org
fields = ('ORG_NAME', 'AIRFLOW_URL', 'CREATE_TIME', 'DES', 'PROJECT_NAME')
values = (org_name, airflow_url, create_time, des, Config.DEFAULT_PROJECT)
sql = self.create_insert_sql(db_name, 'orgTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org orgTable_sql:{}'.format(sql))
org_id = self.insert_exec(conn, sql, return_insert_id=True)
select_condition = "GROUP_MAIL='%s' " % admin_group
select_table_sql = self.create_select_sql(db_name, "adgroupTable", "*", select_condition)
ad_group_info = self.execute_fetch_one(conn, select_table_sql)
if ad_group_info:
admin_group_id = ad_group_info['ID']
else:
# insert admin group
fields = ('GROUP_MAIL', 'CREATE_TIME', 'DES')
values = (admin_group, create_time, des)
sql = self.create_insert_sql(db_name, 'adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org adgroupTable_sql:{}'.format(sql))
admin_group_id = self.insert_exec(conn, sql, return_insert_id=True)
# insert org_to_adgroupTable
fields = ('ORG_ID', 'AD_GROUP_ID', 'ROLE_LIST')
values = (org_id, admin_group_id, json.dumps(['admin']).replace('\\', '\\\\'))
sql = self.create_insert_sql(db_name, 'org_to_adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org org_to_adgroupTable_sql:{}'.format(sql))
self.insert_exec(conn, sql, return_insert_id=True)
select_condition = "GROUP_MAIL='%s' " % visitor_group
select_table_sql = self.create_select_sql(db_name, "adgroupTable", "*", select_condition)
logger.debug('FN:__set_org adgroupTable_sql:{}'.format(sql))
ad_group_info = self.execute_fetch_one(conn, select_table_sql)
if ad_group_info:
visitor_group_id = ad_group_info['ID']
# insert visitor group
else:
fields = ('GROUP_MAIL', 'CREATE_TIME', 'DES')
values = (visitor_group, create_time, des)
sql = self.create_insert_sql(db_name, 'adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org adgroupTable_sql:{}'.format(sql))
visitor_group_id = self.insert_exec(conn, sql, return_insert_id=True)
# insert org_to_adgroupTable
fields = ('ORG_ID', 'AD_GROUP_ID', 'ROLE_LIST')
values = (org_id, visitor_group_id, json.dumps(['viewer']).replace('\\', '\\\\'))
sql = self.create_insert_sql(db_name, 'org_to_adgroupTable', '({})'.format(', '.join(fields)), values)
logger.debug('FN:__set_org org_to_adgroupTable_sql:{}'.format(sql))
self.insert_exec(conn, sql, return_insert_id=True)
org_info['org_id'] = org_id
org_info['admin_id'] = admin_group_id
org_info['visitor_id'] = visitor_group_id
data = response_code.SUCCESS
data['data'] = org_info
return data
except Exception as e:
logger.error("FN:__set_org error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
    def __delete_org(self):
        """Remove all rows from orgTable.

        Returns:
            response_code.SUCCESS on success, DELETE_DATA_FAIL on any error.
        """
        conn = MysqlConn()
        try:
            db_name = configuration.get_database_name()
            condition = "1=1"
            delete_table_sql = self.create_delete_sql(db_name, "orgTable", condition)
            logger.debug('FN:__delete_org delete_orgTable_sql:{}'.format(delete_table_sql))
            self.delete_exec(conn, delete_table_sql)
            return response_code.SUCCESS
        except Exception as e:
            logger.error("FN:__delete_org error:{}".format(traceback.format_exc()))
            return response_code.DELETE_DATA_FAIL
        finally:
            conn.close()
    def __delete_adgroup_to_org(self, org_id=None):
        """Remove all org/AD-group link rows from org_to_adgroupTable.

        Args:
            org_id: looked up from orgTable when omitted.
                NOTE(review): the value is fetched but never used — the
                delete below uses "1=1" and clears the whole table; confirm
                whether a per-org delete was intended.

        Returns:
            response_code.SUCCESS on success, DELETE_DATA_FAIL on any error.
        """
        conn = MysqlConn()
        try:
            db_name = configuration.get_database_name()
            if not org_id:
                select_table_sql = self.create_select_sql(db_name, "orgTable", "*")
                org_id = self.execute_fetch_one(conn, select_table_sql)['ID']
            delete_condition = "1=1"
            delete_table_sql = self.create_delete_sql(db_name, "org_to_adgroupTable", delete_condition)
            logger.debug('FN:__delete_adgroup_to_org delete_org_to_adgroupTable_sql:{}'.format(delete_table_sql))
            self.delete_exec(conn, delete_table_sql)
            return response_code.SUCCESS
        except Exception as e:
            logger.error("FN:__delete_adgroup_to_org error:{}".format(traceback.format_exc()))
            return response_code.DELETE_DATA_FAIL
        finally:
            conn.close()
    def add_new_org_setting(self, org):
        """Replace the whole organization setup (org, LDAP, SMTP) with *org*.

        If any LDAP configuration already exists, the previous org/LDAP/SMTP
        rows and org-to-adgroup links are deleted first, then fresh rows are
        inserted from the flat *org* dict.

        Args:
            org: flat dict combining org fields (admin_group, base_group,
                org_name, airflow_url, des), LDAP fields (host, port, ...)
                and SMTP fields (smtp_host, smtp_account, ...).

        Returns:
            response_code.SUCCESS with *org* (augmented with org_id, ldap_id
            and smtp_id) in ['data'], or GET_DATA_FAIL on error.
        """
        conn = MysqlConn()
        try:
            db_name = configuration.get_database_name()
            # Split the flat input dict into the three per-table payloads,
            # stamping them all with the same creation time.
            org_info = {}
            org_info['admin_group'] = org['admin_group']
            org_info['base_group'] = org['base_group']
            org_info['org_name'] = org['org_name']
            org_info['airflow_url'] = org['airflow_url']
            create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            org_info['create_time'] = create_time
            org_info['des'] = org['des']
            ldap_info = {}
            ldap_info['host'] = org['host']
            ldap_info['port'] = org['port']
            ldap_info['cer_path'] = org['cer_path']
            ldap_info['use_ssl'] = org['use_ssl']
            ldap_info['admin_dn'] = org['admin_dn']
            ldap_info['admin_pwd'] = org['admin_pwd']
            ldap_info['user_search_base'] = org['user_search_base']
            ldap_info['user_search_filter'] = org['user_search_filter']
            ldap_info['display_name_attribute'] = org['display_name_attribute']
            ldap_info['email_address_attribute'] = org['email_address_attribute']
            ldap_info['adgroup_attribute'] = org['adgroup_attribute']
            ldap_info['group_search_base'] = org['group_search_base']
            ldap_info['group_search_filter'] = org['group_search_filter']
            ldap_info['group_member_attribute'] = org['group_member_attribute']
            ldap_info['email_suffix'] = org['email_suffix']
            ldap_info['create_time'] = create_time
            ldap_info['time_modify'] = create_time
            smtp_info = {}
            smtp_info['smtp_host'] = org['smtp_host']
            smtp_info['smtp_account'] = org['smtp_account']
            smtp_info['Smtp_mail_box'] = org['Smtp_mail_box']
            smtp_info['smtp_pwd'] = org['smtp_pwd']
            smtp_info['smtp_port'] = org['smtp_port']
            smtp_info['smtp_tls'] = org['smtp_tls']
            smtp_info['create_time'] = create_time
            # An existing LDAP row means a previous setup: wipe it first.
            sql = self.create_select_sql(db_name, 'ldapTable', '*')
            ldap_infos = self.execute_fetch_all(conn, sql)
            if ldap_infos:
                self.__delete_adgroup_to_org()
                self.__delete_ldap()
                self.__delete_org()
                self.__delete_smtp()
            org_insert = self.__set_org(org_info)
            ldap_insert = self.__set_ldap(ldap_info)
            smtp_insert = self.__set_smtp(smtp_info)
            data = response_code.SUCCESS
            # Echo the generated primary keys back to the caller.
            org['org_id'] = org_insert['data']['org_id']
            org['ldap_id'] = ldap_insert['data']['id']
            org['smtp_id'] = smtp_insert['data']['id']
            data['data'] = org
            return data
        except Exception as e:
            logger.error("FN:add_new_org_setting error:{}".format(traceback.format_exc()))
            return response_code.GET_DATA_FAIL
        finally:
            conn.close()
def get_ldap_info(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'ldapTable', '*')
ldap_info = self.execute_fetch_one(conn, sql)
if ldap_info:
data = response_code.SUCCESS
data['data'] = ldap_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_ldap_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
    # get org info
    def get_org_info(self):
        """Fetch the single organization row plus its linked AD groups.

        Returns:
            response_code.SUCCESS with the org row (including an
            'ad_group_list' of joined org_to_adgroupTable/adgroupTable rows)
            in ['data'], or GET_DATA_FAIL when no org exists or on error.
        """
        conn = MysqlConn()
        try:
            db_name = configuration.get_database_name()
            sql = self.create_select_sql(db_name, 'orgTable', '*')
            logger.debug('FN:get_org_info orgTable_sql:{}'.format(sql))
            org_info = self.execute_fetch_one(conn, sql)
            if org_info:
                org_id = org_info['ID']
                db_name = configuration.get_database_name()
                condition = "ORG_ID=%s " % (org_id)
                # Join the link table with adgroupTable to resolve group details.
                relations = [{"table_name": "adgroupTable", "join_condition": "adgroupTable.ID=org_to_adgroupTable.AD_GROUP_ID"}]
                sql = self.create_get_relation_sql(db_name, 'org_to_adgroupTable', '*', relations, condition)
                ad_group_info = self.execute_fetch_all(conn, sql)
                org_info['ad_group_list'] = ad_group_info
                data = response_code.SUCCESS
                data['data'] = org_info
            else:
                data = response_code.GET_DATA_FAIL
            return data
        except Exception as e:
            logger.error("FN:get_org_info error:{}".format(traceback.format_exc()))
            return response_code.GET_DATA_FAIL
        finally:
            conn.close()
# get org info
def get_org_info_by_id(self, id):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
condition = "ID=%s " % (id)
sql = self.create_select_sql(db_name, 'orgTable', '*', condition)
logger.debug('FN:get_org_info_by_id orgTable_sql:{}'.format(sql))
org_info = self.execute_fetch_one(conn, sql)
if org_info:
data = response_code.SUCCESS
data['data'] = org_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_org_info_by_id error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
# modify org info
def update_org_info(self, org):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
data = self.get_org_info_by_id(org['id'])
if data['code'] != 200:
return response_code.UPDATE_DATA_FAIL
logger.debug("FN:update_org_info data".format(data))
self.__delete_adgroup_to_org()
self.__delete_ldap()
self.__delete_org()
self.__delete_smtp()
org_info = {}
org_info['admin_group'] = org['admin_group']
org_info['base_group'] = org['base_group']
org_info['org_name'] = org['org_name']
org_info['airflow_url'] = org['airflow_url']
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
org_info['create_time'] = create_time
org_info['des'] = org['des']
ldap_info = {}
ldap_info['host'] = org['host']
ldap_info['port'] = org['port']
ldap_info['cer_path'] = org['cer_path']
ldap_info['use_ssl'] = org['use_ssl']
ldap_info['admin_dn'] = org['admin_dn']
ldap_info['admin_pwd'] = org['admin_pwd']
ldap_info['user_search_base'] = org['user_search_base']
ldap_info['user_search_filter'] = org['user_search_filter']
ldap_info['display_name_attribute'] = org['display_name_attribute']
ldap_info['email_address_attribute'] = org['email_address_attribute']
ldap_info['adgroup_attribute'] = org['adgroup_attribute']
ldap_info['group_search_base'] = org['group_search_base']
ldap_info['group_search_filter'] = org['group_search_filter']
ldap_info['group_member_attribute'] = org['group_member_attribute']
ldap_info['email_suffix'] = org['email_suffix']
ldap_info['create_time'] = create_time
ldap_info['time_modify'] = create_time
smtp_info = {}
smtp_info['smtp_host'] = org['smtp_host']
smtp_info['smtp_account'] = org['smtp_account']
smtp_info['smtp_pwd'] = org['smtp_pwd']
smtp_info['smtp_port'] = org['smtp_port']
smtp_info['smtp_tls'] = org['smtp_tls']
smtp_info['create_time'] = create_time
sql = self.create_select_sql(db_name, 'ldapTable', '*')
logger.debug('FN:update_org_info ldapTable_sql:{}'.format(sql))
ldap_infos = self.execute_fetch_all(conn, sql)
if ldap_infos:
data = response_code.ADD_DATA_FAIL
return data
sql = self.create_select_sql(db_name, 'orgTable', '*')
logger.debug('FN:update_org_info orgTable_sql:{}'.format(sql))
org_infos = self.execute_fetch_all(conn, sql)
if org_infos:
data = response_code.ADD_DATA_FAIL
return data
org_insert = self.__set_org(org_info)
ldap_insert = self.__set_ldap(ldap_info)
smtp_insert = self.__set_smtp(smtp_info)
data = response_code.SUCCESS
self.__delete_admin()
org['org_id'] = org_insert['data']['org_id']
org['ldap_id'] = ldap_insert['data']['id']
org['smtp_id'] = smtp_insert['data']['id']
data['data'] = org
return data
except Exception as e:
logger.error("FN:update_org_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
def get_roles_info(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'roleTable', '*')
logger.debug('FN:get_roles_info roleTable_sql:{}'.format(sql))
org_info = self.execute_fetch_all(conn, sql)
if org_info:
data = response_code.SUCCESS
data['data'] = org_info
else:
data = response_code.GET_DATA_FAIL
return data
except Exception as e:
logger.error("FN:get_roles_info error:{}".format(traceback.format_exc()))
return response_code.GET_DATA_FAIL
finally:
conn.close()
    def get_smtp(self):
        """Return the SMTP settings as a 6-tuple.

        Returns ``(mail_host, mail_user, mail_box, mail_pass, port, is_tls)``
        read from the single smtpTable row; all six values are ``None`` when
        no row exists or on any DB error.
        """
        conn = MysqlConn()
        try:
            db_name = configuration.get_database_name()
            sql = self.create_select_sql(db_name, 'smtpTable', '*')
            logger.debug('FN:get_smtp smtpTable_sql:{}'.format(sql))
            smtp_info = self.execute_fetch_one(conn, sql)
            if not smtp_info:
                return None, None, None, None, None, None
            else:
                # Column names follow the smtpTable schema.
                mail_host = smtp_info['MAIL_HOST']
                mail_user = smtp_info['MAIL_USER']
                mail_box = smtp_info['MAIL_BOX']
                mail_pass = smtp_info['MAIL_PASS']
                port = smtp_info['PORT']
                is_tls = smtp_info['USE_TLS']
                return mail_host, mail_user, mail_box, mail_pass, port, is_tls
        except Exception as e:
            logger.error("FN:get_smtp error:{}".format(traceback.format_exc()))
            return None, None, None, None, None, None
        finally:
            conn.close()
def offline_ad_group(self, account_id):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
# db_name2 = configuration.get_database_name('DB')
condition = 'ACCOUNT_ID="%s"' % (account_id)
user_fields = '*'
sql = self.create_select_sql(db_name, 'userTable', user_fields, condition=condition)
logger.debug('FN:offline_ad_group userTable_sql:{}'.format(sql))
user_info = self.execute_fetch_one(conn, sql)
ad_group_list = json.loads(user_info.get('GROUP_LIST', "[]"), strict=False)
logger.debug('FN:offline_ad_group ad_group_list:{}'.format(ad_group_list))
return ad_group_list
except Exception as e:
logger.error("FN:offline_ad_group error:{}".format(traceback.format_exc()))
return None, None
finally:
conn.close()
def get_user_cn(self, account_id):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
# db_name2 = configuration.get_database_name('DB')
condition = 'ACCOUNT_ID="%s"' % (account_id)
user_fields = '*'
sql = self.create_select_sql(db_name, 'userTable', user_fields, condition=condition)
logger.debug('FN:get_user_cn userTable_sql:{}'.format(sql))
user_info = self.execute_fetch_one(conn, sql)
account_cn = user_info.get('ACCOUNT_CN', None)
logger.debug('FN:get_user_cn ACCOUNT_CN:{}'.format(sql))
return account_cn
except Exception as e:
logger.error("FN:get_user_cn error:{}".format(traceback.format_exc()))
return None, None
finally:
conn.close()
# get airflow info
def get_airflow_url(self):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
sql = self.create_select_sql(db_name, 'orgTable', 'AIRFLOW_URL')
logger.debug('FN:get_airflow_url orgTable_sql:{}'.format(sql))
org_info = self.execute_fetch_one(conn, sql)
if org_info:
return org_info['AIRFLOW_URL']
else:
return ''
except Exception as e:
logger.error("FN:get_airflow_url error:{}".format(traceback.format_exc()))
return ''
finally:
conn.close()
def insert_notification(self, emails, input_form_id, history_id, notify_msg):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
notify_id_list = []
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
emails = list(set(emails))
logger.debug('FN:insert_notification emails:{} notify_msg'.format(emails, notify_msg))
for email in emails:
values = (email, input_form_id, history_id, notify_msg, 0, create_time)
fields = ('account_id', 'input_form_id', 'history_id', 'comment', 'is_read', 'create_time')
sql = self.create_insert_sql(db_name, 'inputNotifyTable', '({})'.format(', '.join(fields)), values)
notify_id = self.insert_exec(conn, sql, return_insert_id=True)
notify_id_list.append(str(notify_id))
return notify_id_list
except Exception as e:
logger.error("FN:insert_notification error:{}".format(traceback.format_exc()))
return []
finally:
conn.close()
org_mgr = DbOrgMgr()  # module-level singleton shared by importers of this module
|
nilq/baby-python
|
python
|
from .api import process_large_corpus, process_small_corpus, \
process_belscript, process_pybel_graph, process_json_file, \
process_pybel_neighborhood, process_cbn_jgif_file, \
process_bel_stmt
|
nilq/baby-python
|
python
|
from distutils.core import setup

version = "1.2"

setup(
    name='chainee',
    packages=['chainee'],
    version=version,
    license='MIT',
    description='Chain your predicates, easy way.',
    author='Yaroslav Pankovych',
    author_email='flower.moor@gmail.com',
    url='https://github.com/ypankovych/chainee',
    download_url=f'https://github.com/ypankovych/chainee/archive/refs/tags/{version}.tar.gz',
    keywords=['chain', 'easy', 'predicate'],
    long_description=__doc__,
    # Fixed: this keyword was spelled "Install_requires", which setup()
    # ignores, so the anytree dependency was never declared.
    # NOTE(review): install_requires is a setuptools (not distutils) feature;
    # consider switching the import to `from setuptools import setup`.
    install_requires=["anytree"],
    classifiers=[
        'Topic :: Utilities',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Development Status :: 5 - Production/Stable',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: Implementation :: CPython'
    ],
)
|
nilq/baby-python
|
python
|
# The idea here is to store all the different things you need for Code in one module
# Code can then import the module and call whatever functions are needed
# R. Sheehan 27 - 10 - 2020
MOD_NAME_STR = "Measurement"  # module tag used in error messages below
# import the necessary modules
import board
import time
import digitalio
from analogio import AnalogOut
from analogio import AnalogIn
import supervisor # for listening to serial ports
# Define the names of the pins being written to and listened to
Vout = AnalogOut(board.A0)  # analog output on pin A0
Vin1 = AnalogIn(board.A1)   # analog inputs on pins A1..A5
Vin2 = AnalogIn(board.A2)
Vin3 = AnalogIn(board.A3)
Vin4 = AnalogIn(board.A4)
Vin5 = AnalogIn(board.A5)
# Define the names of the read / write commands (single-char serial protocol)
readCmdStr = 'r'; # read data command string for reading max AC input
writeCmdStr = 'w'; # write data command string for writing frequency values
writeAngStrA = 'a'; # write analog output from DCPINA
writeAngStrB = 'b'; # write analog output from DCPINB
readAngStr = 'l'; # read analog input
# Define the constants
Vmax = 3.3 # max AO/AI value
bit_scale = (64*1024) # 65536 == 2**16: full-scale count of the 16-bit analog registers (the old "64 bits" label was wrong)
# Need the following functions to convert voltages to 12-bit readings
# which can be understood by the board
def dac_value(volts):
    # Convert a voltage to the integer DAC count expected by the board.
    # bit_scale is 2**16, i.e. a 16-bit count — the earlier "10-bit" note
    # was stale. Prints an error and returns None when the scale constants
    # are invalid.
    FUNC_NAME = ".dac_value()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        if Vmax > 0.0 and bit_scale > 0:
            return int((volts / Vmax)*bit_scale)
        else:
            ERR_STATEMENT = ERR_STATEMENT + "\nvolt, bit scale factors not defined"
            raise Exception
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def get_voltage(pin, offset = 0.0):
    # Convert the raw ADC count on *pin* into volts, optionally correcting
    # for a previously measured zero offset (subtracted only when positive).
    # Prints an error and returns None when the scale constants are invalid.
    FUNC_NAME = ".get_voltage()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        if not (Vmax > 0.0 and bit_scale > 0):
            ERR_STATEMENT = ERR_STATEMENT + "\nvolt, bit scale factors not defined"
            raise Exception
        volts = (pin.value * Vmax) / bit_scale
        if offset > 0.0:
            volts = volts - offset
        return volts
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
# Determine the zero offset using A0 and A1
def get_zero_offset():
    # Determine the zero offset using A0 and A1
    # Ensure that Vout (A0) is set to zero
    # There is a bit of an offset in voltage between the Read and the Write,
    # presumably because the pins are floating.
    # Returns the voltage measured at A1 while A0 is driven to 0 V; pass the
    # result as the *offset* argument of get_voltage().
    Vout.value = dac_value(0)
    time.sleep(0.5)
    deltaV = get_voltage(Vin1, 0.0) # offset in the voltage reading at A2 O(10 mV)
    # print("deltaV Reading at A1: ", deltaV)
    return deltaV;
def Blink():
    # The first script that is run using CircuitPy
    # Use this to check that everything is operational
    # The led near the re-set button should flash count_limit times and switch off
    # If this doesn't work something is wrong
    # R. Sheehan 19 - 10 - 2020
    # Fixed: FUNC_NAME previously said ".Voltage_Divider_Test()", which
    # mislabelled any error printed from this function.
    FUNC_NAME = ".Blink()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        led = digitalio.DigitalInOut(board.D13)
        led.direction = digitalio.Direction.OUTPUT
        count = 0
        count_limit = 20
        while count < count_limit:
            led.value = True
            time.sleep(0.5)
            led.value = False
            time.sleep(0.5)
            count = count + 1
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def Voltage_Divider_Test():
    # Check the operation of the voltage-dividers and buffer amplifiers
    # that are attached to the various inputs of the board
    # R. Sheehan 23 - 10 - 2020
    FUNC_NAME = ".Voltage_Divider_Test()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        # determine the zero offset
        deltaV = get_zero_offset()
        # define the voltage-divider scaling
        Vscale = (5.0/3.0)
        # Read the offset-corrected values at pins A2..A5.
        # (An unused read of A1 was removed; deltaV already reports A1.)
        Vin2val = get_voltage(Vin2, deltaV)
        Vin3val = get_voltage(Vin3, deltaV)
        Vin4val = get_voltage(Vin4, deltaV)
        Vin5val = get_voltage(Vin5, deltaV)
        print("deltaV Reading at A1: ", deltaV)
        # Fixed: the last three rows were all labelled "A2"; each line now
        # names the pin it actually reports.
        print("Reading at A2: ", Vin2val, ", Real Reading at A2: ", Vin2val*Vscale)
        print("Reading at A3: ", Vin3val, ", Real Reading at A3: ", Vin3val*Vscale)
        print("Reading at A4: ", Vin4val, ", Real Reading at A4: ", Vin4val*Vscale)
        print("Reading at A5: ", Vin5val, ", Real Reading at A5: ", Vin5val*Vscale)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def Current_Source_Measurement():
    # this method performs the current-source measurements
    # R. Sheehan 23 - 10 - 2020
    # Reads the divider-scaled voltages at A1..A5 and prints the measured
    # and predicted currents/voltages for the current-source circuit.
    FUNC_NAME = ".Current_Source_Measurement()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        deltaV = get_zero_offset()
        # define the voltage-divider scaling
        Vscale = (5.0/3.0)
        # Set the output value here
        Vset = 0.0
        # Circuit resistances in kOhm; ratio and Rload come from the
        # current-source schematic (see lab notes).
        R1 = (54.9/1000.0) # units of kOhm
        R2 = (10.3/1000.0) # units of kOhm
        R3 = (4.8/1000.0) # units of kOhm
        ratio = R2 / (R1*R3)
        Rload = (10.0/1000.0) # unit of kOhm
        Vout.value = dac_value(Vset)
        # Read the values here
        # Determine the readings at pins A2, A3, A4, A5
        Vin1val = get_voltage(Vin1, deltaV)
        Vin2val = get_voltage(Vin2, deltaV)
        Vin3val = get_voltage(Vin3, deltaV)
        Vin4val = get_voltage(Vin4, deltaV)
        Vin5val = get_voltage(Vin5, deltaV)
        time.sleep(1.0) # give the board time to power everything
        # print the real readings
        print("\nVset: ",Vin1val)
        print("Vctrl: ",Vin2val*Vscale)
        print("VR3: ",Vin3val*Vscale - Vin4val*Vscale)
        print("IR3 Measured: ",(Vin3val*Vscale - Vin4val*Vscale)/R3)
        print("Iload predicted: ",Vin1val * ratio)
        print("Vload predicted: ",Vin1val * ratio * Rload)
        print("Vload: ", Vin5val*Vscale)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def Cuffe_Iface():
    # method that listens for input from LabVIEW and responds appropriately
    # John Cuffe 10 - 10 - 2020
    # Edited R. Sheehan 27 - 10 - 2020
    # Serial protocol (runs forever):
    #   'a<volts>' -> drive analog output A0 with <volts>, clamped to [0, 3.3)
    #   'l' or any other command -> print the five analog input readings
    FUNC_NAME = ".Cuffe_Iface()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        while True:
            if supervisor.runtime.serial_bytes_available: # Listens for a serial command
                command = input()
                if command.startswith(writeAngStrA): # If the command starts with writeAngStrA it knows it is an output (Write)
                    try: # In case user inputs NAN somehow
                        SetVoltage = float(command[1:]) # Everything after the writeAngStrA is the voltage
                        if SetVoltage >= 0.0 and SetVoltage < 3.3: # Sets limits on the Output voltage to board specs
                            Vout.value = dac_value(SetVoltage) # Set the voltage
                        else:
                            Vout.value = dac_value(0.0) # Set the voltage to zero in the event of SetVoltage range error
                    except ValueError:
                        ERR_STATEMENT = ERR_STATEMENT + '\nVin must be a float'
                        raise Exception
                elif command.startswith(readAngStr): # If the command starts with readAngStr it knows user is looking for Vin. (Read)
                    # in the scheme I have set up
                    # A1 measures Ground, A2 measures Vctrl-high, A3 measures Vr3-high, A4 measures Vr3-low, A5 measures Vrl-high
                    # Measurement at ground can be substracted off where required
                    print(get_voltage(Vin1), get_voltage(Vin2), get_voltage(Vin3), get_voltage(Vin4), get_voltage(Vin5)) # Prints to serial to be read by LabView
                    #print(get_voltage(Vin2))
                    #print(Vin1.value)
                else:
                    print(get_voltage(Vin1), get_voltage(Vin2), get_voltage(Vin3), get_voltage(Vin4), get_voltage(Vin5)) # Prints to serial to be read by LabView
                    #print(get_voltage(Vin2))
                    #print(Vin1.value)
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def Ser_Test():
    # method that prints a data reading continuously
    # Trying to get Python to read data from port
    # R. Sheehan 30 - 11 - 2020
    # NOTE(review): currently prints a single fixed string per call, not a
    # continuous stream — the summary above may be stale.
    FUNC_NAME = ".Ser_Test()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        print("Test String")
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def AC_Read():
    # The idea is to investigate exactly what the sample rate of the IBM4 is
    # CircuitPython is a ReadOnly filesystem, this means that it cannot create files on its drive
    # It can only write info to the console / buffer
    # This buffer can be read by LabVIEW
    # The aim here is to get the IBM4 to read an AC signal continuously and then write the data being read
    # To the console and then read this console data into LabVIEW
    # To speed up the process I will not perform any voltage conversions here
    # This can be done quite easily in LabVIEW
    # I want to be able to read AC signals on at least 2 channels.
    # This works to some extent the IBM4 is not able to sample at high enough frequency
    # R. Sheehan 30 - 1 - 2020
    # On an 'r' command: capture count_lim raw ADC counts from A2 as fast as
    # possible, then dump elapsed time, per-sample time-step and the samples
    # (bracketed by "Start"/"End") to the serial console.
    # NOTE(review): FUNC_NAME has the dot misplaced ("AC_Read.()" instead of
    # ".AC_Read()") — cosmetic only, affects error-message formatting.
    FUNC_NAME = "AC_Read.()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        while True:
            if supervisor.runtime.serial_bytes_available: # Listens for a serial command
                command = input()
                if command.startswith(readCmdStr): # If the command starts with readCmdStr it knows user is looking for Vin. (Read)
                    count = 0
                    count_lim = 500
                    #count_lim = 3e+4 # i think this is close to the upper limit
                    bit_readings_1 = []
                    #bit_readings_2 = []
                    start_time = time.time() # start the clock
                    while count < count_lim:
                        #bit_readings_1.append(Vin1.value) # no voltage conversions here
                        bit_readings_1.append(Vin2.value) # no voltage conversions here
                        count = count + 1
                    elapsed_time = (time.time() - start_time)
                    delta_T = float(elapsed_time / count_lim)
                    # output the data to the buffer
                    count = 0
                    print("Elapsed Time: %(v1)0.15f"%{"v1":elapsed_time})
                    print("Time-step: %(v1)0.15f"%{"v1":delta_T})
                    print("Start")
                    for i in range(0, count_lim, 1):
                        print(bit_readings_1[i])
                    print("End")
                    del bit_readings_1
                else:
                    raise Exception
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def AC_Max():
    # The IBM4 is not able to accurately sample a sine wave
    # The aim now is to see if IBM4 can find the largest value of a sine wave
    # in a given reading request period
    # R. Sheehan 3 - 11 - 2020
    # On an 'r' command: poll A2 500 times (1 ms apart) and print the
    # largest voltage observed in that window.
    # NOTE(review): FUNC_NAME has the dot misplaced ("AC_Max.()" instead of
    # ".AC_Max()") — cosmetic only, affects error-message formatting.
    FUNC_NAME = "AC_Max.()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        while True:
            if supervisor.runtime.serial_bytes_available: # Listens for a serial command
                command = input()
                if command.startswith(readCmdStr): # If the command starts with readCmdStr it knows user is looking for Vin. (Read)
                    #print(get_voltage(Vin1), get_voltage(Vin2), get_voltage(Vin3), get_voltage(Vin4), get_voltage(Vin5)) # Prints to serial to be read by LabView
                    max_val = 0.0
                    count = 0
                    count_lim = 500
                    while count < count_lim:
                        t1 = get_voltage(Vin2) # read the voltage from the pin
                        if t1 > max_val: max_val = t1
                        count = count + 1
                        time.sleep(0.001)
                    print(max_val)
                else:
                    raise Exception
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
def IO_Simple():
    # Check to ensure that ports A0 and A1 are working correctly
    # R. Sheehan 27 - 10 - 2020
    # Drives a fixed voltage on A0 and reads it back on A1.
    FUNC_NAME = ".IO_Simple()" # use this in exception handling messages
    ERR_STATEMENT = "Error: " + MOD_NAME_STR + FUNC_NAME
    try:
        deltaV = get_zero_offset()
        # NOTE(review): deltaV is measured but not applied in the read below
        # (offset 0.0 is passed) — confirm whether that is intentional.
        Vset = 2.315
        Vout.value = dac_value(Vset) # tell A0 to output Vset Volts
        time.sleep(0.1) # pause for 100 ms
        print("Reading A1: ",get_voltage(Vin1, 0.0)) # Read the value that is input into A1
    except Exception as e:
        print(ERR_STATEMENT)
        print(e)
|
nilq/baby-python
|
python
|
import pytest
import main
from time import time
import hmac
import hashlib
from unittest.mock import Mock
from unittest.mock import MagicMock
def test_valid_signature():
    # A request signed with the same secret the app holds must verify.
    timestamp = int(time())
    slack_signing_secret = 'abcdefg'
    main.slack_signing_secret = slack_signing_secret
    req_body = 'abcdefgabcdefgabcdefgabcdefg'
    signature = create_signature(timestamp,req_body,slack_signing_secret)
    headers = {
        'X-Slack-Signature': signature,
        'X-Slack-Request-Timestamp': str(timestamp)
    }
    # Request double: req.get_data().decode() -> body, req.headers -> headers.
    decode = Mock(decode=Mock(return_value=req_body))
    req = Mock(get_data=Mock(return_value=decode), headers=headers)
    assert main.verify_slack_signature(req) == True
def test_invalid_signature_secret_invalid():
    # Signature computed with a different secret than the app's must fail.
    timestamp = int(time())
    slack_signing_secret = 'abcdefg'
    main.slack_signing_secret = 'qwerty'
    req_body = 'abcdefgabcdefgabcdefgabcdefg'
    signature = create_signature(timestamp,req_body,slack_signing_secret)
    headers = {
        'X-Slack-Signature': signature,
        'X-Slack-Request-Timestamp': str(timestamp)
    }
    decode = Mock(decode=Mock(return_value=req_body))
    req = Mock(get_data=Mock(return_value=decode), headers=headers)
    assert main.verify_slack_signature(req) == False
def test_invalid_signature_old_timestamp():
    # A correctly signed request that is a day old must be rejected
    # (replay protection).
    timestamp = int(time()) - 86400
    slack_signing_secret = 'abcdefg'
    main.slack_signing_secret = slack_signing_secret
    req_body = 'abcdefgabcdefgabcdefgabcdefg'
    signature = create_signature(timestamp,req_body,slack_signing_secret)
    headers = {
        'X-Slack-Signature': signature,
        'X-Slack-Request-Timestamp': str(timestamp)
    }
    decode = Mock(decode=Mock(return_value=req_body))
    req = Mock(get_data=Mock(return_value=decode), headers=headers)
    assert main.verify_slack_signature(req) == False
def test_invalid_signature_signature_missing():
    # A request without the X-Slack-Signature header must be rejected.
    timestamp = int(time())
    req_body = 'abcdefgabcdefgabcdefgabcdefg'
    headers = {
        'X-Slack-Request-Timestamp': str(timestamp)
    }
    decode = Mock(decode=Mock(return_value=req_body))
    req = Mock(get_data=Mock(return_value=decode), headers=headers)
    assert main.verify_slack_signature(req) == False
def test_invalid_signature_timestamp_missing():
    # A request without the X-Slack-Request-Timestamp header must be rejected.
    req_body = 'abcdefgabcdefgabcdefgabcdefg'
    headers = {
        'X-Slack-Signature': 'dadsdasadsads'
    }
    decode = Mock(decode=Mock(return_value=req_body))
    req = Mock(get_data=Mock(return_value=decode), headers=headers)
    assert main.verify_slack_signature(req) == False
def create_signature(timestamp, req_body, slack_signing_secret):
    """Build a Slack v0 request signature: HMAC-SHA256 of 'v0:<ts>:<body>'."""
    base_string = 'v0:{}:{}'.format(timestamp, req_body).encode()
    digest = hmac.new(slack_signing_secret.encode(), base_string, hashlib.sha256)
    return 'v0=' + digest.hexdigest()
|
nilq/baby-python
|
python
|
from matchbook.apiclient import APIClient
from matchbook.exceptions import MBError
__title__ = 'matchbook'  # distribution name
__version__ = '0.0.9'  # keep in sync with the package's release metadata
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Test Generic Map
"""
import os
import pytest
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
import matplotlib.pyplot as plt
import sunpy
import sunpy.map
import sunpy.coordinates
import sunpy.data.test
from sunpy.tests.helpers import figure_test
testpath = sunpy.data.test.rootdir
@pytest.fixture
def aia171_test_map():
    """AIA 171 Å Level-1 test map loaded from the bundled FITS file."""
    return sunpy.map.Map(os.path.join(testpath, 'aia_171_level1.fits'))
@pytest.fixture
def heliographic_test_map():
    """Heliographic phase-map fixture loaded from the bundled gzipped FITS."""
    return sunpy.map.Map(os.path.join(testpath, 'heliographic_phase_map.fits.gz'))
@pytest.fixture
def aia171_test_map_with_mask(aia171_test_map):
    """The AIA 171 test map with its top-left quadrant masked out."""
    shape = aia171_test_map.data.shape
    mask = np.zeros_like(aia171_test_map.data, dtype=bool)
    mask[0:shape[0] // 2, 0:shape[1] // 2] = True
    return sunpy.map.Map(np.ma.array(
        aia171_test_map.data, mask=mask),
        aia171_test_map.meta)
# Figure tests: each function below renders a plot and the figure-test
# framework hashes the resulting figure against a stored reference hash,
# so the bodies contain no asserts by design.
@figure_test
def test_plot_aia171(aia171_test_map):
    aia171_test_map.plot()
@figure_test
def test_plot_aia171_clip(aia171_test_map):
    aia171_test_map.plot(clip_interval=(5., 99.)*u.percent)
@figure_test
def test_peek_aia171(aia171_test_map):
    aia171_test_map.peek()
@figure_test
def test_peek_grid_aia171(aia171_test_map):
    aia171_test_map.peek(draw_grid=True)
@figure_test
def test_peek_grid_spacing_aia171(aia171_test_map):
    aia171_test_map.peek(draw_grid=(5, 5) * u.deg)
@figure_test
def test_peek_limb_aia171(aia171_test_map):
    aia171_test_map.peek(draw_limb=True)
@figure_test
def test_draw_grid_aia171(aia171_test_map):
    aia171_test_map.plot()
    aia171_test_map.draw_grid(grid_spacing=(30, 40) * u.deg)
@figure_test
def test_peek_grid_limb_aia171(aia171_test_map):
    aia171_test_map.peek(draw_grid=True, draw_limb=True)
@figure_test
def test_plot_aia171_nowcsaxes(aia171_test_map):
    # Plot onto a plain matplotlib Axes rather than a WCSAxes.
    ax = plt.gca()
    aia171_test_map.plot(axes=ax)
@figure_test
def test_rectangle_aia171(aia171_test_map):
    aia171_test_map.plot()
    bottom_left = SkyCoord(
        0 * u.arcsec, 0 * u.arcsec, frame=aia171_test_map.coordinate_frame)
    w = 100 * u.arcsec
    h = 100 * u.arcsec
    aia171_test_map.draw_rectangle(bottom_left, w, h)
@figure_test
def test_plot_masked_aia171(aia171_test_map_with_mask):
    aia171_test_map_with_mask.plot()
@figure_test
def test_plot_masked_aia171_nowcsaxes(aia171_test_map_with_mask):
    ax = plt.gca()
    aia171_test_map_with_mask.plot(axes=ax)
@figure_test
def test_plot_aia171_superpixel(aia171_test_map):
    aia171_test_map.superpixel((9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_aia171_superpixel_nowcsaxes(aia171_test_map):
    ax = plt.gca()
    aia171_test_map.superpixel(
        (9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_plot_masked_aia171_superpixel(aia171_test_map_with_mask):
    aia171_test_map_with_mask.superpixel(
        (9, 7) * u.pix, offset=(4, 4) * u.pix).plot()
@figure_test
def test_plot_masked_aia171_superpixel_nowcsaxes(aia171_test_map_with_mask):
    ax = plt.gca()
    aia171_test_map_with_mask.superpixel(
        (9, 7) * u.pix, offset=(4, 4) * u.pix).plot(axes=ax)
@figure_test
def test_draw_contours_aia(aia171_test_map):
    aia171_test_map.plot()
    aia171_test_map.draw_contours(u.Quantity(np.arange(1, 100, 10), 'percent'))
@figure_test
def test_heliographic_peek(heliographic_test_map):
    heliographic_test_map.peek()
@figure_test
def test_heliographic_rectangle(heliographic_test_map):
    heliographic_test_map.plot()
    bottom = SkyCoord(
        60 * u.deg, 50 * u.deg, frame=heliographic_test_map.coordinate_frame)
    w = 13 * u.deg
    h = 13 * u.deg
    heliographic_test_map.draw_rectangle(bottom, w, h, color='cyan')
@figure_test
def test_heliographic_grid_annotations(heliographic_test_map):
    heliographic_test_map.plot()
    heliographic_test_map.draw_grid(annotate=False)
|
nilq/baby-python
|
python
|
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# this should be fine, but failed with pychecker 0.8.18 on python 2.6
def func():
    # Intentional Python 2 code: this file is a pychecker regression
    # fixture, so the print *statement* below must stay as-is and the
    # file must not be modernised to Python 3 syntax.
    d = { 'a': 1, 'b': 2}
    print d.keys()
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.init as init
import numpy as np
import math
import torchvision.utils as tvu
from torch.autograd import Variable
import matplotlib.pyplot as plt
def generate_images(generator, centers, num_clusters, alpha, z_dim, device):
    # Sample 16 latent codes near randomly chosen cluster centers and decode
    # them into a torchvision image grid.
    # assumes centers is a tensor indexable by an int64 tensor and that
    # generator(noise) returns image tensors — TODO confirm with callers.
    idx_centers = torch.from_numpy(np.random.choice(np.arange(num_clusters), 16))
    eps = torch.FloatTensor(16, z_dim).uniform_(-alpha, alpha).to(device)
    noise = centers[idx_centers] + eps
    num_images = noise.shape[0]
    rows = int(math.sqrt(num_images))  # 4x4 grid for 16 samples
    images = generator(noise).cpu().detach()
    grid_img = tvu.make_grid(images, nrow=rows)
    return grid_img
def reconstrct_images(model, dataloader, device):
    # Encode/quantise/decode one batch through a VQ-VAE style model and
    # return a side-by-side (original | reconstruction) comparison tensor.
    # NOTE: the misspelled name "reconstrct_images" is kept for caller
    # compatibility.
    # assumes the batch holds at least 10 single-channel 32x32 images —
    # TODO confirm (the .view calls below hard-code those dimensions).
    model.eval()
    (x, _) = next(iter(dataloader))
    x = x.to(device)
    x_pre_vq = model._pre_vq_conv(model._encoder(x))
    _, x_quantize, _, _ = model._vq_vae(x_pre_vq)
    x_hat = model._decoder(x_quantize).cpu().detach()
    #grid_img = tvu.make_grid(x_hat, nrow=rows)
    x = x[:10].cpu().view(10 * 32, 32)
    x_hat = x_hat[:10].cpu().view(10 * 32, 32)
    comparison = torch.cat((x, x_hat), 1).view(10 * 32, 2 * 32)
    return comparison
def type_tdouble(use_cuda=False):
    """Return the double-precision tensor type for the requested device."""
    if use_cuda:
        return torch.cuda.DoubleTensor
    return torch.DoubleTensor
def init_weights(module):
    """Xavier-initialise every Linear/Conv2d weight under *module* and zero
    the matching biases.

    ``Module.modules()`` already yields the module itself and all of its
    descendants recursively, so no explicit recursion into containers is
    needed — the previous ``nn.Sequential`` branch re-visited and
    re-initialised each child a second time.
    """
    for m in module.modules():
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            init.xavier_normal_(m.weight.data)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias, 0.0)
def one_hot(labels, n_class, use_cuda=False):
    # Convert integer class labels into a one-hot matrix of shape
    # [N, n_class] built from the double tensor type of type_tdouble().
    # assumes labels is an integer (int64) tensor of shape [N] or [N, 1],
    # as required by scatter_ — TODO confirm with callers.
    # Ensure labels are [N x 1]
    if len(list(labels.size())) == 1:
        labels = labels.unsqueeze(1)
    mask = type_tdouble(use_cuda)(labels.size(0), n_class).fill_(0)
    # scatter dimension, position indices, fill_value
    return mask.scatter_(1, labels, 1)
def to_cuda(tensor):
    """Move *tensor* to the GPU when it is a torch.Tensor; return anything
    else unchanged."""
    return tensor.cuda() if isinstance(tensor, torch.Tensor) else tensor
def conv_size(H_in, k_size, stride, padd, dil=1):
    """Return the spatial output size of a convolution.

    Standard formula: floor((H_in + 2*padd - dil*(k_size-1) - 1)/stride + 1).
    """
    H_out = np.floor((H_in + 2 * padd - dil * (k_size - 1) - 1) / stride + 1)
    # Fixed: np.int was removed in NumPy 1.24 (it raised AttributeError on
    # modern NumPy); the builtin int() is the drop-in replacement.
    return int(H_out)
def shuffle(X):
    """Randomly permute the rows of array *X* in place."""
    order = np.random.permutation(X.shape[0])
    np.take(X, order, axis=0, out=X)
def numpy2torch(x):
    # Wrap a NumPy array as a torch.Tensor (shares memory; no copy).
    return torch.from_numpy(x)
def extract_batch(data, it, batch_size):
    """Return batch *it* from *data* as a float tensor scaled to [0, 1].

    :param data: numpy array of stacked images, indexed [sample, H, W];
        pixel values are divided by 255.0.
    :param it: zero-based batch index.
    :param batch_size: number of samples per batch.
    """
    x = torch.from_numpy(data[it * batch_size:(it + 1) * batch_size, :, :]) / 255.0
    # torch.autograd.Variable has been a deprecated no-op wrapper since
    # PyTorch 0.4; returning the tensor directly is equivalent.
    return x
def plot_scatter_outliers(mse_score_inlier, discriminator_score_inlier, mse_score_outlier, discriminator_score_outlier, epoch):
    # Scatter-plot inlier vs outlier scores (MSE distance against
    # discriminator distance) and save the figure to
    # results/inlier_vs_outlier_<epoch>.png.
    # assumes the results/ directory already exists — TODO confirm.
    plt.scatter(mse_score_inlier, discriminator_score_inlier)
    plt.scatter(mse_score_outlier, discriminator_score_outlier)
    plt.xlabel('MSE_distance')
    plt.ylabel('Discriminator_distance')
    #plt.legend()
    plt.grid(True)
    plt.savefig('results/inlier_vs_outlier_{}.png'.format(epoch))
    plt.close()
def get_mse_score(model, x, device):
    """Per-sample squared reconstruction error.

    Runs *x* through *model* (expected to return a 3-tuple whose middle
    element is the reconstruction) and returns a list with, for each sample,
    the sum of squared differences between input and reconstruction.
    """
    num_samples = x.size(0)
    x = x.to(device)
    _, recon, _ = model(x)
    originals = x.squeeze().cpu().detach().numpy()
    recons = recon.squeeze().cpu().detach().numpy()
    return [
        np.sum(np.power(recons[i].flatten() - originals[i].flatten(), 2.0))
        for i in range(num_samples)
    ]
def plot_mse_outliers(mse_score_inlier, mse_score_outlier, filename):
    # Overlay normalised histograms of inlier (green) and outlier (red)
    # MSE distances and save the figure to *filename*.
    plt.hist(mse_score_inlier, 10, density=1, facecolor='g', alpha=0.75)
    plt.hist(mse_score_outlier, 10, density=1, facecolor='r', alpha=0.75)
    plt.xlabel('MSE_distance')
    plt.ylabel('Histogram')
    #plt.legend()
    plt.grid(True)
    plt.savefig(filename)
    plt.close()
def save_checkpoint(state, filename):
    # Serialise *state* (e.g. a model/optimizer state dict) to *filename*.
    torch.save(state, filename)
def save_img(img, filename):
    # Save a CHW image tensor to *filename*, transposing to HWC for
    # matplotlib and hiding both axes.
    npimg = img.numpy()
    fig = plt.imshow(np.transpose(npimg, (1, 2, 0)), interpolation='nearest')
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.savefig(filename)
    plt.close()
|
nilq/baby-python
|
python
|
import os
import numpy as np
def _download_and_extract(url, path, filename):
    """Ensure *filename* exists under *path* and is unzipped into it.

    Tries to unzip first; on any failure it (re)downloads the archive from
    *url* and retries.
    NOTE(review): if the server keeps returning a corrupt archive this loop
    never terminates — confirm whether a retry limit is wanted.
    """
    import shutil, zipfile
    import requests
    fn = os.path.join(path, filename)
    while True:
        try:
            with zipfile.ZipFile(fn) as zf:
                zf.extractall(path)
            print('Unzip finished.')
            break
        except Exception:
            os.makedirs(path, exist_ok=True)
            f_remote = requests.get(url, stream=True)
            sz = f_remote.headers.get('content-length')  # NOTE(review): fetched but unused
            assert f_remote.status_code == 200, 'fail to open {}'.format(url)
            with open(fn, 'wb') as writer:
                # Stream in 1 MiB chunks to bound memory usage.
                for chunk in f_remote.iter_content(chunk_size=1024*1024):
                    writer.write(chunk)
            print('Download finished. Unzipping the file...')
class KGDataset1:
    '''Load a knowledge graph with format 1
    In this format, the folder with a knowledge graph has five files:
    * entities.dict stores the mapping between entity Id and entity name.
    * relations.dict stores the mapping between relation Id and relation name.
    * train.txt stores the triples in the training set.
    * valid.txt stores the triples in the validation set.
    * test.txt stores the triples in the test set.
    The mapping between entity (relation) Id and entity (relation) name is stored as 'id\tname'.
    The triples are stored as 'head_name\trelation_name\ttail_name'.
    '''
    def __init__(self, path, name, read_triple=True, only_train=False):
        # Download and unzip the dataset on first use.
        # :param path: local cache directory.
        # :param name: dataset name (also the folder/zip basename).
        # :param read_triple: when True, load the triple files as well.
        # :param only_train: when True, skip the valid/test splits.
        url = 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/{}.zip'.format(name)
        if not os.path.exists(os.path.join(path, name)):
            print('File not found. Downloading from', url)
            _download_and_extract(url, path, name + '.zip')
        path = os.path.join(path, name)
        # entities.dict lines are 'id\tname'; build name -> int id.
        with open(os.path.join(path, 'entities.dict')) as f:
            entity2id = {}
            for line in f:
                eid, entity = line.strip().split('\t')
                entity2id[entity] = int(eid)
        self.entity2id = entity2id
        with open(os.path.join(path, 'relations.dict')) as f:
            relation2id = {}
            for line in f:
                rid, relation = line.strip().split('\t')
                relation2id[relation] = int(rid)
        self.relation2id = relation2id
        # TODO: to deal with contries dataset.
        self.n_entities = len(self.entity2id)
        self.n_relations = len(self.relation2id)
        if read_triple == True:
            self.train = self.read_triple(path, 'train')
            if only_train == False:
                self.valid = self.read_triple(path, 'valid')
                self.test = self.read_triple(path, 'test')
    def read_triple(self, path, mode):
        # mode: train/valid/test
        # Parse <mode>.txt ('head\trel\ttail' names) and return the id-mapped
        # triples as three parallel int64 numpy arrays (heads, rels, tails).
        heads = []
        tails = []
        rels = []
        with open(os.path.join(path, '{}.txt'.format(mode))) as f:
            for line in f:
                h, r, t = line.strip().split('\t')
                heads.append(self.entity2id[h])
                rels.append(self.relation2id[r])
                tails.append(self.entity2id[t])
        heads = np.array(heads, dtype=np.int64)
        tails = np.array(tails, dtype=np.int64)
        rels = np.array(rels, dtype=np.int64)
        return (heads, rels, tails)
class KGDataset2:
    '''Load a knowledge graph with format 2
    In this format, the folder with a knowledge graph has five files:
    * entity2id.txt stores the mapping between entity name and entity Id.
    * relation2id.txt stores the mapping between relation name relation Id.
    * train.txt stores the triples in the training set.
    * valid.txt stores the triples in the validation set.
    * test.txt stores the triples in the test set.
    The mapping between entity (relation) name and entity (relation) Id is stored as 'name\tid'.
    The triples are stored as 'head_id\ttail_id\trelation_id' — note the
    head, tail, relation order, which is what read_triple() below parses.
    '''

    def __init__(self, path, name, read_triple=True, only_train=False):
        """Load (downloading if needed) a format-2 knowledge graph.

        Args:
            path: Root directory holding the dataset folder.
            name: Dataset (or partition) folder name.
            read_triple: When True, also parse the triple files.
            only_train: When True, treat the folder as a partition: entity
                count comes from local_to_global.txt and valid/test are skipped.
        """
        url = 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/{}.zip'.format(name)
        if not os.path.exists(os.path.join(path, name)):
            print('File not found. Downloading from', url)
            _download_and_extract(url, path, '{}.zip'.format(name))
        self.path = os.path.join(path, name)

        # The first line of relation2id.txt holds the relation count.
        f_rel2id = os.path.join(self.path, 'relation2id.txt')
        with open(f_rel2id) as f_rel:
            self.n_relations = int(f_rel.readline()[:-1])
        if only_train == True:
            # A partition only knows its local entities; count them from the
            # local->global mapping instead of the global entity2id file.
            f_ent2id = os.path.join(self.path, 'local_to_global.txt')
            with open(f_ent2id) as f_ent:
                self.n_entities = len(f_ent.readlines())
        else:
            # The first line of entity2id.txt holds the entity count.
            f_ent2id = os.path.join(self.path, 'entity2id.txt')
            with open(f_ent2id) as f_ent:
                self.n_entities = int(f_ent.readline()[:-1])
        if read_triple == True:
            self.train = self.read_triple(self.path, 'train')
            if only_train == False:
                self.valid = self.read_triple(self.path, 'valid')
                self.test = self.read_triple(self.path, 'test')

    def read_triple(self, path, mode, skip_first_line=False):
        """Parse <path>/<mode>.txt ('head_id TAB tail_id TAB rel_id' lines).

        Args:
            path: Dataset directory.
            mode: Split name — 'train', 'valid' or 'test'.
            skip_first_line: Skip the first line (presumably a header/count
                line in some dumps — confirm against the data files).

        Returns:
            Tuple of three int64 numpy arrays: (heads, rels, tails).
        """
        heads = []
        tails = []
        rels = []
        print('Reading {} triples....'.format(mode))
        with open(os.path.join(path, '{}.txt'.format(mode))) as f:
            if skip_first_line:
                _ = f.readline()
            for line in f:
                # On-disk order is head, tail, relation.
                h, t, r = line.strip().split('\t')
                heads.append(int(h))
                tails.append(int(t))
                rels.append(int(r))
        heads = np.array(heads, dtype=np.int64)
        tails = np.array(tails, dtype=np.int64)
        rels = np.array(rels, dtype=np.int64)
        print('Finished. Read {} {} triples.'.format(len(heads), mode))
        return (heads, rels, tails)
def get_dataset(data_path, data_name, format_str):
    """Instantiate the loader matching the dataset's on-disk format.

    Freebase is always stored in format 2; otherwise format_str == '1'
    selects KGDataset1 and anything else selects KGDataset2.
    """
    if data_name == 'Freebase' or format_str != '1':
        return KGDataset2(data_path, data_name)
    return KGDataset1(data_path, data_name)
def get_partition_dataset(data_path, data_name, format_str, part_id):
    """Load one partition of a partitioned KG plus its mapping tables.

    Returns:
        (dataset, partition_book, local_to_global) where partition_book maps
        each global entity id to its partition id, and local_to_global maps
        each local entity id to its global id.
    """
    part_name = os.path.join(data_name, 'part_' + str(part_id))
    if data_name == 'Freebase' or format_str != '1':
        dataset = KGDataset2(data_path, part_name, read_triple=True, only_train=True)
    else:
        dataset = KGDataset1(data_path, part_name, read_triple=True, only_train=True)

    path = os.path.join(data_path, part_name)
    # One integer per line in each mapping file.
    with open(os.path.join(path, 'partition_book.txt')) as f:
        partition_book = [int(line) for line in f]
    with open(os.path.join(path, 'local_to_global.txt')) as f:
        local_to_global = [int(line) for line in f]
    return dataset, partition_book, local_to_global
def get_server_partition_dataset(data_path, data_name, format_str, part_id):
    """Load a partition for the server side (no triples) plus id mappings.

    Returns:
        (global_to_local, dataset): global_to_local maps a global entity id
        to its local index within this partition; dataset carries the counts
        but no triples (read_triple=False).
    """
    part_name = os.path.join(data_name, 'part_' + str(part_id))
    if data_name == 'Freebase':
        dataset = KGDataset2(data_path, part_name, read_triple=False, only_train=True)
    elif format_str == '1':
        dataset = KGDataset1(data_path, part_name, read_triple=False, only_train=True)
    else:
        dataset = KGDataset2(data_path, part_name, read_triple=False, only_train=True)

    path = os.path.join(data_path, part_name)
    # Count lines with the handle properly closed — the original leaked the
    # file object via len(open(...).readlines()).
    with open(os.path.join(path, 'partition_book.txt')) as f:
        n_entities = sum(1 for _ in f)
    local_to_global = []
    with open(os.path.join(path, 'local_to_global.txt')) as f:
        for line in f:
            local_to_global.append(int(line))

    # Invert the local->global mapping.
    global_to_local = [0] * n_entities
    for local_id, global_id in enumerate(local_to_global):
        global_to_local[global_id] = local_id
    local_to_global = None
    return global_to_local, dataset
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Chao Wu'
r'''
python C:\Users\cwu\Desktop\Software\Papers\pH_effect\plot_ph_effect_contour\plot_contour.py
'''
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Input/output locations. NOTE: these are placeholders — replace with real
# paths. Raw strings fix a bug in the original: in 'path\to\epcs.xlsx' the
# '\t' was parsed as a TAB character, yielding a path no OS could match.
EPCS_FILE = r'path\to\epcs.xlsx'
DGPS_FILE = r'path\to\dgps.xlsx'
OUT_DIR = r'path\to\output'
# Enzymes grouped by the metabolic pathway they belong to.
ENZYME_CAT = {'glycolysis': ['pgi', 'pfk', 'fbp', 'fba', 'tpi', 'gap', 'pgk', 'gpm', 'eno', 'pyk', 'pps', 'pdh'],
              'pentose phosphate pathway': ['zwf', 'pgl', 'gnd', 'rpi', 'rpe', 'tkt1', 'tal', 'tkt2'],
              'TCA cycle': ['cs', 'acn1', 'acn2', 'icd', 'kgd', 'suc', 'sdh', 'fum', 'mdh', 'icl', 'mals'],
              'glutamate metabolism': ['gs', 'gdh', 'gls', 'gogat'],
              'pyruvate metabolism': ['aldh', 'adh', 'pta', 'ak', 'ldh', 'pfl'],
              'anaplerotic reactions': ['me1', 'me2', 'ppc', 'ppck'],
              'ATP metabolism': ['atps4r', 'atpm']}
# Pathways that actually get plotted (ATP metabolism is excluded).
PATHWAYS = ['glycolysis', 'pentose phosphate pathway', 'TCA cycle', 'glutamate metabolism', 'pyruvate metabolism', 'anaplerotic reactions']
# Each pH axis is sampled on an NPOINTS x NPOINTS grid.
NPOINTS = 20
def plot_contour(out_dir, filename, data_file, cmap, constant_color):
    """Draw per-enzyme contour maps of *data_file* over the pH grid.

    For every pathway in PATHWAYS one figure is produced, with one subplot
    per enzyme plus a 'sum' panel, and saved as JPG into *out_dir*.

    Args:
        out_dir: Output directory (created if missing).
        filename: 'epcs' or 'dgps'; selects value filtering and tick rounding.
        data_file: Excel file with columns ph_in, ph_out, then one per enzyme.
        cmap: Matplotlib colormap name (used reversed).
        constant_color: Fill color for panels whose values are ~constant.
    """
    dataInfo = pd.read_excel(data_file, header=0, index_col=0)
    # Rows were written grid-major; reshape back to NPOINTS x NPOINTS and
    # transpose so cytoplasmic pH varies along x.
    X_phin = dataInfo['ph_in'].values.reshape(NPOINTS, NPOINTS).T
    Y_phout = dataInfo['ph_out'].values.reshape(NPOINTS, NPOINTS).T
    data = dataInfo.iloc[:, 2:].copy()
    data = (data.fillna(method='bfill') + data.fillna(method='ffill'))/2  # impute by mean
    if filename == 'epcs':
        # epcs values are fractions: rows with any value outside [0, 1) or a
        # row sum >= 1 are zeroed out (presumably infeasible grid points —
        # TODO confirm with the model that produced the file).
        for i, row in data.iterrows():
            if (row < 0).any() or (row >= 1).any() or row.sum() >= 1:
                data.loc[i, :] = 0
    #data.to_excel(r'C:\Users\cwu\Desktop\all.xlsx')#!!!
    # Colorbar tick precision per dataset.
    if filename == 'epcs':
        ndigits = 5
    elif filename == 'dgps':
        ndigits = 2
    # plot per enzyme
    for pathway in PATHWAYS:
        enzymes = ENZYME_CAT[pathway]
        ncols = 3
        nrows = int(np.ceil((len(enzymes)+1)/ncols))
        fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(12, nrows*3), sharex='all', sharey='all')
        for i, enz in enumerate(enzymes+['sum']):
            if enz == 'sum':
                Z = data[enzymes].sum(axis=1).values.reshape(NPOINTS, NPOINTS).T
            else:
                Z = data[enz].values.reshape(NPOINTS, NPOINTS).T
            # axes is 2-D for multi-row grids but 1-D when nrows == 1.
            if axes.ndim == 2:
                indexer = (i//ncols, i%ncols)
            elif axes.ndim == 1:
                indexer = i
            vmin = Z.min().min()
            if vmin == 0:
                vmin = 0.00001
            vmax = Z.max().max()
            levels = np.linspace(vmin, vmax, NPOINTS)
            if vmax - vmin > 0.0001:
                ctf = axes[indexer].contourf(X_phin, Y_phout, Z, vmin=vmin, vmax=vmax, levels=levels,
                                             cmap=plt.cm.get_cmap(cmap).reversed())
                cbar = fig.colorbar(mappable=ctf, ax=axes[indexer])
                # Re-tick the colorbar with 4 evenly spaced, rounded labels.
                cbarTicks = cbar.get_ticks()
                cbarTicksNew = np.linspace(cbarTicks.min(), cbarTicks.max(), 4)
                cbar.set_ticks(cbarTicksNew)
                cbar.ax.set_yticklabels(cbarTicksNew.round(ndigits))
                cbar.ax.tick_params(labelsize=13)
            else:
                # Nearly constant panel: contours would be noise, so fill with
                # a flat color and print the constant value as the bar label.
                Z = np.full_like(Z, (vmax + vmin)/2)
                ctf = axes[indexer].contourf(X_phin, Y_phout, Z, NPOINTS, colors=constant_color)
                cbar = fig.colorbar(mappable=ctf, ax=axes[indexer])
                cbar.set_ticks([])
                cbar.ax.set_yticklabels([])
                cbar.set_label(round((vmax + vmin)/2, ndigits), horizontalalignment='left', rotation=360,
                               labelpad=5, fontsize=13)
            axes[indexer].locator_params(axis='x', nbins=3)
            axes[indexer].locator_params(axis='y', nbins=4)
            axes[indexer].tick_params(labelsize=15)
            axes[indexer].set_xlabel(enz, fontsize=25)
        # Invisible full-figure axes used only to carry shared axis labels.
        ax_label = fig.add_subplot(111, frameon=False)
        ax_label.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        ax_label.set_xlabel('Cytoplasmic pH', labelpad=50, fontsize=35)
        ax_label.set_ylabel('Periplasmic pH', labelpad=30, fontsize=35)
        # Delete the unused trailing subplots of the grid.
        for i in range(len(enzymes)+1, ncols*nrows):
            if axes.ndim == 2:
                indexer = (i//ncols, i%ncols)
            elif axes.ndim == 1:
                indexer = i
            fig.delaxes(ax=axes[indexer])
        os.makedirs(out_dir, exist_ok=True)
        #plt.tight_layout()
        fig.subplots_adjust(wspace=0.4, hspace=0.3)
        plt.savefig('%s/%s_%s.jpg' % (out_dir, pathway, filename), dpi=300, bbox_inches='tight')
def main():
    # Render both data files, each with its own colormap and the flat color
    # used for near-constant panels.
    plot_contour(OUT_DIR, 'epcs', EPCS_FILE, 'viridis', '#3C528B')
    plot_contour(OUT_DIR, 'dgps', DGPS_FILE, 'plasma', '#D8556C')

if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from matplotlib.patches import Polygon
import numpy as np
import stader
# Load the B747 model (flight condition 2) and build the simulated aircraft.
d = stader.load_aircraft('b747_flight_condition2')
ac = stader.Aircraft(d)
# Animation period in ms; dt is the simulation step in seconds.
msec = 15
dt = msec/1000
show_state = False
if show_state:
    # Debug layout: attitude indicator on the left, six state panels stacked
    # on the right.
    fig = plt.figure(figsize=(16,8))
    ax = plt.subplot2grid((6,2), (0,0), rowspan=4)
    ax1 = plt.subplot2grid((6,2), (0,1))
    ax2 = plt.subplot2grid((6,2), (1,1))
    ax3 = plt.subplot2grid((6,2), (2,1))
    ax4 = plt.subplot2grid((6,2), (3,1))
    ax5 = plt.subplot2grid((6,2), (4,1))
    ax6 = plt.subplot2grid((6,2), (5,1))
    for ax_ in [ax1, ax2, ax3]:
        ax_.set_ylim([-180, 180])
    for ax_ in [ax1, ax2, ax3, ax4, ax5, ax6]:
        ax_.set_xlim([-60, 0])
else:
    fig, ax = plt.subplots()
# Hide all ticks: the axes act as an attitude-indicator instrument, not a plot.
ax.set_yticklabels([], visible=False)
ax.set_yticks([])
ax.set_xticklabels([], visible=False)
ax.set_xticks([])
# Sky-blue background.
# NOTE(review): set_axis_bgcolor was removed in matplotlib 2.x
# (set_facecolor is the replacement) — this script targets an old matplotlib.
ax.set_axis_bgcolor((30.0/255, 144.0/255, 1.0, 1))
# Fixed aircraft symbol: yellow centre dot plus left/right wing bars.
ax.plot(0,0, marker='o', markersize=10, color='y')
ax.plot([-.5, -.25], [0, 0], marker=None, linestyle='-', linewidth=5, color='y')
ax.plot([.5, .25], [0, 0], marker=None, linestyle='-', linewidth=5, color='y')
ax.set_ylim(-1, 1)
ax.set_xlim(-1, 1)
# Brown ground polygon, rotated/translated each frame with roll and pitch.
gnd = Polygon(((-10,0), (10,0), (10,-10), (-10, -10)), closed=True,
              facecolor=(139.0/255, 69.0/255, 19.0/255),
              edgecolor='white')
gnd_xy = gnd.get_xy()
ax.add_artist(gnd)
# Pitch ladder: one bar every 5 deg (longer every 10 deg); 35 deg maps to one
# axis unit (see tracker below). Original untransformed coords are kept in
# orig_xys so each frame re-transforms from scratch.
markers = []
orig_xys = []
for tick in range(-50, 51, 5):
    x = 0.1 if tick % 10 else 0.25
    c = 'k' if tick > 0 else 'w'
    if tick == 0:
        continue
    markers.append(ax.plot([-x, x], [tick/35, tick/35], marker=None,
                           linestyle='-', linewidth=1, color=c)[0])
    orig_xys.append(markers[-1].get_xydata())
# Control-surface deflections (radians), updated by the keyboard handler.
inputs = {'elevator':0, 'aileron':0}
tracking = False
def press(event):
    """Keyboard handler: arrow keys trim elevator/aileron by 1 deg,
    't' toggles the tracking task."""
    global inputs
    global tracking
    if event.key == 'up':
        inputs['elevator'] -= np.deg2rad(1)
    if event.key == 'down':
        inputs['elevator'] += np.deg2rad(1)
    if event.key == 'left':
        inputs['aileron'] -= np.deg2rad(1)
    if event.key == 'right':
        inputs['aileron'] += np.deg2rad(1)
    if event.key == 't':
        # Fix: the original used `~tracking` — bitwise NOT (~False == -1),
        # which only toggled truthiness by accident. Use boolean negation.
        tracking = not tracking
    print(inputs)
frame = 0
# Sum-of-sines tracking signal parameters: amplitudes, integer frequency
# multipliers, angular frequencies and phases over a 240 s base period.
track_a = np.array([.5 if i < 6 else .05 for i in range(12)])
track_k = np.array([7, 11, 16, 25, 38, 61, 103, 131, 151, 181, 313, 523])
track_w = np.array([0.18, 0.28, 0.42, 0.65, 0.99, 1.60, 2.70, 3.43, 3.95, 4.74, 8.19, 13.69])
track_p = np.array([-0.29, -1.03, -3.13, 3.08, -0.84, 0.46, -2.74, -2.18, -1.78, -2.26, -1.82, 0.46])
def tracker(self):
    """Animation callback: advance the sim one step and redraw the horizon."""
    global frame
    frame += 1
    if tracking:
        t = frame*dt
        # Derivative of the sum-of-sines profile, scaled down for display.
        track = np.sum(track_a*2*np.pi*track_k/240.0*np.cos(2*np.pi*track_k*t/240.0 + track_p))
        track *= 0.025
        print(t, (track))
    else:
        track = 0
    ac.update(dt, inputs)
    # Vertical horizon offset: (pitch - track) in degrees, 35 deg per axis
    # unit; R is a homogeneous 2-D rotate-by-roll + translate transform.
    gnd_center = (np.rad2deg(ac.pitch-track))/35
    R = np.array(((np.cos(ac.roll), np.sin(ac.roll), 0),
                  (-np.sin(ac.roll), np.cos(ac.roll), gnd_center),
                  (0,0,1)))
    # Transform the ground polygon from its untouched original coordinates.
    xy = np.hstack((gnd_xy, np.ones((gnd_xy.shape[0], 1))))
    new_xy = R.dot(xy.T).T[:,:2]
    gnd.set_xy(new_xy)
    # Same transform for every pitch-ladder marker.
    for orig_xy, marker in zip(orig_xys, markers):
        xy = np.hstack((orig_xy, np.ones((orig_xy.shape[0], 1))))
        new_xy = R.dot(xy.T)[:2,:]
        marker.set_data(new_xy)
anim = FuncAnimation(fig, tracker, interval=msec, blit=False, repeat=False)
fig.canvas.mpl_connect('key_press_event', press)
plt.show()
|
nilq/baby-python
|
python
|
import os.path

from lsst.meas.base import CircularApertureFluxAlgorithm

# LSST measurement-pipeline config override. `config` is injected by the task
# framework when this file is exec'd (it is not defined here).
# NOTE(review): CircularApertureFluxAlgorithm is imported but not referenced
# in this snippet — presumably needed by one of the loaded configs; confirm.
# Pull in the sibling plugin configs, then the CModel settings.
config.measurement.load(os.path.join(os.path.dirname(__file__), "apertures.py"))
config.measurement.load(os.path.join(os.path.dirname(__file__), "kron.py"))
config.measurement.load(os.path.join(os.path.dirname(__file__), "convolvedFluxes.py"))
config.load(os.path.join(os.path.dirname(__file__), "cmodel.py"))
|
nilq/baby-python
|
python
|
import pyglet
from pyglet.gl import *
blueDot = pyglet.resource.image('blue-dot.png')
redDot = pyglet.resource.image('red-dot.png')
class Window(pyglet.window.Window):
    """Game window: draws mobs/towers as colored points on a 50x50 grid and
    routes mouse/keyboard input to the attached game logic."""

    def __init__(self):
        print("Init Window")
        super(Window, self).__init__(500, 400, vsync=False)
        # NOTE(review): pyglet.clock.ClockDisplay exists only in old pyglet
        # (1.x); newer versions use pyglet.window.FPSDisplay.
        self.fps_display = pyglet.clock.ClockDisplay()
        self.dict_objects = {}
        # Scale both marker images to 10x10.
        # Fix: the original wrote `blueDot.width, redDot.height = 10, 10`,
        # assigning redDot.height twice and never blueDot.height.
        redDot.width, redDot.height = 10, 10
        blueDot.width, blueDot.height = 10, 10

    def setLogic(self, logic):
        """Attach the game-logic object and mirror its object dictionary."""
        self.logic = logic
        self.dict_objects = self.logic.getDictObjects()

    def on_key_press(self, symbol, modifiers):
        """ESC closes the window; any other key is just logged."""
        if symbol == pyglet.window.key.ESCAPE:
            super(Window, self).close()
        else:
            print("Key pressed: " + str(symbol))

    def on_mouse_press(self, x, y, button, modifiers):
        """Left click places a tower, right click a mob, at the clicked cell."""
        # detect where the click goes (tower -> upgrade menu, building place, etc...)
        # maybe a (x,y) grid might be useful instead ob the list so an iteration
        # over the list is unnecessary
        # Snap the click to the lower-left corner of its 50x50 grid cell.
        x = int(x / 50) * 50
        y = int(y / 50) * 50
        if button == pyglet.window.mouse.LEFT:
            print("Left click at (" + str(x) + "," + str(y) + ")")
            self.logic.placeTower(x, y)
        elif button == pyglet.window.mouse.RIGHT:
            print("Right click at (" + str(x) + "," + str(y) + ")")
            self.logic.placeMob(x, y)

    def redraw(self):
        """Clear and redraw: FPS counter plus one point per mob/tower, batched."""
        super(Window, self).clear()
        self.fps_display.draw()
        batch = pyglet.graphics.Batch()
        pyglet.gl.glPointSize(3)
        for mob in self.dict_objects['mobs']:
            # Mobs: green points at the cell centre (+25, +25).
            batch.add(1, pyglet.gl.GL_POINTS, None,
                      ('v2i', (mob[0]+25, mob[1]+25)),
                      ('c3B', (0, 255, 0)))
        for tower in self.dict_objects['towers']:
            # Towers: blue points at the cell centre.
            batch.add(1, pyglet.gl.GL_POINTS, None,
                      ('v2i', (tower[0]+25, tower[1]+25)),
                      ('c3B', (0, 0, 255)))
        batch.draw()
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
import logging
from flask import request
from flask_restx import Namespace
from app.spider.csdn.csdn import CSDN
from app.spider.toutiao.toutiao_hotnews import ToutiaoNews
from app.api.util.base_resource import BaseResource
from app.api.util.web_response import WebResponse
from app.api.util.web_responsecode import WebResponseCode
log = logging.getLogger(__name__)

# Routes below are registered under this namespace (description is shown in
# the generated API docs; kept in the original language on purpose).
nsnews = Namespace('news', description='新闻资讯接口')

@nsnews.route("/toutiao")
class NewsController(BaseResource):
    def get(self):
        '''Fetch Toutiao hot news.

        Query parameters:
            refresh: 1, 0, true or false — truthy values force a refresh.
            last: cursor from the previous refresh (max_behot_time); default 0.

        Returns:
            JSON with `result`, `has_more` and `next` on success,
            otherwise a FAILED response code.
        '''
        response = WebResponse()
        # Accept several spellings of "true"; everything else is False.
        refresh = request.values.get('refresh') in [1, '1', 'true', 'True', True]
        last = request.values.get('last') if request.values.get('last') is not None else 0
        news = ToutiaoNews().hotnews(refresh, last_max_behot_time=last)
        if news:
            response.data = {
                'result': news.get('data'),
                'has_more': news.get('has_more'),
                'next': news.get('next')
            }
        else:
            response.code = WebResponseCode.FAILED
        return response.tojson()
@nsnews.route("/csdn")
class CSDNController(BaseResource):
    def get(self):
        '''Fetch CSDN hot tech news.

        Query parameters:
            last: cursor of the last fetched item; defaults to ''.

        Returns:
            JSON with the news payload on success, otherwise a FAILED code.
        '''
        response = WebResponse()
        last = request.values.get('last') if request.values.get('last') is not None else ''
        news = CSDN().getHotNews(last)
        if news:
            response.data = news
        else:
            response.code = WebResponseCode.FAILED
        return response.tojson()
|
nilq/baby-python
|
python
|
"""Singleton Class"""
# standard library
import threading
class Singleton(type):
    """Metaclass guaranteeing at most one instance per class.

    Calling a class that uses this metaclass returns the cached instance,
    creating it (thread-safely) on first use.
    """

    _instances = {}
    _lock = threading.Lock()

    def __call__(cls, *args, **kwargs):
        """Return the single cached instance, constructing it on first call."""
        # The lock makes concurrent first-time construction safe.
        with cls._lock:
            try:
                return cls._instances[cls]
            except KeyError:
                instance = super().__call__(*args, **kwargs)
                cls._instances[cls] = instance
                return instance
|
nilq/baby-python
|
python
|
def print_two(*args):
    """Unpack exactly two positional arguments and print their reprs."""
    first, second = args
    print('arg1: %r, arg2: %r' % (first, second))


def print_two_again(arg1, arg2):
    """Same output as print_two, but with explicitly named parameters."""
    print('arg1: %r, arg2: %r' % (arg1, arg2))


print_two("zdr", "zdr")
print_two_again("zdr", "zdr")
|
nilq/baby-python
|
python
|
import os
import sys
import time
import requests
from py2neo import Graph, Node, Relationship
# Neo4j connection (default localhost credentials from py2neo).
graph = Graph()
# Uniqueness constraints — they also back the MERGE lookups below with indexes.
graph.run("CREATE CONSTRAINT ON (u:User) ASSERT u.username IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (t:Tweet) ASSERT t.id IS UNIQUE")
graph.run("CREATE CONSTRAINT ON (h:Hashtag) ASSERT h.name IS UNIQUE")

# Twitter API bearer token; a missing variable raises KeyError (fail fast).
TWITTER_BEARER = os.environ["TWITTER_BEARER"]
headers = dict(accept="application/json", Authorization="Bearer " + TWITTER_BEARER)
# Search query comes from the first command-line argument.
payload = dict(
    count=100,
    result_type="recent",
    lang="en",
    q=sys.argv[1]
)
base_url = "https://api.twitter.com/1.1/search/tweets.json?"
def find_tweets(since_id):
    """Search recent tweets newer than *since_id*.

    Returns the list of status dicts from the v1.1 search API
    (raises KeyError/requests errors on failure — handled by the caller loop).
    """
    payload["since_id"] = since_id
    url = base_url + "q={q}&count={count}&result_type={result_type}&lang={lang}&since_id={since_id}".format(**payload)
    r = requests.get(url, headers=headers)
    tweets = r.json()["statuses"]
    return tweets
def upload_tweets(tweets):
    """Merge tweets (and their users, hashtags, mentions, replies, retweets)
    into the graph. MERGE makes the whole upload idempotent."""
    for t in tweets:
        u = t["user"]
        e = t["entities"]
        tweet = Node("Tweet", id=t["id"])
        graph.merge(tweet)
        tweet["text"] = t["text"]
        tweet.push()
        # (author)-[:POSTS]->(tweet)
        user = Node("User", username=u["screen_name"])
        graph.merge(user)
        graph.merge(Relationship(user, "POSTS", tweet))
        # (hashtag)-[:TAGS]->(tweet); hashtags are normalized to lowercase.
        for h in e.get("hashtags", []):
            hashtag = Node("Hashtag", name=h["text"].lower())
            graph.merge(hashtag)
            graph.merge(Relationship(hashtag, "TAGS", tweet))
        # (tweet)-[:MENTIONS]->(user)
        for m in e.get('user_mentions', []):
            mention = Node("User", username=m["screen_name"])
            graph.merge(mention)
            graph.merge(Relationship(tweet, "MENTIONS", mention))
        # Replied-to and retweeted tweets are created as id-only stubs.
        reply = t.get("in_reply_to_status_id")
        if reply:
            reply_tweet = Node("Tweet", id=reply)
            graph.merge(reply_tweet)
            graph.merge(Relationship(tweet, "REPLY_TO", reply_tweet))
        ret = t.get("retweeted_status", {}).get("id")
        if ret:
            retweet = Node("Tweet", id=ret)
            graph.merge(retweet)
            graph.merge(Relationship(tweet, "RETWEETS", retweet))
# Poll the search endpoint once a minute, forever. since_id makes each poll
# fetch only tweets newer than the previous batch.
since_id = -1
while True:
    try:
        tweets = find_tweets(since_id=since_id)
        if not tweets:
            print("No tweets found.")
            time.sleep(60)
            continue
        # First result's id becomes the new high-water mark (presumably the
        # API returns newest-first — TODO confirm against the search docs).
        since_id = tweets[0].get("id")
        upload_tweets(tweets)
        print("{} tweets uploaded!".format(len(tweets)))
        time.sleep(60)
    except Exception as e:
        # Deliberate best-effort loop: log the error and keep polling.
        print(e)
        time.sleep(60)
        continue
|
nilq/baby-python
|
python
|
import os
import shutil
def create_analysis_folder(folder_name):
    """Create *folder_name* (if needed) and copy the input files into it.

    Copies 'ffield' and 'parameters' from the current working directory;
    raises FileNotFoundError if either is missing.
    """
    # exist_ok avoids the check-then-create race of the original
    # `if not exists: makedirs` pattern and makes the call idempotent.
    os.makedirs(folder_name, exist_ok=True)
    shutil.copy('ffield', folder_name)
    shutil.copy('parameters', folder_name)
|
nilq/baby-python
|
python
|
from collections import defaultdict
import boto3
import click
from halo import Halo
from termcolor import colored, cprint
from ..app import app
from ..utils import formatted_time_ago
def task_id(task_detail: dict) -> str:
    """Return a human-friendly task identifier.

    Prefers the 'paaws:buildNumber' tag; falls back to the last path
    segment of the task ARN.
    """
    tag_map = {tag["key"]: tag["value"] for tag in task_detail["tags"]}
    build_number = tag_map.get("paaws:buildNumber")
    if build_number is not None:
        return build_number
    return task_detail["taskArn"].rsplit("/", 1)[-1]
@click.command()
def ps():
    """Show running containers"""
    ecs = boto3.client("ecs")
    with Halo(text="fetching container information", spinner="dots"):
        tasks = app.get_tasks()
        # Group tasks by their ECS "group" and fetch each distinct task
        # definition only once.
        tasks_by_group = defaultdict(list)
        task_definitions = {}
        for t in tasks:
            tasks_by_group[t["group"]].append(t)
            if t["taskDefinitionArn"] not in task_definitions:
                task_definitions[t["taskDefinitionArn"]] = ecs.describe_task_definition(
                    taskDefinition=t["taskDefinitionArn"]
                )["taskDefinition"]
    for group in sorted(tasks_by_group.keys()):
        tasks = tasks_by_group[group]
        defn = task_definitions[tasks[0]["taskDefinitionArn"]]
        print(colored("===", attrs=["dark"]), colored(group, "green"))
        for t in tasks:
            # One line per task: id, cpu (vCPU = units/1024) + memory, status,
            # and start time when available.
            task_line = [
                task_id(t),
                " ",
                colored("(", "white"),
                colored(
                    "cpu:{cpu} mem:{memory}".format(
                        cpu=int(t["cpu"]) / 1024, memory=t["memory"]
                    ),
                    "blue",
                    attrs=["dark", "bold"],
                ),
                colored(")", "white"),
                ": ",
                t["lastStatus"].lower(),
                " ",
            ]
            if "startedAt" in t:
                task_line.append(formatted_time_ago(t["startedAt"]))
            print("".join(task_line))
            for c in t["containers"]:
                # Prefer a per-run command override; fall back to the task
                # definition's command (or a placeholder for the image default).
                try:
                    command = [
                        o["command"]
                        for o in t["overrides"]["containerOverrides"]
                        if o["name"] == c["name"]
                    ][0]
                except (KeyError, IndexError):
                    command = [
                        cd.get("command", ["[container default cmd]"])
                        for cd in defn["containerDefinitions"]
                        if cd["name"] == c["name"]
                    ][0]
                print_name = f" {c['name']}:"
                indent = len(print_name) + 1
                print(print_name, colored(" ".join(command), "white"))
                # Second line, aligned under the name: image tag and status.
                container_line2 = [
                    " " * indent,
                    "{image} {status}".format(
                        image=c["image"].split("/")[-1], status=c["lastStatus"].lower()
                    ),
                ]
                cprint("".join(container_line2), attrs=["dark"])
        print("")
|
nilq/baby-python
|
python
|
from setuptools import setup
# Long description is taken verbatim from the README next to setup.py.
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="sku",
    version="0.2",
    description="scikit-learn Utilities",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/mythologic/sku",
    author="Max Humber",
    author_email="max.humber@gmail.com",
    license="MIT",
    packages=["sku"],
    python_requires=">=3.6",
    # setuptools >= 38.6.0 is needed for long_description_content_type.
    setup_requires=["setuptools>=38.6.0"],
)
|
nilq/baby-python
|
python
|
# sim_core/views.py
#################
#### imports ####
#################
from app import app
from flask import render_template, Blueprint
from logger import logger
from flask import current_app
################
#### config ####
################
# Blueprint mounted at /sim; static assets are shared across blueprints while
# templates come from this blueprint's own dist folder.
sim_core_blueprint = Blueprint('sim_core', __name__, static_folder='../shared/static/dist', template_folder='./static/dist', url_prefix='/sim', static_url_path="")

################
#### routes ####
################

@sim_core_blueprint.route('/')
def index():
    """Serve the simulator single-page-app shell."""
    return render_template('sim.html')
|
nilq/baby-python
|
python
|
from unittest import TestCase
from demands.pagination import PaginatedResults, PaginationType
class PaginationTestsMixin(object):
    """Shared cases: `get` fakes a paginated backend by slicing
    self.responses; subclasses translate their paging params into bounds."""

    # Extra positional/keyword args that every page fetch must forward intact.
    args = (1, 2, 3)
    kwargs = {'one': 1, 'two': 2}

    def get(self, start, end, *args, **kwargs):
        # Verify arg pass-through, then serve the requested slice.
        self.assertEqual(args, self.args)
        self.assertEqual(kwargs, self.kwargs)
        return self.responses[start:end]

    def test_iterate_one_undersized_page(self):
        # 5 items with page_size 10: a single partial page.
        self.responses = list(range(5))
        r = list(self.psc)
        self.assertEqual(r, self.responses)

    def test_iterate_multiple_full_pages(self):
        # 20 items: exactly two full pages.
        self.responses = list(range(20))
        r = list(self.psc)
        self.assertEqual(r, self.responses)

    def test_iterate_multiple_pages(self):
        # 25 items: two full pages plus a final partial page.
        self.responses = list(range(25))
        r = list(self.psc)
        self.assertEqual(r, self.responses)
class PagePaginationTest(TestCase, PaginationTestsMixin):
    """Page-number pagination: (page, page_size) mapped to slice bounds."""

    def get(self, *args, **kwargs):
        page = kwargs.pop('page')
        page_size = kwargs.pop('page_size')
        start = (page - 1) * page_size  # pages are 1-based
        end = start + page_size
        return super(PagePaginationTest, self).get(start, end, *args, **kwargs)

    def setUp(self):
        # results_key=None: pages are returned as bare lists.
        self.psc = PaginatedResults(
            self.get, args=self.args, kwargs=self.kwargs, page_size=10,
            results_key=None)
class PagePaginationTestWithNestedResults(PagePaginationTest):
    """As PagePaginationTest, but pages are wrapped in {'results': [...]}."""

    def get(self, *args, **kwargs):
        results = super(PagePaginationTestWithNestedResults, self).get(
            *args, **kwargs)
        return {'results': results}

    def setUp(self):
        # Default results_key ('results') unwraps the nested list.
        self.psc = PaginatedResults(
            self.get, args=self.args, kwargs=self.kwargs, page_size=10)
class ItemPaginationTest(TestCase, PaginationTestsMixin):
    """Item-offset pagination: (offset, limit) map directly to slice bounds."""

    def get(self, *args, **kwargs):
        start = kwargs.pop('offset')
        end = start + kwargs.pop('limit')
        return super(ItemPaginationTest, self).get(start, end, *args, **kwargs)

    def setUp(self):
        self.psc = PaginatedResults(
            self.get, args=self.args, kwargs=self.kwargs, page_size=10,
            page_param='offset', page_size_param='limit',
            pagination_type=PaginationType.ITEM, results_key=None)
class ItemPaginationTestWithNestedResults(ItemPaginationTest):
    """As ItemPaginationTest, but pages are wrapped in {'results': [...]}."""

    def get(self, *args, **kwargs):
        results = super(ItemPaginationTestWithNestedResults, self).get(
            *args, **kwargs)
        return {'results': results}

    def setUp(self):
        self.psc = PaginatedResults(
            self.get, args=self.args, kwargs=self.kwargs, page_size=10,
            page_param='offset', page_size_param='limit',
            pagination_type=PaginationType.ITEM)
class ItemPaginationTestWithNestedResultsAndNextLink(TestCase):
    """Pagination driven by a 'next' link: iteration must stop when the
    backend reports no next page, never requesting the page that raises."""

    def setUp(self):
        self.psc = PaginatedResults(
            self.get, page_size=10,
            page_param='offset', page_size_param='limit',
            pagination_type=PaginationType.ITEM, next_key='next_page')

    def get(self, *args, **kwargs):
        # Emulate 5 full pages (offset 0-4), then emulate error.
        offset = kwargs['offset']
        if offset > 4 * 10:
            raise ValueError('No Data')
        # Last valid page (offset == 40) has no next link.
        next = 'next_url' if offset < 4 * 10 else None
        return {'results': list(
            range(offset, offset + kwargs['limit'])), 'next_page': next}

    def test_iteration_stops_on_empty_next(self):
        self.assertEqual(list(self.psc), list(range(0, 50)))
|
nilq/baby-python
|
python
|
# HackerRank "Any or All": print True when every value is a positive integer
# AND at least one token is a palindrome (string equals its reverse).
# N (the declared count) is read but not used to slice the input.
N, arr = int(input()), input().split()
print(all([int(i) > 0 for i in arr]) and any([i == i[::-1] for i in arr]))
|
nilq/baby-python
|
python
|
import tkinter as tk
import tkinter.filedialog as fd
import src.helper.gui as hg
from src.image.extractor import Extractor
from src.helper.file import File
class ImageExtractForm(tk.Frame):
    """Tk form for extracting a hidden message from a stego image.

    Lets the user pick an image, enter the stego key and an output file
    name, then runs the Extractor and hands off to the app's end screen.
    """

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # App-level controller used for page navigation.
        self.controller = controller
        self.initialize()
        hg.insert_header(self, 'Steganografi Extract Image')
        self.render_file_frame()
        self.render_key_frame()
        self.render_output_frame()
        self.render_execute_frame()

    def initialize(self):
        """Define layout row constants and the Tk variables backing the form."""
        self.TITLE_ROW = 0
        self.FILE_ROW = 1
        self.KEY_ROW = 2
        self.OUTPUT_ROW = 3
        self.EXECUTE_ROW = 4
        self.DEFAULT_OUT_FILENAME = 'extract_result'
        self.image_dir = tk.StringVar()
        self.image_dir.set('')
        self.output_filename = tk.StringVar()
        self.output_filename.set(self.DEFAULT_OUT_FILENAME)

    def render_file_frame(self):
        """Row showing the chosen image path plus a file-picker button."""
        file_frame = hg.create_frame(self, self.FILE_ROW + 1)
        hg.create_label(file_frame, 'Image', 0, 0)
        hg.create_label(file_frame, self.image_dir, 0, 1, fix_text=False)
        hg.create_button(file_frame, 'Choose',
                         lambda: self.load_image_file(), 1, 0)

    def render_key_frame(self):
        """Row with the stego-key entry field."""
        key_frame = hg.create_frame(self, self.KEY_ROW + 1)
        hg.create_label(key_frame, 'Stegano Key:', 0, 0)
        self.key_entry = hg.create_entry(key_frame, "", 1, 0)

    def render_output_frame(self):
        """Row with the output file name entry (extension added later)."""
        output_frame = hg.create_frame(self, self.OUTPUT_ROW + 1)
        hg.create_label(output_frame, 'Output file\'s name:', 0, 0)
        self.output_name = hg.create_entry(
            output_frame, self.DEFAULT_OUT_FILENAME, 1, 0)

    def render_execute_frame(self):
        """Row with the Execute and Back buttons."""
        execute_frame = hg.create_frame(self, self.EXECUTE_ROW + 1)
        hg.create_button(execute_frame, 'Execute',
                         lambda: self.execute(), 0, 0)
        hg.create_button(execute_frame, 'Back',
                         lambda: self.controller.show_frame("StartPage"), 0, 1)

    def load_image_file(self):
        """Open a file dialog restricted to BMP/PNG and store the choice."""
        dialog = fd.askopenfilename(
            filetypes=(("Image File", ('.bmp', '.png')),)
        )
        self.image_dir.set(dialog)

    def execute(self):
        """Run the extraction; silently no-ops when any field is empty."""
        print('Extract Started!')
        print('> Image dir:', self.image_dir.get())
        print('> Key:', self.key_entry.get())
        file_dir = self.image_dir.get()
        key = self.key_entry.get()
        output_filename = self.output_name.get()
        if file_dir == '' or key == '' or output_filename == '':
            return
        extract = Extractor(file_dir, key)
        extract.extract_messages()
        extract.parse_message()
        # Output extension comes from the extracted payload itself.
        file_name = "output/" + output_filename + "." + extract.extension
        output_file = File(file_name)
        byte = extract.write_secret_message()
        output_file.write_files(byte)
        print('Extraction Finished!')
        title = "Finish Extract Secret Message from Image"
        self.controller.show_end_frame(title, "None", file_name, 0)
|
nilq/baby-python
|
python
|
import copy
import operator
from functools import cached_property, reduce
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.functional import mse_loss
from torch.optim import Adam
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import hard_update, soft_update
from ai_traineree.buffers import ReplayBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.bodies import ActorBody, CriticBody
from ai_traineree.noise import GaussianNoise
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class DDPGAgent(AgentBase):
"""
Deep Deterministic Policy Gradients (DDPG).
Instead of popular Ornstein-Uhlenbeck (OU) process for noise this agent uses Gaussian noise.
This agent is intended for continuous tasks.
"""
model = "DDPG"
    def __init__(
        self, obs_space: DataSpace, action_space: DataSpace, noise_scale: float=0.2, noise_sigma: float=0.1, **kwargs
    ):
        """
        Parameters:
            obs_space (DataSpace): Dataspace describing the input.
            action_space (DataSpace): Dataspace describing the output.
            noise_scale (float): Added noise amplitude. Default: 0.2.
            noise_sigma (float): Added noise variance. Default: 0.1.

        Keyword parameters:
            hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (64, 64).
            gamma (float): Discount value. Default: 0.99.
            tau (float): Soft-copy factor. Default: 0.02.
            actor_lr (float): Learning rate for the actor (policy). Default: 0.0003.
            critic_lr (float): Learning rate for the critic (value function). Default: 0.0003.
            max_grad_norm_actor (float): Maximum norm value for actor gradient. Default: 10.
            max_grad_norm_critic (float): Maximum norm value for critic gradient. Default: 10.
            batch_size (int): Number of samples used in learning. Default: 64.
            buffer_size (int): Maximum number of samples to store. Default: 1e6.
            warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
            update_freq (int): Number of steps between each learning step. Default 1.
            number_updates (int): How many times to use learning step in the learning phase. Default: 1.
        """
        super().__init__(**kwargs)
        self.device = self._register_param(kwargs, "device", DEVICE)
        self.obs_space = obs_space
        self.action_space = action_space
        self._config['obs_space'] = self.obs_space
        self._config['action_space'] = self.action_space
        action_shape = action_space.to_feature()
        action_size = reduce(operator.mul, action_shape)

        # Networks: actor maps obs -> tanh-bounded action; critic maps
        # (obs, action) -> Q. Separate target copies provide stable TD targets.
        hidden_layers = to_numbers_seq(self._register_param(kwargs, 'hidden_layers', (64, 64)))
        self.actor = ActorBody(
            obs_space.shape, action_shape, hidden_layers=hidden_layers, gate_out=torch.tanh).to(self.device)
        self.critic = CriticBody(
            obs_space.shape, action_size, hidden_layers=hidden_layers).to(self.device)
        self.target_actor = ActorBody(
            obs_space.shape, action_shape, hidden_layers=hidden_layers, gate_out=torch.tanh).to(self.device)
        self.target_critic = CriticBody(
            obs_space.shape, action_size, hidden_layers=hidden_layers).to(self.device)

        # Exploration noise. NOTE(review): mu=1e-8 rather than 0 — presumably
        # to avoid an exactly-zero mean; confirm intent.
        self.noise = GaussianNoise(
            shape=action_shape, mu=1e-8, sigma=noise_sigma, scale=noise_scale, device=self.device)

        # Start targets as exact copies of the online networks.
        hard_update(self.target_actor, self.actor)
        hard_update(self.target_critic, self.critic)

        # Optimizers and learning hyperparameters (all overridable via kwargs).
        self.actor_lr = float(self._register_param(kwargs, 'actor_lr', 3e-4))
        self.critic_lr = float(self._register_param(kwargs, 'critic_lr', 3e-4))
        self.actor_optimizer = Adam(self.actor.parameters(), lr=self.actor_lr)
        self.critic_optimizer = Adam(self.critic.parameters(), lr=self.critic_lr)
        self.max_grad_norm_actor = float(self._register_param(kwargs, "max_grad_norm_actor", 10.0))
        self.max_grad_norm_critic = float(self._register_param(kwargs, "max_grad_norm_critic", 10.0))
        self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
        self.tau = float(self._register_param(kwargs, 'tau', 0.02))
        self.batch_size = int(self._register_param(kwargs, 'batch_size', 64))
        self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e6)))
        self.buffer = ReplayBuffer(self.batch_size, self.buffer_size)
        self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
        self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
        self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))

        # Breath, my child.
        self.reset_agent()
        self.iteration = 0
        self._loss_actor = 0.
        self._loss_critic = 0.
def reset_agent(self) -> None:
self.actor.reset_parameters()
self.critic.reset_parameters()
self.target_actor.reset_parameters()
self.target_critic.reset_parameters()
    @property
    def loss(self) -> Dict[str, float]:
        """Most recent learning losses, keyed 'actor' and 'critic'."""
        return {'actor': self._loss_actor, 'critic': self._loss_critic}

    @loss.setter
    def loss(self, value):
        # A dict sets each loss individually; a scalar is applied to both.
        if isinstance(value, dict):
            self._loss_actor = value['actor']
            self._loss_critic = value['critic']
        else:
            self._loss_actor = value
            self._loss_critic = value
    def __eq__(self, o: object) -> bool:
        # Agents compare equal when they share configuration, replay-buffer
        # contents and network weights. The base-class comparison runs first
        # and short-circuits the costlier buffer/network checks.
        return super().__eq__(o) \
            and isinstance(o, type(self)) \
            and self._config == o._config \
            and self.buffer == o.buffer \
            and self.get_network_state() == o.get_network_state()

    @cached_property
    def action_min(self):
        # Lower bound of the action space as a tensor; cached because the
        # action space does not change after construction.
        return to_tensor(self.action_space.low)

    @cached_property
    def action_max(self):
        # Upper bound of the action space as a tensor; cached for the same
        # reason as `action_min`.
        return to_tensor(self.action_space.high)
    @torch.no_grad()
    def act(self, obs: ObsType, noise: float=0.0) -> List[float]:
        """Acting on the observations. Returns action.

        Parameters:
            obs (array_like): current state
            noise (optional float): scale applied to the sampled Gaussian
                noise added to the deterministic action. Default 0 (no noise).

        Returns:
            action: (list float) Action values.

        """
        t_obs = to_tensor(obs).float().to(self.device)
        action = self.actor(t_obs)
        # Exploration: additive scaled Gaussian noise, then clip back into
        # the action box so the env never sees an out-of-range action.
        action += noise*self.noise.sample()
        action = torch.clamp(action, self.action_min, self.action_max)
        return action.cpu().numpy().tolist()
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
self.iteration += 1
self.buffer.add(state=obs, action=action, reward=reward, next_state=next_obs, done=done)
if self.iteration < self.warm_up:
return
if len(self.buffer) > self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
    def compute_value_loss(self, states, actions, next_states, rewards, dones):
        """TD(0) critic loss: MSE between Q(s, a) and the bootstrapped target.

        Target: r + gamma * Q_target(s', Actor_target(s')), masked on
        terminal transitions via `(1 - dones)`.
        """
        next_actions = self.target_actor.act(next_states)
        assert next_actions.shape == actions.shape, f"{next_actions.shape} != {actions.shape}"
        Q_target_next = self.target_critic.act(next_states, next_actions)
        Q_target = rewards + self.gamma * Q_target_next * (1 - dones)
        Q_expected = self.critic(states, actions)
        assert Q_expected.shape == Q_target.shape == Q_target_next.shape
        return mse_loss(Q_expected, Q_target)

    def compute_policy_loss(self, states) -> Tensor:
        """Compute Policy loss based on provided states.

        Loss = Mean(-Q(s, _a) ),
        where _a is actor's estimate based on state, _a = Actor(s).
        """
        pred_actions = self.actor(states)
        return -self.critic(states, pred_actions).mean()
    def learn(self, experiences) -> None:
        """Update critics and actors from one sampled batch.

        Order matters: the critic is optimized on the TD target first, then
        the actor against the just-updated critic, and finally both target
        networks are soft-updated toward their online counterparts.
        """
        # Rewards/dones gain a trailing dim so they broadcast against Q values.
        rewards = to_tensor(experiences['reward']).float().to(self.device).unsqueeze(1)
        dones = to_tensor(experiences['done']).type(torch.int).to(self.device).unsqueeze(1)
        states = to_tensor(experiences['state']).float().to(self.device)
        actions = to_tensor(experiences['action']).float().to(self.device).view((-1,) + self.action_space.shape)
        next_states = to_tensor(experiences['next_state']).float().to(self.device)
        assert rewards.shape == dones.shape == (self.batch_size, 1), f"R.shape={rewards.shape}, D.shap={dones.shape}"
        assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape, f"states.shape: {states.shape}"
        assert actions.shape == (self.batch_size,) + self.action_space.shape, f"actions.shape: {actions.shape}"  # type: ignore
        # Value (critic) optimization
        loss_critic = self.compute_value_loss(states, actions, next_states, rewards, dones)
        self.critic_optimizer.zero_grad()
        loss_critic.backward()
        # Gradient clipping keeps early, noisy TD errors from destabilizing training.
        nn.utils.clip_grad_norm_(self.critic.parameters(), self.max_grad_norm_critic)
        self.critic_optimizer.step()
        self._loss_critic = float(loss_critic.item())
        # Policy (actor) optimization
        loss_actor = self.compute_policy_loss(states)
        self.actor_optimizer.zero_grad()
        loss_actor.backward()
        nn.utils.clip_grad_norm_(self.actor.parameters(), self.max_grad_norm_actor)
        self.actor_optimizer.step()
        self._loss_actor = loss_actor.item()
        # Soft update target weights
        soft_update(self.target_actor, self.actor, self.tau)
        soft_update(self.target_critic, self.critic, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Describes agent's networks.
Returns:
state: (dict) Provides actors and critics states.
"""
return {
"actor": self.actor.state_dict(),
"target_actor": self.target_actor.state_dict(),
"critic": self.critic.state_dict(),
"target_critic": self.target_critic.state_dict()
}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/actor", self._loss_actor, step)
data_logger.log_value("loss/critic", self._loss_critic, step)
if full_log:
for idx, layer in enumerate(self.actor.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"actor/layer_weights_{idx}", layer.weight, step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"actor/layer_bias_{idx}", layer.bias, step)
for idx, layer in enumerate(self.critic.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"critic/layer_weights_{idx}", layer.weight, step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"critic/layer_bias_{idx}", layer.bias, step)
    def get_state(self) -> AgentState:
        """Snapshot the full agent: config, replay buffer and network weights.

        Buffer and network states are deep-copied so later training does not
        mutate the returned snapshot.
        """
        return AgentState(
            model=self.model,
            obs_space=self.obs_space,
            action_space=self.action_space,
            config=self._config,
            buffer=copy.deepcopy(self.buffer.get_state()),
            network=copy.deepcopy(self.get_network_state()),
        )

    def get_network_state(self) -> NetworkState:
        """Collect the state dicts of all four networks into a NetworkState."""
        net = dict(
            actor=self.actor.state_dict(),
            target_actor=self.target_actor.state_dict(),
            critic=self.critic.state_dict(),
            target_critic=self.target_critic.state_dict(),
        )
        return NetworkState(net=net)
    @staticmethod
    def from_state(state: AgentState) -> AgentBase:
        """Reconstruct a DDPGAgent from a previously captured AgentState.

        Network weights and buffer contents are restored only when present
        in the snapshot.
        """
        config = copy.copy(state.config)
        config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
        agent = DDPGAgent(**config)
        if state.network is not None:
            agent.set_network(state.network)
        if state.buffer is not None:
            agent.set_buffer(state.buffer)
        return agent

    def set_buffer(self, buffer_state: BufferState) -> None:
        # Replace the replay buffer wholesale with one rebuilt from the state.
        self.buffer = BufferFactory.from_state(buffer_state)

    def set_network(self, network_state: NetworkState) -> None:
        # Load the saved weights into the existing networks.
        # NOTE(review): only the actor's state dict is deep-copied before
        # loading, the other three are not — confirm whether the asymmetry
        # is intentional before relying on it.
        self.actor.load_state_dict(copy.deepcopy(network_state.net['actor']))
        self.target_actor.load_state_dict(network_state.net['target_actor'])
        self.critic.load_state_dict(network_state.net['critic'])
        self.target_critic.load_state_dict(network_state.net['target_critic'])

    def save_state(self, path: str) -> None:
        """Serialize the agent snapshot to `path` via `torch.save`."""
        agent_state = self.get_state()
        torch.save(agent_state, path)
def load_state(self, *, path: Optional[str]=None, agent_state: Optional[dict]=None):
if path is None and agent_state:
raise ValueError("Either `path` or `agent_state` must be provided to load agent's state.")
if path is not None and agent_state is None:
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.actor.load_state_dict(agent_state['actor'])
self.critic.load_state_dict(agent_state['critic'])
self.target_actor.load_state_dict(agent_state['target_actor'])
self.target_critic.load_state_dict(agent_state['target_critic'])
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
flask_jsonschema
~~~~~~~~~~~~~~~~
flask_jsonschema
"""
import os
from functools import wraps
try:
import simplejson as json
except ImportError:
import json
from flask import current_app, request
from jsonschema import ValidationError, validate
class _JsonSchema(object):
def __init__(self, schemas):
self._schemas = schemas
def get_schema(self, path):
rv = self._schemas[path[0]]
for p in path[1:]:
rv = rv[p]
return rv
class JsonSchema(object):
    """Flask extension validating request JSON against schemas loaded from disk.

    Schemas are read from ``app.config['JSONSCHEMA_DIR']`` (default:
    ``<app.root_path>/jsonschema``); each ``*.json`` file becomes a top-level
    schema keyed by its file name without extension.
    """

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self._state = self.init_app(app)

    def init_app(self, app):
        """Load all schema files and register the extension state on `app`."""
        default_dir = os.path.join(app.root_path, 'jsonschema')
        schema_dir = app.config.get('JSONSCHEMA_DIR', default_dir)
        schemas = {}
        for fn in os.listdir(schema_dir):
            key = fn.split('.')[0]
            fn = os.path.join(schema_dir, fn)
            # Skip sub-directories and anything that is not a JSON document.
            if os.path.isdir(fn) or not fn.endswith('.json'):
                continue
            with open(fn) as f:
                schemas[key] = json.load(f)
        state = _JsonSchema(schemas)
        app.extensions['jsonschema'] = state
        return state

    def validate(self, *path):
        """Decorator validating ``request.json`` against the schema at `path`.

        Raises jsonschema.ValidationError when the request body does not
        match (the module-level `validate` is jsonschema's, not this method).
        """
        def wrapper(fn):
            @wraps(fn)
            def decorated(*args, **kwargs):
                schema = current_app.extensions['jsonschema'].get_schema(path)
                validate(request.json, schema)
                return fn(*args, **kwargs)
            return decorated
        return wrapper

    def __getattr__(self, name):
        # Bug fix: when the extension is created without an app, `_state`
        # does not exist and the old `getattr(self._state, ...)` re-entered
        # __getattr__ forever (RecursionError). Use __dict__ directly.
        state = self.__dict__.get('_state')
        if state is None:
            return None
        return getattr(state, name, None)
|
nilq/baby-python
|
python
|
from tortoise.contrib.pydantic import pydantic_model_creator
from typing import Optional
from pydantic import BaseModel
from db.models import Meals
# Input schema: read-only model fields are excluded so clients cannot set
# them on create.
MealsInSchema = pydantic_model_creator(
    Meals, name="MealIn", exclude_readonly=True
)
# Response schema: hides the internal `created_on` timestamp.
MealsOutSchema = pydantic_model_creator(
    Meals, name="MealOut", exclude=["created_on"]
)
# Database-facing schema, also without `created_on`.
MealsDatabaseSchema = pydantic_model_creator(
    Meals, name="Meal", exclude=["created_on"]
)


class UpdateMeal(BaseModel):
    # Partial-update payload: the field is optional so PATCH-style requests
    # can send only what changes.
    name: Optional[str]
|
nilq/baby-python
|
python
|
"""
Host Guest Complex
==================
"""
from __future__ import annotations
import typing
from collections import abc
from ...molecules import BuildingBlock
from ...reactions import GenericReactionFactory
from ..topology_graph import (
ConstructionState,
NullOptimizer,
Optimizer,
TopologyGraph,
Vertex,
)
from .vertices import GuestVertex, HostVertex
class Guest:
    """
    Holds the data defining the placement of a guest molecule.
    """

    def __init__(
        self,
        building_block: BuildingBlock,
        start_vector: tuple[float, float, float] = (1., 0., 0.),
        end_vector: tuple[float, float, float] = (1., 0., 0.),
        displacement: tuple[float, float, float] = (1., 0., 0.),
    ) -> None:
        """
        Initialize a :class:`.Guest` instance.

        Parameters:

            building_block: The guest molecule.

            start_vector: A direction vector which gets aligned with
                `end_vector` during placement.

            end_vector: A direction vector defining the rotation applied
                to `building_block`: the applied rotation maps
                `start_vector` onto `end_vector`.

            displacement: The translational offset of the guest.

        """
        self._building_block = building_block
        self._start_vector = start_vector
        self._end_vector = end_vector
        self._displacement = displacement

    def get_building_block(self) -> BuildingBlock:
        """
        Return the guest molecule.
        """
        return self._building_block

    def get_start_vector(self) -> tuple[float, float, float]:
        """
        Return the vector that gets aligned with the end vector.
        """
        return self._start_vector

    def get_end_vector(self) -> tuple[float, float, float]:
        """
        Return the vector the start vector is rotated onto.
        """
        return self._end_vector

    def get_displacement(self) -> tuple[float, float, float]:
        """
        Return the translational offset of the guest.
        """
        return self._displacement

    def __repr__(self) -> str:
        cls = self.__class__.__name__
        return (
            f'{cls}('
            f'{self._building_block!r}, '
            f'start_vector={self._start_vector!r}, '
            f'end_vector={self._end_vector!r}, '
            f'displacement={self._displacement!r})'
        )
class Complex(TopologyGraph):
"""
Represents a host-guest complex topology graph.
Host and guest building blocks do not require functional groups.
Examples:
*Construction*
You can use :class:`.ConstructedMolecule` instances as the
host, but you should turn them into a :class:`.BuildingBlock`
first
.. testcode:: construction
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=stk.host_guest.Guest(
building_block=stk.BuildingBlock('[Br][Br]'),
),
),
)
.. moldoc::
import moldoc.molecule as molecule
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=stk.host_guest.Guest(
building_block=stk.BuildingBlock('[Br][Br]'),
),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in complex.get_bonds()
),
)
You can also generate complexes with multiple guests.
.. testcode:: multi-guest-construction
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
),
)
.. moldoc::
import moldoc.molecule as molecule
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in complex.get_bonds()
),
)
*Suggested Optimization*
For :class:`.Complex` topologies, it is recommended to use the
:class:`.Spinner` optimizer. It is also recommended that the
building blocks are already optimized prior to construction.
This optimizer will work on multi-guest systems.
.. testcode:: suggested-optimization
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
optimizer=stk.Spinner(),
),
)
.. moldoc::
import moldoc.molecule as molecule
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='NC1CCCCC1N',
functional_groups=[
stk.PrimaryAminoFactory(),
],
),
stk.BuildingBlock(
smiles='O=Cc1cc(C=O)cc(C=O)c1',
functional_groups=[stk.AldehydeFactory()],
),
),
optimizer=stk.MCHammer(),
),
)
guest1 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('BrBr'),
displacement=(0., 3., 0.),
)
guest2 = stk.host_guest.Guest(
building_block=stk.BuildingBlock('C1CCCC1'),
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=(guest1, guest2),
optimizer=stk.Spinner(),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in complex.get_bonds()
),
)
*Changing the Position of the Guest*
You can change the position and orientation of the guest, as
well as its displacement
.. testcode:: changing-the-position-of-the-guest
import stk
host = stk.ConstructedMolecule(
topology_graph=stk.cage.FourPlusSix(
building_blocks=(
stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
),
stk.BuildingBlock(
smiles='BrCC(Br)CBr',
functional_groups=[stk.BromoFactory()],
),
),
),
)
guest_building_block = stk.BuildingBlock('[Br][Br]')
guest = stk.host_guest.Guest(
building_block=guest_building_block,
# Apply a rotation onto the guest molecule such that
# the vector returned by get_direction() has the same
# direction as [1, 1, 1].
start_vector=guest_building_block.get_direction(),
end_vector=[1, 1, 1],
# Change the displacement of the guest.
displacement=[5.3, 2.1, 7.1],
)
complex = stk.ConstructedMolecule(
topology_graph=stk.host_guest.Complex(
host=stk.BuildingBlock.init_from_molecule(host),
guests=guest,
),
)
"""
    def __init__(
        self,
        host: BuildingBlock,
        guests: typing.Union[Guest, typing.Iterable[Guest]],
        num_processes: int = 1,
        optimizer: Optimizer = NullOptimizer(),
    ) -> None:
        """
        Initialize an instance of :class:`.Complex`.

        Parameters:

            host: The host molecule.

            guests: The guest molecules. Can be a single
                :class:`.Guest` instance if only one guest is being
                used.

            num_processes: The number of parallel processes to create
                during :meth:`construct`.

            optimizer: Used to optimize the structure of the
                constructed molecule.

        """
        building_block_vertices = self._get_vertices_from_guests(
            host=host,
            guests=guests,
        )
        # Host-guest complexes form no bonds between components, so the
        # graph is built with no edges, reactions stages or edge groups.
        super().__init__(
            building_block_vertices=building_block_vertices,
            edges=(),
            reaction_factory=GenericReactionFactory(),
            construction_stages=(),
            num_processes=num_processes,
            optimizer=optimizer,
            edge_groups=(),
        )
def _get_vertices_from_guests(
self,
host: BuildingBlock,
guests: typing.Union[Guest, typing.Iterable[Guest]],
) -> dict[BuildingBlock, abc.Sequence[Vertex]]:
if isinstance(guests, Guest):
guests = (guests, )
building_block_vertices: dict[
BuildingBlock, abc.Sequence[Vertex]
]
building_block_vertices = {
host: (HostVertex(0, (0., 0., 0.)), )
}
guest_vertices = {
guest.get_building_block(): (GuestVertex(
id=i+1,
position=guest.get_displacement(),
start=guest.get_start_vector(),
target=guest.get_end_vector(),
), )
for i, guest in enumerate(guests)
}
building_block_vertices.update(guest_vertices)
return building_block_vertices
    def clone(self) -> Complex:
        """Return a clone of the topology graph."""
        return self._clone()

    def _run_reactions(
        self,
        state: ConstructionState,
    ) -> ConstructionState:
        # Host-guest complexes form no bonds, so the construction state
        # passes through unchanged.
        return state

    def _get_scale(
        self,
        building_block_vertices: dict[
            BuildingBlock, abc.Sequence[Vertex]
        ],
    ) -> float:
        # Vertex positions are used as-is; no scaling is applied.
        return 1.

    def __repr__(self) -> str:
        return 'host_guest.Complex()'
|
nilq/baby-python
|
python
|
import bs4
import json
import requests
import time

from utils import (get_content, get_soup, save_json, load_json)

# MyAnimeList manga search endpoint, pre-filtered to type=1; the query
# string is appended to this URL.
MANGA_SEARCH_URL = 'https://myanimelist.net/manga.php?type=1&q='

# load series information
all_series = load_json("data.json")

for series in all_series:
    # search on MyAnimeList
    query_soup = get_soup(get_content(MANGA_SEARCH_URL + series['name']))
    time.sleep(15) # rate limiting
    # First result row: the row after the results-table header.
    table_row_tag = query_soup.find('div', class_='js-categories-seasonal').tr.next_sibling
    link_tag = table_row_tag.find('a', class_='hoverinfo_trigger fw-b')

    # series name in english
    name_en = link_tag.strong.text
    print(f'{series["name"]} | {name_en}')

    # parse series page
    info_url = link_tag['href']
    info_soup = get_soup(get_content(info_url))
    time.sleep(15) # rate limiting
    container = info_soup.find('div', class_='js-scrollfix-bottom')

    # author
    author_tags = container.find('span', string='Authors:').parent.find_all('a')
    author = ''
    for tag in author_tags:
        # The author name is taken from the profile URL slug (underscores
        # become spaces); the text after the link describes their role.
        author_name = tag['href'].rsplit('/', 1)[1].replace('_', ' ')
        author_work = tag.next_sibling # story, art or both
        author += author_name + author_work

    # update series information
    series['name'] = name_en
    series['author'] = author

# save updated series information
save_json("data.json", all_series)
|
nilq/baby-python
|
python
|
from random import randint
from game_map.direction import Direction
from game_map.rect import Rect
class Room(Rect):
    """
    A Room is just a Rect that can tell you where its walls are
    """
    def __init__(self, x, y, width, height):
        super(Room, self).__init__(x, y, width, height)

    def get_wall(self, direction):
        """
        Find the first wall in a given direction

        :param Direction direction: direction to look
        :return int, int, int, int: x1,y1, x2,y2 defining the wall
        :raises ValueError: if direction is not one of the four Directions
        """
        if direction == Direction.UP:
            return self.x1, self.y1 - 1, self.x2, self.y1 - 1
        elif direction == Direction.RIGHT:
            return self.x2 + 1, self.y1, self.x2 + 1, self.y2
        elif direction == Direction.DOWN:
            return self.x1, self.y2 + 1, self.x2, self.y2 + 1
        elif direction == Direction.LEFT:
            return self.x1 - 1, self.y1, self.x1 - 1, self.y2
        # Bug fix: previously fell through and returned None for an unknown
        # direction, surfacing later as a confusing TypeError on unpacking.
        raise ValueError('unknown direction: {!r}'.format(direction))

    def get_wall_point(self, direction=None):
        """
        Returns a random point from the wall in the indicated direction

        :param Direction direction: wall to pick from; random if None
        :return int, int: x, y point along wall
        """
        if direction is None:
            direction = Direction.random_direction()
        x1, y1, x2, y2 = self.get_wall(direction)
        # Walls are axis-aligned: one randint range collapses to a single
        # value, the other spans the wall's length (randint is inclusive).
        x = randint(x1, x2)
        y = randint(y1, y2)
        return x, y
|
nilq/baby-python
|
python
|
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
from collections import deque, defaultdict
def pay(adj, ln):
    """Weighted tree diameter via double BFS, bucketed into a toll cost.

    Returns (cost, diameter) where cost is 10000/1000/100/0 depending on
    which threshold the diameter exceeds.
    """
    def farthest_from(start):
        # Weighted BFS; distances are exact because a tree has exactly one
        # path between any two vertices.
        seen = [False] * ln
        dist = [0] * ln
        seen[start] = True
        best_vertex, best_dist = start, 0
        queue = deque([start])
        while queue:
            u = queue.popleft()
            for v, w in adj[u]:
                if seen[v]:
                    continue
                seen[v] = True
                dist[v] = dist[u] + w
                queue.append(v)
                if dist[v] >= best_dist:
                    best_dist = dist[v]
                    best_vertex = v
        return best_vertex, best_dist

    # Standard trick: a BFS from any vertex reaches one diameter endpoint;
    # a second BFS from that endpoint measures the diameter itself.
    endpoint, _ = farthest_from(0)
    _, diameter = farthest_from(endpoint)

    if diameter > 10000:
        cost = 10000
    elif diameter > 1000:
        cost = 1000
    elif diameter > 100:
        cost = 100
    else:
        cost = 0
    return cost, diameter
# Input format: t test cases; each gives n vertices followed by n-1 weighted
# edges (1-indexed endpoints).
t = int(input())
for _ in range(t):
    n = int(input())
    adjacency = defaultdict(list)
    for _ in range(n - 1):
        a, b, weight = map(int, input().strip().split())
        # Convert endpoints to 0-indexed and store the edge in both
        # directions (undirected tree).
        a -= 1
        b -= 1
        adjacency[a].append((b, weight))
        adjacency[b].append((a, weight))
    # Prints "cost diameter" for the test case.
    print(*pay(adjacency, n))
|
nilq/baby-python
|
python
|
import unittest
from mock import Mock
from foundations_events.producers.jobs.run_job import RunJob
class TestProducerRunJob(unittest.TestCase):
    """Unit tests for the RunJob producer's routing channel and payload."""

    def setUp(self):
        from foundations_internal.foundations_job import FoundationsJob

        self.route_name = None
        self.message = None

        self._foundations_job = FoundationsJob()
        self._foundations_job.job_id = 'some_project'

        # Capture pushes instead of routing them anywhere real.
        self._router = Mock()
        self._router.push_message.side_effect = self._push_message

        self._producer = RunJob(self._router, self._foundations_job)

    def test_push_message_sends_run_job_message_to_correct_channel(self):
        self._producer.push_message()
        self.assertEqual('run_job', self.route_name)

    def test_push_message_sends_run_job_message_with_job_id(self):
        self._foundations_job.job_id = 'my fantastic job'
        self._foundations_job.project_name = 'this project'
        self._producer.push_message()
        self.assertEqual({'job_id': 'my fantastic job',
                          'project_name': 'this project',
                          'monitor_name': 'None'}, self.message)

    def test_push_message_sends_run_job_message_with_job_id_different_job_different_project(self):
        self._foundations_job.job_id = 'neural nets in space!'
        self._foundations_job.project_name = 'that project'
        self._producer.push_message()
        self.assertEqual({'job_id': 'neural nets in space!',
                          'project_name': 'that project',
                          'monitor_name': 'None'}, self.message)

    def _push_message(self, route_name, message):
        # Side-effect hook: records the last (route, payload) pair pushed.
        self.route_name = route_name
        self.message = message
|
nilq/baby-python
|
python
|
import os

# Absolute, symlink-resolved path of the directory containing this module;
# lets tests locate fixture files independently of the current working dir.
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
|
nilq/baby-python
|
python
|
# Copyright (c) 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import errno
from yardstick.tests import STL_MOCKS
from yardstick.common import exceptions as y_exceptions
from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqGen
from yardstick.network_services.vnf_generic.vnf.prox_irq import ProxIrqVNF
from yardstick.benchmark.contexts import base as ctx_base
# Dotted path of the SSH helper patched in the tests below.
SSH_HELPER = 'yardstick.network_services.vnf_generic.vnf.sample_vnf.VnfSshHelper'

STLClient = mock.MagicMock()
# Replace the TRex STL modules in sys.modules with mocks BEFORE importing
# anything that depends on them.
stl_patch = mock.patch.dict("sys.modules", STL_MOCKS)
stl_patch.start()

# NOTE(review): a patcher object is always truthy, so this guard always
# holds; it only scopes the imports that require the patched modules.
if stl_patch:
    from yardstick.network_services.vnf_generic.vnf import prox_vnf
    from yardstick.tests.unit.network_services.vnf_generic.vnf.test_base import mock_ssh

VNF_NAME = "vnf__1"
class TestProxIrqVNF(unittest.TestCase):
SCENARIO_CFG = {
'task_path': "",
'nodes': {
'tg__1': 'trafficgen_1.yardstick',
'vnf__1': 'vnf.yardstick'},
'runner': {
'duration': 600, 'type': 'Duration'},
'topology': 'prox-tg-topology-2.yaml',
'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
'type': 'NSPerf',
'options': {
'tg__1': {'prox_args': {'-e': '',
'-t': ''},
'prox_config': 'configs/l3-gen-2.cfg',
'prox_path':
'/root/dppd-PROX-v035/build/prox'},
'vnf__1': {
'prox_args': {'-t': ''},
'prox_config': 'configs/l3-swap-2.cfg',
'prox_path': '/root/dppd-PROX-v035/build/prox'}}}
VNFD_0 = {
'short-name': 'VpeVnf',
'vdu': [
{
'routing_table': [
{
'network': '152.16.100.20',
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'if': 'xe0'
},
{
'network': '152.16.40.20',
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'if': 'xe1'
},
],
'description': 'VPE approximation using DPDK',
'name': 'vpevnf-baremetal',
'nd_route_tbl': [
{
'network': '0064:ff9b:0:0:0:0:9810:6414',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0'
},
{
'network': '0064:ff9b:0:0:0:0:9810:2814',
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1'
},
],
'id': 'vpevnf-baremetal',
'external-interface': [
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:03',
'vpci': '0000:05:00.0',
'local_ip': '152.16.100.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 0,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.100.20',
'local_mac': '00:00:00:00:00:01'
},
'vnfd-connection-point-ref': 'xe0',
'name': 'xe0'
},
{
'virtual-interface': {
'dst_mac': '00:00:00:00:00:04',
'vpci': '0000:05:00.1',
'local_ip': '152.16.40.19',
'type': 'PCI-PASSTHROUGH',
'netmask': '255.255.255.0',
'dpdk_port_num': 1,
'bandwidth': '10 Gbps',
'dst_ip': '152.16.40.20',
'local_mac': '00:00:00:00:00:02'
},
'vnfd-connection-point-ref': 'xe1',
'name': 'xe1'
},
],
},
],
'description': 'Vpe approximation using DPDK',
'mgmt-interface': {
'vdu-id': 'vpevnf-baremetal',
'host': '1.1.1.1',
'password': 'r00t',
'user': 'root',
'ip': '1.1.1.1'
},
'benchmark': {
'kpi': [
'packets_in',
'packets_fwd',
'packets_dropped',
],
},
'connection-point': [
{
'type': 'VPORT',
'name': 'xe0',
},
{
'type': 'VPORT',
'name': 'xe1',
},
],
'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
}
VNFD = {
'vnfd:vnfd-catalog': {
'vnfd': [
VNFD_0,
]
}
}
TRAFFIC_PROFILE = {
"schema": "isb:traffic_profile:0.1",
"name": "fixed",
"description": "Fixed traffic profile to run UDP traffic",
"traffic_profile": {
"traffic_type": "FixedTraffic",
"frame_rate": 100, # pps
"flow_number": 10,
"frame_size": 64,
},
}
CONTEXT_CFG = {
'nodes': {
'tg__2': {
'member-vnf-index': '3',
'role': 'TrafficGen',
'name': 'trafficgen_2.yardstick',
'vnfd-id-ref': 'tg__2',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens513f0',
'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.20',
'dst_mac': '00:00:00:00:00:01',
'local_mac': '00:00:00:00:00:03',
'dst_ip': '152.16.40.19',
'driver': 'ixgbe',
'vpci': '0000:02:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens513f1',
'netmask': '255.255.255.0',
'network': '202.16.100.0',
'local_ip': '202.16.100.20',
'local_mac': '00:1e:67:d0:60:5d',
'driver': 'ixgbe',
'vpci': '0000:02:00.1',
'dpdk_port_num': 1,
},
},
'password': 'r00t',
'VNF model': 'l3fwd_vnf.yaml',
'user': 'root',
},
'tg__1': {
'member-vnf-index': '1',
'role': 'TrafficGen',
'name': 'trafficgen_1.yardstick',
'vnfd-id-ref': 'tg__1',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens785f0',
'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.20',
'dst_mac': '00:00:00:00:00:02',
'local_mac': '00:00:00:00:00:04',
'dst_ip': '152.16.100.19',
'driver': 'i40e',
'vpci': '0000:05:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens785f1',
'netmask': '255.255.255.0',
'local_ip': '152.16.100.21',
'local_mac': '00:00:00:00:00:01',
'driver': 'i40e',
'vpci': '0000:05:00.1',
'dpdk_port_num': 1,
},
},
'password': 'r00t',
'VNF model': 'tg_rfc2544_tpl.yaml',
'user': 'root',
},
'vnf__1': {
'name': 'vnf.yardstick',
'vnfd-id-ref': 'vnf__1',
'ip': '1.2.1.1',
'interfaces': {
'xe0': {
'local_iface_name': 'ens786f0',
'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.100.19',
'dst_mac': '00:00:00:00:00:04',
'local_mac': '00:00:00:00:00:02',
'dst_ip': '152.16.100.20',
'driver': 'i40e',
'vpci': '0000:05:00.0',
'dpdk_port_num': 0,
},
'xe1': {
'local_iface_name': 'ens786f1',
'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
'netmask': '255.255.255.0',
'local_ip': '152.16.40.19',
'dst_mac': '00:00:00:00:00:03',
'local_mac': '00:00:00:00:00:01',
'dst_ip': '152.16.40.20',
'driver': 'i40e',
'vpci': '0000:05:00.1',
'dpdk_port_num': 1,
},
},
'routing_table': [
{
'netmask': '255.255.255.0',
'gateway': '152.16.100.20',
'network': '152.16.100.20',
'if': 'xe0',
},
{
'netmask': '255.255.255.0',
'gateway': '152.16.40.20',
'network': '152.16.40.20',
'if': 'xe1',
},
],
'member-vnf-index': '2',
'host': '1.2.1.1',
'role': 'vnf',
'user': 'root',
'nd_route_tbl': [
{
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:6414',
'network': '0064:ff9b:0:0:0:0:9810:6414',
'if': 'xe0',
},
{
'netmask': '112',
'gateway': '0064:ff9b:0:0:0:0:9810:2814',
'network': '0064:ff9b:0:0:0:0:9810:2814',
'if': 'xe1',
},
],
'password': 'r00t',
'VNF model': 'prox_vnf.yaml',
},
},
}
    def test___init__(self):
        # Construction stores the VNF name and wraps the raw VNFD dict.
        prox_irq_vnf = ProxIrqVNF('vnf1', self.VNFD_0)
        self.assertEqual(prox_irq_vnf.name, 'vnf1')
        self.assertDictEqual(prox_irq_vnf.vnfd_helper, self.VNFD_0)
@mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
@mock.patch(SSH_HELPER)
def test_collect_kpi(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
resource_helper = mock.MagicMock()
resource_helper = mock.MagicMock()
core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10,
'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12,
'overflow': 10}
core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0,
'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12,
'overflow': 10}
irq_data = {'core_1': core_1, 'core_2': core_2}
resource_helper.execute.return_value = (irq_data)
build_config_file = mock.MagicMock()
build_config_file.return_value = None
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
startup = ["global", [["eal", "-4"]]]
master_0 = ["core 0", [["mode", "master"]]]
core_1 = ["core 1", [["mode", "irq"]]]
core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]]
prox_irq_vnf.setup_helper._prox_config_data = \
[startup, master_0, core_1, core_2]
prox_irq_vnf.scenario_helper.scenario_cfg = self.SCENARIO_CFG
prox_irq_vnf.resource_helper = resource_helper
prox_irq_vnf.setup_helper.build_config_file = build_config_file
result = prox_irq_vnf.collect_kpi()
self.assertDictEqual(result["collect_stats"], {})
result = prox_irq_vnf.collect_kpi()
self.assertFalse('bucket_10' in result["collect_stats"]['core_2'])
self.assertFalse('bucket_11' in result["collect_stats"]['core_2'])
self.assertFalse('bucket_12' in result["collect_stats"]['core_2'])
self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12)
@mock.patch(SSH_HELPER)
def test_vnf_execute_oserror(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
prox_irq_vnf.resource_helper = resource_helper = mock.Mock()
resource_helper.execute.side_effect = OSError(errno.EPIPE, "")
prox_irq_vnf.vnf_execute("", _ignore_errors=True)
resource_helper.execute.side_effect = OSError(errno.ESHUTDOWN, "")
prox_irq_vnf.vnf_execute("", _ignore_errors=True)
resource_helper.execute.side_effect = OSError(errno.EADDRINUSE, "")
with self.assertRaises(OSError):
prox_irq_vnf.vnf_execute("", _ignore_errors=True)
@mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
@mock.patch(SSH_HELPER)
def test_terminate(self, ssh, *args):
mock_ssh(ssh)
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
mock_ssh(ssh, exec_result=(1, "", ""))
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
prox_irq_vnf._terminated = mock.MagicMock()
prox_irq_vnf._traffic_process = mock.MagicMock()
prox_irq_vnf._traffic_process.terminate = mock.Mock()
prox_irq_vnf.ssh_helper = mock.MagicMock()
prox_irq_vnf.setup_helper = mock.MagicMock()
prox_irq_vnf.resource_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False})
prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock()
prox_irq_vnf._run_prox = mock.Mock(return_value=0)
prox_irq_vnf.q_in = mock.Mock()
prox_irq_vnf.q_out = mock.Mock()
self.assertIsNone(prox_irq_vnf.terminate())
@mock.patch(SSH_HELPER)
def test_wait_for_instantiate_panic(self, ssh, *args):
vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
mock_ssh(ssh, exec_result=(1, "", ""))
prox_irq_vnf = ProxIrqVNF(VNF_NAME, vnfd)
prox_irq_vnf._terminated = mock.MagicMock()
prox_irq_vnf._traffic_process = mock.MagicMock()
prox_irq_vnf._traffic_process.terminate = mock.Mock()
prox_irq_vnf.ssh_helper = mock.MagicMock()
prox_irq_vnf.setup_helper = mock.MagicMock()
prox_irq_vnf.resource_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper.setup_helper = mock.MagicMock()
prox_irq_vnf._vnf_wrapper._vnf_process = mock.MagicMock(**{"is_alive.return_value": False})
prox_irq_vnf._vnf_wrapper.resource_helper = mock.MagicMock()
prox_irq_vnf._run_prox = mock.Mock(return_value=0)
prox_irq_vnf.q_in = mock.Mock()
prox_irq_vnf.q_out = mock.Mock()
prox_irq_vnf.WAIT_TIME = 0
with self.assertRaises(RuntimeError):
prox_irq_vnf.wait_for_instantiate()
class TestProxIrqGen(unittest.TestCase):
    """Unit tests for the ProxIrqGen traffic-generator wrapper.

    Fix: in test_terminate the socket patch was written as a bare
    ``mock.patch(...)`` expression (no ``@``), so the decorator was never
    applied; it is now a real decorator, matching the sibling test class.
    """

    SCENARIO_CFG = {
        'task_path': "",
        'nodes': {
            'tg__1': 'trafficgen_1.yardstick',
            'vnf__1': 'vnf.yardstick'},
        'runner': {
            'duration': 600, 'type': 'Duration'},
        'topology': 'prox-tg-topology-2.yaml',
        'traffic_profile': '../../traffic_profiles/prox_binsearch.yaml',
        'type': 'NSPerf',
        'options': {
            'tg__1': {'prox_args': {'-e': '',
                                    '-t': ''},
                      'prox_config': 'configs/l3-gen-2.cfg',
                      'prox_path':
                          '/root/dppd-PROX-v035/build/prox'},
            'vnf__1': {
                'prox_args': {'-t': ''},
                'prox_config': 'configs/l3-swap-2.cfg',
                'prox_path': '/root/dppd-PROX-v035/build/prox'}}}

    VNFD_0 = {
        'short-name': 'VpeVnf',
        'vdu': [
            {
                'routing_table': [
                    {
                        'network': '152.16.100.20',
                        'netmask': '255.255.255.0',
                        'gateway': '152.16.100.20',
                        'if': 'xe0'
                    },
                    {
                        'network': '152.16.40.20',
                        'netmask': '255.255.255.0',
                        'gateway': '152.16.40.20',
                        'if': 'xe1'
                    },
                ],
                'description': 'VPE approximation using DPDK',
                'name': 'vpevnf-baremetal',
                'nd_route_tbl': [
                    {
                        'network': '0064:ff9b:0:0:0:0:9810:6414',
                        'netmask': '112',
                        'gateway': '0064:ff9b:0:0:0:0:9810:6414',
                        'if': 'xe0'
                    },
                    {
                        'network': '0064:ff9b:0:0:0:0:9810:2814',
                        'netmask': '112',
                        'gateway': '0064:ff9b:0:0:0:0:9810:2814',
                        'if': 'xe1'
                    },
                ],
                'id': 'vpevnf-baremetal',
                'external-interface': [
                    {
                        'virtual-interface': {
                            'dst_mac': '00:00:00:00:00:03',
                            'vpci': '0000:05:00.0',
                            'driver': 'i40e',
                            'local_ip': '152.16.100.19',
                            'type': 'PCI-PASSTHROUGH',
                            'netmask': '255.255.255.0',
                            'dpdk_port_num': 0,
                            'bandwidth': '10 Gbps',
                            'dst_ip': '152.16.100.20',
                            'local_mac': '00:00:00:00:00:01'
                        },
                        'vnfd-connection-point-ref': 'xe0',
                        'name': 'xe0'
                    },
                    {
                        'virtual-interface': {
                            'dst_mac': '00:00:00:00:00:04',
                            'vpci': '0000:05:00.1',
                            'driver': 'ixgbe',
                            'local_ip': '152.16.40.19',
                            'type': 'PCI-PASSTHROUGH',
                            'netmask': '255.255.255.0',
                            'dpdk_port_num': 1,
                            'bandwidth': '10 Gbps',
                            'dst_ip': '152.16.40.20',
                            'local_mac': '00:00:00:00:00:02'
                        },
                        'vnfd-connection-point-ref': 'xe1',
                        'name': 'xe1'
                    },
                ],
            },
        ],
        'description': 'Vpe approximation using DPDK',
        'mgmt-interface': {
            'vdu-id': 'vpevnf-baremetal',
            'host': '1.1.1.1',
            'password': 'r00t',
            'user': 'root',
            'ip': '1.1.1.1'
        },
        'benchmark': {
            'kpi': [
                'packets_in',
                'packets_fwd',
                'packets_dropped',
            ],
        },
        'connection-point': [
            {
                'type': 'VPORT',
                'name': 'xe0',
            },
            {
                'type': 'VPORT',
                'name': 'xe1',
            },
        ],
        'id': 'VpeApproxVnf', 'name': 'VPEVnfSsh'
    }

    VNFD = {
        'vnfd:vnfd-catalog': {
            'vnfd': [
                VNFD_0,
            ],
        },
    }

    TRAFFIC_PROFILE = {
        "schema": "isb:traffic_profile:0.1",
        "name": "fixed",
        "description": "Fixed traffic profile to run UDP traffic",
        "traffic_profile": {
            "traffic_type": "FixedTraffic",
            "frame_rate": 100,  # pps
            "flow_number": 10,
            "frame_size": 64,
        },
    }

    CONTEXT_CFG = {
        'nodes': {
            'tg__2': {
                'member-vnf-index': '3',
                'role': 'TrafficGen',
                'name': 'trafficgen_2.yardstick',
                'vnfd-id-ref': 'tg__2',
                'ip': '1.2.1.1',
                'interfaces': {
                    'xe0': {
                        'local_iface_name': 'ens513f0',
                        'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
                        'netmask': '255.255.255.0',
                        'local_ip': '152.16.40.20',
                        'dst_mac': '00:00:00:00:00:01',
                        'local_mac': '00:00:00:00:00:03',
                        'dst_ip': '152.16.40.19',
                        'driver': 'ixgbe',
                        'vpci': '0000:02:00.0',
                        'dpdk_port_num': 0,
                    },
                    'xe1': {
                        'local_iface_name': 'ens513f1',
                        'netmask': '255.255.255.0',
                        'network': '202.16.100.0',
                        'local_ip': '202.16.100.20',
                        'local_mac': '00:1e:67:d0:60:5d',
                        'driver': 'ixgbe',
                        'vpci': '0000:02:00.1',
                        'dpdk_port_num': 1,
                    },
                },
                'password': 'r00t',
                'VNF model': 'l3fwd_vnf.yaml',
                'user': 'root',
            },
            'tg__1': {
                'member-vnf-index': '1',
                'role': 'TrafficGen',
                'name': 'trafficgen_1.yardstick',
                'vnfd-id-ref': 'tg__1',
                'ip': '1.2.1.1',
                'interfaces': {
                    'xe0': {
                        'local_iface_name': 'ens785f0',
                        'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
                        'netmask': '255.255.255.0',
                        'local_ip': '152.16.100.20',
                        'dst_mac': '00:00:00:00:00:02',
                        'local_mac': '00:00:00:00:00:04',
                        'dst_ip': '152.16.100.19',
                        'driver': 'i40e',
                        'vpci': '0000:05:00.0',
                        'dpdk_port_num': 0,
                    },
                    'xe1': {
                        'local_iface_name': 'ens785f1',
                        'netmask': '255.255.255.0',
                        'local_ip': '152.16.100.21',
                        'local_mac': '00:00:00:00:00:01',
                        'driver': 'i40e',
                        'vpci': '0000:05:00.1',
                        'dpdk_port_num': 1,
                    },
                },
                'password': 'r00t',
                'VNF model': 'tg_rfc2544_tpl.yaml',
                'user': 'root',
            },
            'vnf__1': {
                'name': 'vnf.yardstick',
                'vnfd-id-ref': 'vnf__1',
                'ip': '1.2.1.1',
                'interfaces': {
                    'xe0': {
                        'local_iface_name': 'ens786f0',
                        'vld_id': prox_vnf.ProxApproxVnf.UPLINK,
                        'netmask': '255.255.255.0',
                        'local_ip': '152.16.100.19',
                        'dst_mac': '00:00:00:00:00:04',
                        'local_mac': '00:00:00:00:00:02',
                        'dst_ip': '152.16.100.20',
                        'driver': 'i40e',
                        'vpci': '0000:05:00.0',
                        'dpdk_port_num': 0,
                    },
                    'xe1': {
                        'local_iface_name': 'ens786f1',
                        'vld_id': prox_vnf.ProxApproxVnf.DOWNLINK,
                        'netmask': '255.255.255.0',
                        'local_ip': '152.16.40.19',
                        'dst_mac': '00:00:00:00:00:03',
                        'local_mac': '00:00:00:00:00:01',
                        'dst_ip': '152.16.40.20',
                        'driver': 'i40e',
                        'vpci': '0000:05:00.1',
                        'dpdk_port_num': 1,
                    },
                },
                'routing_table': [
                    {
                        'netmask': '255.255.255.0',
                        'gateway': '152.16.100.20',
                        'network': '152.16.100.20',
                        'if': 'xe0',
                    },
                    {
                        'netmask': '255.255.255.0',
                        'gateway': '152.16.40.20',
                        'network': '152.16.40.20',
                        'if': 'xe1',
                    },
                ],
                'member-vnf-index': '2',
                'host': '1.2.1.1',
                'role': 'vnf',
                'user': 'root',
                'nd_route_tbl': [
                    {
                        'netmask': '112',
                        'gateway': '0064:ff9b:0:0:0:0:9810:6414',
                        'network': '0064:ff9b:0:0:0:0:9810:6414',
                        'if': 'xe0',
                    },
                    {
                        'netmask': '112',
                        'gateway': '0064:ff9b:0:0:0:0:9810:2814',
                        'network': '0064:ff9b:0:0:0:0:9810:2814',
                        'if': 'xe1',
                    },
                ],
                'password': 'r00t',
                'VNF model': 'prox_vnf.yaml',
            },
        },
    }

    def test__check_status(self):
        """_check_status is abstract on this generator and must raise."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        with self.assertRaises(NotImplementedError):
            prox_irq_gen._check_status()

    def test_listen_traffic(self):
        """listen_traffic accepts any profile without raising."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        prox_irq_gen.listen_traffic(mock.Mock())

    def test_verify_traffic(self):
        """verify_traffic accepts any profile without raising."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        prox_irq_gen.verify_traffic(mock.Mock())

    # BUGFIX: the socket patch below was a bare ``mock.patch(...)`` expression
    # (missing the ``@``), so it never decorated the test method.
    @mock.patch('yardstick.network_services.vnf_generic.vnf.prox_helpers.socket')
    @mock.patch(SSH_HELPER)
    def test_terminate(self, ssh, *args):
        """terminate() returns None with all collaborators stubbed out."""
        mock_ssh(ssh)
        vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
        prox_traffic_gen = ProxIrqGen(VNF_NAME, vnfd)
        prox_traffic_gen._terminated = mock.MagicMock()
        prox_traffic_gen._traffic_process = mock.MagicMock()
        prox_traffic_gen._traffic_process.terminate = mock.Mock()
        prox_traffic_gen.ssh_helper = mock.MagicMock()
        prox_traffic_gen.setup_helper = mock.MagicMock()
        prox_traffic_gen.resource_helper = mock.MagicMock()
        prox_traffic_gen._vnf_wrapper.setup_helper = mock.MagicMock()
        prox_traffic_gen._vnf_wrapper._vnf_process = mock.MagicMock()
        prox_traffic_gen._vnf_wrapper.resource_helper = mock.MagicMock()
        self.assertIsNone(prox_traffic_gen.terminate())

    def test__wait_for_process(self):
        """Returns the exit code when the tg process is alive and healthy."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        with mock.patch.object(prox_irq_gen, '_check_status',
                               return_value=0) as mock_status, \
                mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc:
            mock_proc.is_alive.return_value = True
            mock_proc.exitcode = 234
            self.assertEqual(prox_irq_gen._wait_for_process(), 234)
            mock_proc.is_alive.assert_called_once()
            mock_status.assert_called_once()

    def test__wait_for_process_not_alive(self):
        """A dead tg process makes _wait_for_process raise RuntimeError."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        with mock.patch.object(prox_irq_gen, '_tg_process') as mock_proc:
            mock_proc.is_alive.return_value = False
            self.assertRaises(RuntimeError, prox_irq_gen._wait_for_process)
            mock_proc.is_alive.assert_called_once()

    def test__wait_for_process_delayed(self):
        """_wait_for_process keeps polling until _check_status reports 0."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        with mock.patch.object(prox_irq_gen, '_check_status',
                               side_effect=[1, 0]) as mock_status, \
                mock.patch.object(prox_irq_gen,
                                  '_tg_process') as mock_proc:
            mock_proc.is_alive.return_value = True
            mock_proc.exitcode = 234
            self.assertEqual(prox_irq_gen._wait_for_process(), 234)
            mock_proc.is_alive.assert_has_calls([mock.call(), mock.call()])
            mock_status.assert_has_calls([mock.call(), mock.call()])

    def test_scale(self):
        """scale() is not implemented for this generator."""
        prox_irq_gen = ProxIrqGen('tg1', self.VNFD_0)
        self.assertRaises(y_exceptions.FunctionNotImplemented,
                          prox_irq_gen.scale)

    @mock.patch.object(ctx_base.Context, 'get_physical_node_from_server', return_value='mock_node')
    @mock.patch(SSH_HELPER)
    def test_collect_kpi(self, ssh, *args):
        """First KPI poll yields empty stats; the second returns per-core IRQ
        data with core_2's trailing all-zero buckets stripped."""
        mock_ssh(ssh)
        vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
        resource_helper = mock.MagicMock()
        core_1 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
                  'bucket_6': 6, 'bucket_7': 7, 'bucket_8': 8, 'bucket_9': 9, 'bucket_10': 10,
                  'bucket_11': 11, 'bucket_12': 12, 'bucket_0': 100, 'cpu': 1, 'max_irq': 12,
                  'overflow': 10}
        core_2 = {'bucket_1': 1, 'bucket_2': 2, 'bucket_3': 3, 'bucket_4': 4, 'bucket_5': 5,
                  'bucket_6': 0, 'bucket_7': 0, 'bucket_8': 0, 'bucket_9': 0, 'bucket_10': 0,
                  'bucket_11': 0, 'bucket_12': 0, 'bucket_0': 100, 'cpu': 2, 'max_irq': 12,
                  'overflow': 10}
        irq_data = {'core_1': core_1, 'core_2': core_2}
        resource_helper.sut.irq_core_stats.return_value = irq_data
        build_config_file = mock.MagicMock()
        build_config_file.return_value = None
        prox_irq_gen = ProxIrqGen(VNF_NAME, vnfd)
        # Minimal PROX config: a master core plus two cores in irq mode.
        startup = ["global", [["eal", "-4"]]]
        master_0 = ["core 0", [["mode", "master"]]]
        core_1 = ["core 1", [["mode", "irq"]]]
        core_2 = ["core 2", [["mode", "irq"], ["task", "2"]]]
        prox_irq_gen.setup_helper._prox_config_data = \
            [startup, master_0, core_1, core_2]
        prox_irq_gen.scenario_helper.scenario_cfg = self.SCENARIO_CFG
        prox_irq_gen.resource_helper = resource_helper
        prox_irq_gen.setup_helper.build_config_file = build_config_file
        result = prox_irq_gen.collect_kpi()
        self.assertDictEqual(result["collect_stats"], {})
        result = prox_irq_gen.collect_kpi()
        # Zero-valued high buckets of core_2 must be dropped from the stats.
        self.assertNotIn('bucket_10', result["collect_stats"]['core_2'])
        self.assertNotIn('bucket_11', result["collect_stats"]['core_2'])
        self.assertNotIn('bucket_12', result["collect_stats"]['core_2'])
        self.assertEqual(result["collect_stats"]['core_2']['max_irq'], 12)
|
nilq/baby-python
|
python
|
import logging

from logger_config import configure_logging

# Register handlers/formatters for the shared application logger, then
# emit one message per severity level to exercise the configuration.
logger_name = 'root_logger'
configure_logging(logger_name, log_dir='logs')
logger = logging.getLogger(logger_name)

for emit, message in (
    (logger.warning, 'This is warning'),
    (logger.error, 'This is exception'),
    (logger.info, 'This is info message'),
    (logger.debug, 'This is debug message'),
):
    emit(message)
|
nilq/baby-python
|
python
|
# All edits to original document Copyright 2016 Vincent Berthiaume.
#
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow.python.framework import dtypes
import collections
#sms-tools stuff
import sys, os, os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../sms-tools/software/models/'))
import utilFunctions as UF
import stft as STFT
from scipy.signal import get_window
from scipy.fftpack import fft, ifft
#ffmpeg + audio stuff
import subprocess as sp
import scikits.audiolab
import bisect
#general stuff?
import numpy as np
import matplotlib.pyplot as plt
from six.moves import cPickle as pickle
import math
# we have 7 music genres
NUM_CLASSES = 7
# Per-split sizes, expressed as songs-per-class * NUM_CLASSES.
# NOTE(review): the trailing inline comments (200000/10000) look stale --
# they do not match the computed values; confirm intent.
s_iTrainSize = 8 * NUM_CLASSES # 200000
s_iValid_size = 6 * NUM_CLASSES # 10000
s_iTestSize = 6 * NUM_CLASSES # 10000
SAMPLE_COUNT = 1 * 44100 # first 10 secs of audio
# NOTE(review): 1 * 44100 is one second at 44.1 kHz, not 10 -- confirm.
# Round the FFT window up to the next power of two above SAMPLE_COUNT.
exponent = math.log(SAMPLE_COUNT, 2)+1
TOTAL_INPUTS = 2 ** int(exponent)
# When True, per-genre pickles are rebuilt even if they already exist.
FORCE_PICKLING = False
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])
# Running counter used to name the debug wav dumps (see write_test_wav).
overall_song_id = 0
# When True, labels are emitted as one-hot matrices instead of class ids.
ONE_HOT = False
# Machine-specific music library roots; exactly one is left uncommented.
# LIBRARY_PATH = '/media/kxstudio/LUSSIER/music/'
# LIBRARY_PATH = '/media/sf_stuff_for_virtual_machines/music/'
# LIBRARY_PATH = '/Volumes/Untitled/music/'
#nicolai -- not installed at this point
#LIBRARY_PATH = '/Users/nicolai/Music/vblandr'
#hesse
LIBRARY_PATH = '/mnt/c/Users/barth/Documents/vblandr/'
#gris
#LIBRARY_PATH = '/home/gris/Music/vblandr/'
def write_test_wav(cur_song_samples, str_id = ""):
    """Dump *cur_song_samples* to LIBRARY_PATH/test<str_id>.wav (16-bit PCM)
    so the preprocessing output can be checked by ear."""
    target = "".join([LIBRARY_PATH, 'test', str_id, '.wav'])
    print ("writing", target)
    scikits.audiolab.wavwrite(cur_song_samples, target, fs=44100, enc='pcm16')
def getAllDataSets(train_dir, dtype=np.float32):
    """Load the merged pickle and wrap each split in a DataSet.

    NOTE(review): *train_dir* is never used by this body -- the location comes
    from module-level LIBRARY_PATH via getAllDataPickle; confirm intent.
    """
    pickle_file = getAllDataPickle(FORCE_PICKLING)
    with open(pickle_file, 'rb') as f:
        stored = pickle.load(f)
    train = DataSet(stored['wholeTrainDataset'], stored['wholeTrainLabels'], dtype=dtype)
    validation = DataSet(stored['wholeValidDataset'], stored['wholeValidLabels'], dtype=dtype)
    test = DataSet(stored['wholeTestDataset'], stored['wholeTestLabels'], dtype=dtype)
    return Datasets(train=train, validation=validation, test=test)
def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    one_hot = np.zeros((num_labels, num_classes))
    # Fancy indexing sets one cell per row: (row i, column labels_dense[i]).
    one_hot[np.arange(num_labels), labels_dense.ravel()] = 1
    return one_hot
class DataSet(object):
    """In-memory container for one data split with epoch-aware mini-batching."""

    def __init__(self, songs, labels, dtype=np.float32):
        """Construct a DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`.  (The actual rescaling code is
        currently disabled -- inputs are stored as given.)
        """
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        # Songs and labels must pair up one-to-one.
        assert songs.shape[0] == labels.shape[0], ('songs.shape: %s labels.shape: %s' % (songs.shape, labels.shape))
        self._num_examples = songs.shape[0]
        # Debug aid: print basic statistics of the first song.  The original
        # iterated over *every* song only to act on index 0 -- inspecting
        # songs[0] directly is equivalent and O(1).
        if self._num_examples:
            first_song_dft = songs[0]
            print ("-----DATASET CONSTRUCTOR--------")
            print ("max: ", np.amax(first_song_dft))
            print ("min: ", np.amin(first_song_dft))
            print ("mean: ", np.mean(first_song_dft))
        # use dense_to_one_hot(labels, NUM_CLASSES) here for issue #3
        self._songs = songs
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def songs(self):
        return self._songs

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size):
        """Return the next `batch_size` examples from this data set."""
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        if self._index_in_epoch > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Shuffle the data so every epoch sees a fresh ordering.
            perm = np.arange(self._num_examples)
            np.random.shuffle(perm)
            self._songs = self._songs[perm]
            self._labels = self._labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._songs[start:end], self._labels[start:end]
# ENDOF DataSet
def getAllDataPickle(p_bForce=False):
    """Build (or reuse) the merged train/validation/test pickle and return its path."""
    #get relevant paths
    trainGenreNames, trainGenrePaths = getAllGenrePaths(LIBRARY_PATH + 'train_small/')
    testGenreNames, testGenrePaths = getAllGenrePaths(LIBRARY_PATH + 'test_small/')
    pickle_file = LIBRARY_PATH + 'allData.pickle'
    #obtain data for each genre in their individual pickle file
    allPickledTrainFilenames = getIndividualGenrePickles(trainGenrePaths, p_bForce)
    allPickledTestFilenames = getIndividualGenrePickles(testGenrePaths, p_bForce)
    #merge and randomize data from all genres into wholedatasets for training, validation, and test
    wholeValidDataset, wholeValidLabels, wholeTrainDataset, wholeTrainLabels = getWholeDataFromIndividualGenrePickles(allPickledTrainFilenames, s_iTrainSize, s_iValid_size)
    _, _, wholeTestDataset, wholeTestLabels = getWholeDataFromIndividualGenrePickles(allPickledTestFilenames, s_iTestSize)
    wholeTrainDataset, wholeTrainLabels = randomize(wholeTrainDataset, wholeTrainLabels)
    wholeTestDataset, wholeTestLabels = randomize(wholeTestDataset, wholeTestLabels)
    wholeValidDataset, wholeValidLabels = randomize(wholeValidDataset, wholeValidLabels)
    #save the data for later reuse.  The context manager guarantees the file
    #handle is closed even if pickling raises (the original leaked it then).
    try:
        with open(pickle_file, 'wb') as f:
            save = {'wholeTrainDataset': wholeTrainDataset,
                    'wholeTrainLabels': wholeTrainLabels,
                    'wholeValidDataset': wholeValidDataset,
                    'wholeValidLabels': wholeValidLabels,
                    'wholeTestDataset': wholeTestDataset,
                    'wholeTestLabels': wholeTestLabels}
            pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
    print ('\n================== DATASETS BUILT ================')
    return pickle_file
# ENDOF BUILDDATASETS
def getAllGenrePaths(music_dir):
    """return a list of all music genres, e.g., 'audiobook', and their complete path"""
    genre_names = []
    genre_paths = []
    for entry in os.listdir(music_dir):
        # Skip hidden entries and previously pickled genre files.
        if entry.startswith('.') or entry.endswith('pickle'):
            continue
        genre_names.append(entry)
        genre_paths.append(music_dir + entry)
    return genre_names, genre_paths
def getIndividualGenrePickles(p_strDataFolderNames, p_bForce=False):
    """serialize list of data folders in their own pickle files, and return list of pickle filenames"""
    # One pickle per genre folder; an existing pickle is reused unless
    # p_bForce is set, so repeated runs skip the expensive audio decoding.
    all_pickle_filenames = []
    for strCurFolderName in p_strDataFolderNames:
        cur_pickle_filename = strCurFolderName + '.pickle'
        all_pickle_filenames.append(cur_pickle_filename)
        if os.path.exists(cur_pickle_filename) and not p_bForce:
            print('%s already present - Skipping pickling.' % cur_pickle_filename)
        else:
            print '\nPickling',
            print cur_pickle_filename,
            # Decode every song of the genre into its FFT feature matrix.
            dataset_cur_genre = getDataForGenre(strCurFolderName)
            try:
                #and try to pickle it
                with open(cur_pickle_filename, 'wb') as f:
                    pickle.dump(dataset_cur_genre, f, pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                # Best-effort: a failed save is reported but does not abort
                # the remaining genres.
                print('Unable to save data to', cur_pickle_filename, ':', e)
    return all_pickle_filenames
def removeInitialSilence(cur_song_pcm):
#using absolute value
env = abs(cur_song_pcm)
env = env.astype(np.float32) #cast the array into float32
max = np.max(env)
env = np.multiply(env, 1.0 / max) #normalize so that max value is == 1.0
#convolving as a way to do a fast moving average
N = 100
env = np.convolve(env, np.ones((N,))/N)[(N-1):]
#detect first non-silent sample
threshold = .01
endOfSilence = bisect.bisect(env,threshold)
print "\nend of silence: ", endOfSilence
return cur_song_pcm[endOfSilence:]
# load data for each genre
def getDataForGenre(genre_folder):
    """Decode every song under *genre_folder* and return a (songs x TOTAL_INPUTS)
    float32 matrix of per-song normalized FFT features."""
    global overall_song_id
    # Collect the path of every .wav/.mp3 file in the genre tree.
    all_song_paths = []
    for path, dirs, files in os.walk(genre_folder):
        #insert file in correct label id
        for file in files:
            if not file.startswith('.') and (file.endswith('.wav') or file.endswith('.mp3')):
                all_song_paths.append(path+"/"+file)
    #data for cur genre will have shape all_song_paths x TOTAL_INPUTS
    #dataset_cur_genre = np.ndarray(shape=(len(all_song_paths), TOTAL_INPUTS), dtype=np.int16)
    dataset_cur_genre = np.ndarray(shape=(len(all_song_paths), TOTAL_INPUTS), dtype=np.float32)
    songId = 0
    #for each song in the current genre
    for cur_song_file in all_song_paths:
        try:
            # convert current song to np.int16 array.
            print cur_song_file
            cur_song_pcm = songFile2pcm(cur_song_file)
            cleaned_cur_song_pcm = removeInitialSilence(cur_song_pcm)
            # Debug dump of the raw (untrimmed) decode; numbered globally.
            write_test_wav(cur_song_pcm, str(overall_song_id))
            overall_song_id = overall_song_id +1
            # only keep the first 2x TOTAL_INPUTS samples. since the fft is symetrical, we can use that to store more stuff
            short_cur_song_pcm = cleaned_cur_song_pcm[:2*TOTAL_INPUTS]
            #do the fft, keeping only the real part. mX has same len as short_cur_song_pcm, but is np.float64
            # NOTE(review): .real is the real component, not the magnitude --
            # abs() would be the magnitude; confirm which was intended.
            mX = fft(short_cur_song_pcm).real
            #only keep the first half since symmetrical, and we know len(mX) is multiple of 2
            # (Python 2 integer division.)
            mX = mX[:len(mX)/2]
            #PLOT THE THING
            #if songId == 0:
            #    fft_buffer = np.insert(mX, np.zeros(len(mX)), 0)
            #    for i in np.arange (len(fft_buffer)/2):
            #        fft_buffer[i] = fft_buffer[len(fft_buffer)-i-1]
            #    plt.plot(fft_buffer)
            #    plt.show()
            #need to convert to range 0,1 for tensorflow learning.
            # NOTE(review): these three names shadow the max/min/range
            # builtins, and a constant mX makes range 0 (division by zero).
            max = np.amax(mX)
            min = np.amin(mX)
            range = max - min
            mX = mX - min
            mX = mX/range
            #and put it in the dataset_cur_genre
            dataset_cur_genre[songId, :] = mX
            songId += 1
        except IOError as e:
            # Unreadable/undecodable songs are skipped, not fatal.
            print('skipping ', cur_song_file, ':', e)
    #in case we skipped some songs, only keep the first songId songs in dataset_cur_genre
    dataset_cur_genre = dataset_cur_genre[0:songId, :]
    # print('Full dataset_cur_genre tensor:', dataset_cur_genre.shape)
    # print('Mean:', np.mean(dataset_cur_genre))
    # print('Standard deviation:', np.std(dataset_cur_genre))
    return dataset_cur_genre
#END LOAD GENRE
def songFile2pcm(song_path):
    """Decode *song_path* with ffmpeg and return its samples as a mono
    44.1 kHz np.int16 array."""
    # NOTE(review): song_path2 is computed but never used -- leftover?
    song_path2 = song_path + '.wav'
    command = [ 'ffmpeg',
            '-i', song_path,
            '-f', 's16le',
            '-acodec', 'pcm_s16le',
            '-ar', '44100', # sms tools wavread can only read 44100 Hz
            '-ac', '1', # mono file
            '-loglevel', 'quiet',
            '-'] #instead of having an output file, using '-' sends it in the pipe. not actually sure how this works.
    #run the command
    #print(song_path)
    # Progress dot (no newline) so long batch runs show activity.
    print ".",
    sys.stdout.flush()
    pipe = sp.Popen(command, stdout=sp.PIPE)
    #read the output into a numpy array
    stdoutdata = pipe.stdout.read()
    audio_array = np.fromstring(stdoutdata, dtype=np.int16)
    # size = len(audio_array)
    # print ("size: ", size)
    #export this to a wav file, to test it
    # write_test_wav(audio_array)
    return audio_array
#END SONGFILE2PCM
# Merge individual genre datasets. Tune s_iTrainSize as needed to be able to fit all data in memory.
# Also create a validation dataset_cur_genre for hyperparameter tuning.
def getWholeDataFromIndividualGenrePickles(p_allPickledFilenames, p_iTrainSize, p_iValidSize=0):
    """Merge the per-genre pickles into one training set and (optionally) one
    validation set.

    Returns (valid_dataset, valid_labels, train_dataset, train_labels);
    the validation pair is (None, None) when p_iValidSize == 0.
    """
    iNum_classes = len(p_allPickledFilenames)
    #make empty arrays for validation and training sets and labels
    whole_valid_dataset, valid_labels = make_arrays(p_iValidSize, TOTAL_INPUTS, ONE_HOT)
    whole_train_dataset, train_labels = make_arrays(p_iTrainSize, TOTAL_INPUTS, ONE_HOT)
    #number of items per class. // is an int division in python3, not sure in python2
    iNbrOfValidItemsPerClass = p_iValidSize // iNum_classes
    iNbrOfTrainItemPerClass = p_iTrainSize // iNum_classes
    #figure out useful indexes for the loop
    # Each genre fills a contiguous [start:end) slice of the output arrays.
    iStartValidId, iStartTrainId = 0, 0
    iEndValidId, iEndTrainId = iNbrOfValidItemsPerClass, iNbrOfTrainItemPerClass
    iEndListId = iNbrOfValidItemsPerClass+iNbrOfTrainItemPerClass
    #for each file in p_allPickledFilenames
    for iPickleFileId, strPickleFilename in enumerate(p_allPickledFilenames):
        try:
            with open(strPickleFilename, 'rb') as f:
                cur_genre_dataset = pickle.load(f)
                # let's shuffle the items to have random validation and training set. np.random.shuffle suffles only first dimension
                np.random.shuffle(cur_genre_dataset)
                #if we asked for a validation set, use the first items for it
                if whole_valid_dataset is not None:
                    #the first iNbrOfValidItemsPerClass items in letter_set are used for the validation set
                    whole_valid_dataset[iStartValidId:iEndValidId, :] = cur_genre_dataset[:iNbrOfValidItemsPerClass, :]
                    #label all images with the current file id
                    valid_labels[iStartValidId:iEndValidId] = iPickleFileId
                    #update ids for the train set
                    iStartValidId += iNbrOfValidItemsPerClass
                    iEndValidId += iNbrOfValidItemsPerClass
                #the rest of the items are used for the training set
                whole_train_dataset[iStartTrainId:iEndTrainId, :] = cur_genre_dataset[iNbrOfValidItemsPerClass:iEndListId, :]
                train_labels[iStartTrainId:iEndTrainId] = iPickleFileId
                iStartTrainId += iNbrOfTrainItemPerClass
                iEndTrainId += iNbrOfTrainItemPerClass
        except Exception as e:
            print('Unable to process data from', strPickleFilename, ':', e)
            raise
    return whole_valid_dataset, valid_labels, whole_train_dataset, train_labels
#END OF getWholeDataFromIndividualGenrePickles
def make_arrays(p_iNb_rows, p_iNb_cols, one_hot):
    """Allocate uninitialized dataset/label arrays for one split, or return
    (None, None) when no rows are requested."""
    if not p_iNb_rows:
        return None, None
    dataset_cur_genre = np.ndarray((p_iNb_rows, p_iNb_cols), dtype=np.float32)
    if one_hot:
        # One row of NUM_CLASSES indicator columns per example.
        labels = np.ndarray((p_iNb_rows, NUM_CLASSES), dtype=np.int32)
    else:
        labels = np.ndarray(p_iNb_rows, dtype=np.int32)
    return dataset_cur_genre, labels
# Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
def randomize(p_3ddataset_cur_genre, p_vLabels):
    """Shuffle dataset rows and labels with one shared random permutation so
    each row keeps its label."""
    order = np.random.permutation(p_vLabels.shape[0])
    return p_3ddataset_cur_genre[order, :], p_vLabels[order]
|
nilq/baby-python
|
python
|
# Package release version string.
__version__ = '3.0.8'
# Build provenance; the placeholder values are substituted at build time.
__buildinfo__ = {'branch': 'BRANCH_NOT_SET', 'last_commit': 'COMMIT_NOT_SET'}
|
nilq/baby-python
|
python
|
from dash import dcc
import dash_bootstrap_components as dbc # pip install dash-bootstrap-components
from dash import Input, Output, State, html
from app import app
# Lotties: Emil at https://github.com/thedirtyfew/dash-extensions
url_sunlight = "https://assets8.lottiefiles.com/packages/lf20_bknKi1.json"
url_earth = "https://assets10.lottiefiles.com/datafiles/xjh641xEDuQg4qg/data.json"
url5 = "https://assets8.lottiefiles.com/packages/lf20_q6y5ptrh.json"
url6 = "https://assets4.lottiefiles.com/packages/lf20_tN5Ofx.json"
options = dict(loop=True, autoplay=True, rendererSettings=dict(preserveAspectRatio='xMidYMid slice'))
def _make_learn_card(card_id, topic, modal_title, modal_markdown):
    """Build one green "learn" card whose button opens a scrollable modal.

    This factory replaces 11 near-identical copy/pasted ``dbc.Card`` literals.

    Parameters
    ----------
    card_id : int
        Numeric suffix used in the Dash component ids
        ('open_learn_card_<id>', 'close_learn_card_<id>',
        'body_learn_card_<id>').  The open/close callbacks are keyed on these
        ids, so the suffix assigned to each card must stay stable.
    topic : str
        Short question shown on the card face.
    modal_title : str
        Title of the modal dialog.
    modal_markdown : str
        Markdown body rendered inside the modal.
    """
    return dbc.Card(
        [
            dbc.CardBody(
                [
                    dbc.Row(
                        dbc.Label(topic, className='emission_card_topic'),
                    ),
                    dbc.Row(
                        dbc.Button(
                            'Click to learn',
                            id='open_learn_card_{}'.format(card_id),
                            n_clicks=0,
                            className='align-self-center'
                        ),
                    ),
                    dbc.Modal(
                        [
                            dbc.ModalHeader(dbc.ModalTitle(modal_title)),
                            dbc.ModalBody(dcc.Markdown(modal_markdown)),
                            dbc.ModalFooter(
                                dbc.Button(
                                    'close',
                                    id='close_learn_card_{}'.format(card_id),
                                    className='ms-auto',
                                    n_clicks=0,
                                )
                            ),
                        ],
                        id='body_learn_card_{}'.format(card_id),
                        scrollable=True,
                        is_open=False,
                    ),
                ]
            ),
        ],
        color="success",  # https://bootswatch.com/default/ for more card colors
        inverse=True,  # change color of text (black or white)
        outline=False,  # True = remove the block colors from the background and header
        className='emission_card_style',
    )


learn_card_1 = _make_learn_card(
    1,
    "What is KlimaDAO?",
    'What is KlimaDAO?',
    '''
Klima DAO is a Decentralized Autonomous Organization to drive climate action,
via our carbon-backed, algorithmic currency- the KLIMA token.
As the protocol grows, Klima DAO will solve the critical problems of the carbon markets:
- **Illiquidity**: Carbon Credits come in many different varieties; carbon brokers and
middlemen are used by buyers and sellers, fragmenting the total liquidity of the market.
- **Opacity**: Trades occur often behind closed doors, allowing buyers to underbuy the market.
- **Inefficiency**: buying and retiring carbon credits comes with friction and barriers,
by utilizing the polygon ecosystem, it removes this friction for all users
In delivery of its objectives, Klima DAO will become the single biggest disruptor of the
carbon markets and set a precedent for a new monetary system backed by carbon.
Klima DAO will serve the web3 ecosystem by offering accountability for those that
contribute, rewards for stakeholders, and a stake in governance for those that participate.
Klima DAO was inspired by Olympus DAO. It was conceptualized and built by a
distributed pseudo-anonymous team.
Klima is DAO-governed by it's community. All decisions are formed by community members on
the forum and made by KLIMA holders through snapshot voting.
''',
)

# NOTE(review): "amage" in the copy below looks like a truncated "damage" —
# copy kept byte-for-byte; confirm with content owners before editing.
learn_card_2 = _make_learn_card(
    2,
    "Why KlimaDAO?",
    'What is the point of KlimaDAO?',
    '''
1. Driven Climate Action:
Klima DAO incentivizes new supply of Base Carbon Tonnes (BCT) on the blockchain
through the KLIMA token. By driving demand into BCT, it incentivizes carbon offset
producers to produce more carbon credits, assisting the adoption of new carbon mitigating
or sequestering technology, and disincentivizes companies wanting to offset their carbon
footprint with only C.Cs, and forces them to perform environmentally friendly actions.
KLIMA is the first building block for unlocking the carbon economy — an economy where more
economic activity leads to an acceleration in planetary regeneration rather than more
amage to our planet. Before, monetary incentives and environmental incentives aren't
typically aligned.
2. Become a Carbon-Based Reserve Currency:
The KLIMA ecosystem and monetary policy are managed by the Klima DAO.
This way we guarantee transparent decision making and long-term stability.
In the long term, we can use this system to optimize stability, to transition to a global
unit of account and medium of exchange. Currently, in the short term, we're focused on
growth and wealth creation, to incentivize users to join the new wave of carbon currency.
3. Facilitate the Climate Market:
The current carbon (and the climate in general) markets are illiquid, fragmented,
inefficient, and opaque. Because of this, we feel that carbon tonnage is heavily
undervalued, and is forced down because of these issues. By eliminating these issues,
the true price can be achieved.
''',
)

# Shared markdown: learn_card_7 ("What is Rebasing?") has always shown the
# same text as learn_card_3.
# NOTE(review): this looks like a copy/paste leftover — the rebasing card
# probably deserves its own copy; confirm the intended text.
_WHAT_IS_KLIMA_MD = '''
KLIMA is an algorithmic carbon-backed currency,
inspired by [Olympus DAO](https://www.olympusdao.finance/) and their token mechanics.
KlimaDAO incentivises new supply of Base Carbon Tonnes (BCT) on the blockchain through
bonding with the Protocol. Each KLIMA token is backed at a 1:1 ratio with a BCT in the
treasury.
KlimaDAO leverages the [Toucan Protocol's](https://docs.toucan.earth/protocol/)
Carbon Bridge to retire real world Verified Carbon Units (VCUs) and convert them to a
tokenized form on the blockchain, VCUs can be verified from reputable carbon markets in a
transparent and traceable manner. The credits are then absorbed through the protocols'
bonding mechanism, building a treasury of verified tokenized carbon reductions.
This increases the amount of carbon assets locked within the treasury, thereby
reducing supply on the open market and leading to price appreciation within the
Voluntary Carbon Markets.
In summary, Klima serves two main purposes:
1. It serves as a floating currency and a form of money backed at a 1:1 ratio by voluntary
carbon credits.
2. It is used to govern the protocol and confer voting power to influence decisions on
various policies including supply expansion mechanics.
'''

# NOTE: learn_card_3 and learn_card_4 historically use swapped id suffixes
# (4 and 3 respectively).  The suffixes are kept so the registered
# open/close/body callbacks keep matching their components.
learn_card_3 = _make_learn_card(4, "What is Klima?", 'What is Klima?', _WHAT_IS_KLIMA_MD)

learn_card_4 = _make_learn_card(
    3,
    "How do I participate?",
    'How do I participate in KlimaDAO?',
    '''
1. Klima DAO development:
Join the Discord to become a Klimate and hear about Protocol developments.
Those who wish to be involved in Protocol Governance should also join the Discord
to be onboarded by a member of the team.
2. Participation in the carbon economy:
BCTs are the underlying asset within the KlimaDAO treasury and their flow into the treasury
underpins protocol growth. BCTs can be created from real-world Verified Carbon Units (VCUs)
via the Toucan Protocol. Bonders provide BCT LP or BCT tokens in exchange for discounted
KLIMA tokens after a fixed vesting period. Once KLIMA tokens are held, stakers stake
their KLIMA tokens in return for more KLIMA tokens.
''',
)

learn_card_5 = _make_learn_card(
    5,
    "What is Staking?",
    'What is Staking?',
    '''
- Staking is the primary profit distribution mechanism of the protocol. It is designed
to be the primary mechanism of value accural for the majority of users.
For most, the best thing to do is to simply stake and compound the KLIMA acquired.
- Whenever the protocol has an excess of reserve per token, the protocol will mint
and distribute tokens to the stakers. The amount minted and distributed is controlled
by a variable called the reward rate.
The reward rate is the % percent supply that is rebased.
For a step by step guide on how to stake KLIMA, see the
[Community guide](https://klima-dao.notion.site/I-m-new-to-KLIMA-How-do-I-participate-bcf8881862e941a5b5550d1179e123f9)
''',
)

learn_card_6 = _make_learn_card(
    6,
    "What is Bonding?",
    'What is Bonding?',
    '''
What is Bonding?
Bonding is the process of trading assets to the protocol for KLIMA. The protocol will quote you an amount of KLIMA
for your asset, and the vesting period for the trade. Today, the protocol takes in:
1. Reserve Assets: BCT (Base Carbon Tonnes)
2. Liquidity Assets: KLIMA/BCT and BCT/USDC sushiswap LP pairs.
Bonding allows you to buy KLIMA at a lower cost basis. Because the protocol can sell at a discount to the market
price (as it can mint KLIMA at IV),you are able to more cheaply buy KLIMA
''',
)

learn_card_7 = _make_learn_card(7, "What is Rebasing?", 'What is Rebasing?', _WHAT_IS_KLIMA_MD)

learn_card_8 = _make_learn_card(
    8,
    "Participant Goals?",
    'Participant Goals?',
    '''
Stakers care primarily about their KLIMA balance. While price is important in valuing their KLIMA and indicating
the market's perception of Klima DAO's utility and impact, it is not the main goal in the shorter-term.
**KLIMA is a long-term play, and maximizing holdings is the objective of stakers.**
A higher price of carbon will be achieved by increasing the quality of carbon removal projects, and creating a system
for producing carbon offsets at scale. A robust system will see a higher BCT price and a higher KLIMA price.
A smart staker cares about the long-term price exploration of BCT tokens and the quality of the TCO2s flowing into the
ecosystem.
Bonders care primarily about the On-chain Carbon Tonne supply and their KLIMA balance. Bonders have their KLIMA and
carbon assets locked in for a period of time, but can redeem KLIMA at a better rate than a staker by relinquishing
their BCTs to the treasury to lock it away indefinitely. Their carbon impact and KLIMA returns from bonding are
proportional to the amount bonded.
In the case where demand is greater than supply, purchasing BCTs and bonding them for new KLIMA will be cheaper
than purchasing KLIMA on the free market.
''',
)

learn_card_9 = _make_learn_card(
    9,
    "What are Carbon Markets?",
    'What are Carbon Markets?',
    '''
Carbon markets are a greenhouse gas trading system implemented to reduce CO2 and other greenhouse gas emissions by
putting a price on releasing carbon in the form of carbon offsets, sometimes called carbon credits.
Carbon markets are “Cap and Trade” markets. In this system the number of carbon offsets are capped for a particular
entity; a company, government, etc… This allows the entity to release a set amount of emissions.
If the entity wants to exceed their set emission level they need to trade carbon offsets with other
entities who are not using their carbon offsets or face a fine.
Extra credits can be created if participants voluntarily reduce their emissions by using cleaner energy sources or
other pollution controls. Over time the cap for emissions will be slowly lowered making carbon offsets more scarce
and more expensive, creating an economic incentive for entities to voluntarily reduce their emissions.
''',
)

learn_card_10 = _make_learn_card(
    10,
    "What is a Carbon Offset?",
    'What is a Carbon Offset?',
    '''
A carbon offset represents the removal of one tonne of carbon dioxide equivalent from the atmosphere or the avoidance
of one tonne of emissions. The term “carbon dioxide equivalent” is used because there are multiple greenhouse gasses,
all with a different Global Warming Potential (GWP), which illustrates impacts of different greenhouse gasses.
For instance methane has a GWP 28 times that of CO2. This means a company would need 28 carbon offsets to
emit 1 tonne of methane.
''',
)

learn_card_11 = _make_learn_card(
    11,
    "How are carbon offsets and renewable energy different?",
    'How are carbon offsets and renewable energy different?',
    '''
Renewable energy sources produce energy from natural sources, like wind or solar, with little to no carbon emissions.
Carbon offsets create a way to reduce the acceptable levels of current emissions over time, provide economic
incentive to reduce voluntarily and fund sources of renewable energy.
''',
)
def _toggle_learn_modal(n_open, n_close, is_open):
    """Shared open/close handler for every learn-card modal.

    Returns the modal's next ``is_open`` state: flipped whenever either button
    has been clicked at least once, unchanged on the initial callback
    invocation (both click counts falsy).
    """
    if n_open or n_close:
        return not is_open
    return is_open


def _register_learn_modal_callback(card_id):
    """Wire the open/close buttons of learn card ``card_id`` to its modal.

    A dedicated registration function (rather than a bare loop body) keeps
    ``card_id`` bound per callback registration.
    """
    app.callback(
        Output('body_learn_card_{}'.format(card_id), 'is_open'),
        [
            Input('open_learn_card_{}'.format(card_id), 'n_clicks'),
            Input('close_learn_card_{}'.format(card_id), 'n_clicks'),
        ],
        [State('body_learn_card_{}'.format(card_id), 'is_open')],
    )(_toggle_learn_modal)


# One identical toggle callback per learn card (previously eleven copy/pasted
# functions toggle_modal1..toggle_modal11, all with the same body).
for _card_id in range(1, 12):
    _register_learn_modal_callback(_card_id)
# Page layout: three labelled sections, each a row of up to four learn cards.
# Columns are full-width (xs..md=12) on small screens and quarter-width
# (lg/xl=3) on large ones.
layout = html.Div([
    dbc.Row([
        dbc.Col(dbc.Label('Foundations',
                          className="page_section_topic"))
    ]),
    dbc.Row([
        dbc.Col(learn_card_1, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_2, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_3, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_4, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'})
    ]),
    dbc.Row([
        dbc.Col(dbc.Label('Protocol Mechanics',
                          className="page_section_topic"))
    ]),
    dbc.Row([
        dbc.Col(learn_card_5, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_6, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_7, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_8, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'})
    ]),
    dbc.Row([
        dbc.Col(dbc.Label('Carbon Markets',
                          className="page_section_topic"))
    ]),
    dbc.Row([
        dbc.Col(learn_card_9, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_10, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        dbc.Col(learn_card_11, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'}),
        # NOTE(review): learn_card_8 ("Participant Goals?") already appears in
        # the Protocol Mechanics row above; its repetition here looks like a
        # placeholder for a missing twelfth card — confirm which card belongs
        # in this slot.
        dbc.Col(learn_card_8, xs=12, sm=12, md=12, lg=3, xl=3, style={'padding': '10px', 'height': '100%'})
    ]),
])
|
nilq/baby-python
|
python
|
"""
Tatoeba (https://tatoeba.org/) is a collection of sentences and translation, mainly aiming for language learning.
It is available for more than 300 languages.
This script downloads the Tatoeba corpus and extracts the sentences & translations in the languages you like
"""
import os
import sentence_transformers
import tarfile
import gzip
# Note: Tatoeba uses 3 letter languages codes (ISO-639-2),
# while other datasets like OPUS / TED2020 use 2 letter language codes (ISO-639-1)
# For training of sentence transformers, which type of language code is used doesn't matter.
# For language codes, see: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
source_languages = set(['eng'])
target_languages = set(['deu', 'ara', 'tur', 'spa', 'ita', 'fra'])
num_dev_sentences = 1000 #Number of sentences that are used to create a development set
tatoeba_folder = "../datasets/tatoeba"
output_folder = "parallel-sentences/"
sentences_file_bz2 = os.path.join(tatoeba_folder, 'sentences.tar.bz2')
sentences_file = os.path.join(tatoeba_folder, 'sentences.csv')
links_file_bz2 = os.path.join(tatoeba_folder, 'links.tar.bz2')
links_file = os.path.join(tatoeba_folder, 'links.csv')
download_url = "https://downloads.tatoeba.org/exports/"
os.makedirs(tatoeba_folder, exist_ok=True)
os.makedirs(output_folder, exist_ok=True)
#Download files if needed
for filepath in [sentences_file_bz2, links_file_bz2]:
if not os.path.exists(filepath):
url = download_url+os.path.basename(filepath)
print("Download", url)
sentence_transformers.util.http_get(url, filepath)
#Extract files if needed
if not os.path.exists(sentences_file):
print("Extract", sentences_file_bz2)
tar = tarfile.open(sentences_file_bz2, "r:bz2")
tar.extract('sentences.csv', path=tatoeba_folder)
tar.close()
if not os.path.exists(links_file):
print("Extract", links_file_bz2)
tar = tarfile.open(links_file_bz2, "r:bz2")
tar.extract('links.csv', path=tatoeba_folder)
tar.close()
#Read sentences
sentences = {}
all_langs = target_languages.union(source_languages)
print("Read sentences.csv file")
with open(sentences_file, encoding='utf8') as fIn:
for line in fIn:
id, lang, sentence = line.strip().split('\t')
if lang in all_langs:
sentences[id] = (lang, sentence)
#Read links that map the translations between different languages
print("Read links.csv")
translations = {src_lang: {trg_lang: {} for trg_lang in target_languages} for src_lang in source_languages}
with open(links_file, encoding='utf8') as fIn:
for line in fIn:
src_id, target_id = line.strip().split()
if src_id in sentences and target_id in sentences:
src_lang, src_sent = sentences[src_id]
trg_lang, trg_sent = sentences[target_id]
if src_lang in source_languages and trg_lang in target_languages:
if src_sent not in translations[src_lang][trg_lang]:
translations[src_lang][trg_lang][src_sent] = []
translations[src_lang][trg_lang][src_sent].append(trg_sent)
#Write everything to the output folder
print("Write output files")
for src_lang in source_languages:
for trg_lang in target_languages:
source_sentences = list(translations[src_lang][trg_lang])
train_sentences = source_sentences[num_dev_sentences:]
dev_sentences = source_sentences[0:num_dev_sentences]
print("{}-{} has {} sentences".format(src_lang, trg_lang, len(source_sentences)))
if len(dev_sentences) > 0:
with gzip.open(os.path.join(output_folder, 'Tatoeba-{}-{}-dev.tsv.gz'.format(src_lang, trg_lang)), 'wt', encoding='utf8') as fOut:
for sent in dev_sentences:
fOut.write("\t".join([sent]+translations[src_lang][trg_lang][sent]))
fOut.write("\n")
if len(train_sentences) > 0:
with gzip.open(os.path.join(output_folder, 'Tatoeba-{}-{}-train.tsv.gz'.format(src_lang, trg_lang)), 'wt', encoding='utf8') as fOut:
for sent in train_sentences:
fOut.write("\t".join([sent]+translations[src_lang][trg_lang][sent]))
fOut.write("\n")
print("---DONE---")
|
nilq/baby-python
|
python
|
from django.test import TestCase
from unittest2 import skipIf
from django.db import connection
import json
import re
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.test import missing_url
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from sqlshare_rest.test.api.base import BaseAPITest
from sqlshare_rest.dao.dataset import create_dataset_from_query
from sqlshare_rest.util.db import is_mssql, is_mysql, is_sqlite3, is_pg
import six
if six.PY2:
from StringIO import StringIO
elif six.PY3:
from io import StringIO
@skipIf(missing_url("sqlshare_view_dataset_list"), "SQLShare REST URLs not configured")
@override_settings(MIDDLEWARE_CLASSES = (
                                'django.contrib.sessions.middleware.SessionMiddleware',
                                'django.middleware.common.CommonMiddleware',
                                'django.contrib.auth.middleware.AuthenticationMiddleware',
                                'django.contrib.auth.middleware.RemoteUserMiddleware',
                                'django.contrib.messages.middleware.MessageMiddleware',
                                'django.middleware.clickjacking.XFrameOptionsMiddleware',
                                ),
                   SQLSHARE_QUERY_CACHE_DB="test_ss_query_db",
                   AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
                   )
class DownloadAPITest(BaseAPITest):
    """Tests for the one-shot query download endpoints.

    Flow under test: POST SQL to ``sqlshare_view_init_download`` to obtain a
    tokenized download URL, then GET that URL (``sqlshare_view_run_download``)
    to stream the query result as CSV.
    """
    token = None
    query_id = None

    def test_download(self):
        """Init a download for ``SELECT (1)``, stream it, and verify the CSV
        body; also verify the tokenized URL is single-use (second GET -> 404).
        """
        owner = "test_dataset_download2"
        self.remove_users.append(owner)
        auth_headers = self.get_auth_header_for_username(owner)

        post_url = reverse("sqlshare_view_init_download")
        response = self.client.post(post_url, data=json.dumps({'sql': 'SELECT (1)', 'downloads': 1}), content_type="application/json", **auth_headers)
        self.assertEqual(response.status_code, 200)
        download_url = response["Location"]

        response2 = self.client.get(download_url, content_type='application/json')
        self.assertEqual(response2.status_code, 200)
        self.assertTrue(response2.streaming)

        # Reassemble the streamed chunks; utf-8-sig decoding drops the BOM
        # that the CSV stream may start with.
        response_body = StringIO("".join(map(lambda x: x.decode("utf-8-sig"), response2.streaming_content))).read()

        # The header row for "SELECT (1)" is backend-specific (column naming
        # differs between MSSQL / MySQL / Postgres / sqlite).
        if is_mssql():
            resp = '""\n"1"\n'
        elif is_mysql():
            resp = '"1"\n"1"\n'
        elif is_pg():
            resp = '"?column?"\n"1"\n'
        else:
            resp = '"(1)"\n"1"\n'

        self.assertEqual(response_body, resp)

        # Ensure download only works once
        response = self.client.get(download_url, content_type='application/json')
        self.assertEqual(response.status_code, 404)

    def test_bad_query(self):
        """Init/run downloads for invalid SQL and for a dataset the caller has
        no access to.

        NOTE(review): both cases assert HTTP 200 on init and run — presumably
        query errors surface inside the streamed body rather than as HTTP
        errors; confirm against the view implementation.
        """
        owner = "test_invalid_download_owner"
        other = "test_invalid_download_sneak"
        self.remove_users.append(owner)
        self.remove_users.append(other)

        get_backend().get_user(other)
        model = create_dataset_from_query(username=owner, dataset_name="test_download_2", sql="SELECT (3)")

        # Backend-specific qualified name for the other user's dataset.
        if is_mssql():
            sql = "SELECT * FROM [test_invalid_download_owner].[test_download_2]"
        elif is_mysql():
            sql = "SELECT * FROM `test_invalid_download_owner`.`test_download_2`";
        else:
            sql = "SELECT * FROM test_download_2";

        post_url = reverse("sqlshare_view_init_download")
        other_auth_headers = self.get_auth_header_for_username(other)

        # Now try just invalid sql
        response = self.client.post(post_url, data=json.dumps({'sql': "SELECT (1", 'downloads': 1}), content_type="application/json", **other_auth_headers)
        self.assertEqual(response.status_code, 200)
        download_url = response["Location"]
        response2 = self.client.get(download_url, content_type='application/json')
        self.assertEqual(response2.status_code, 200)

        if is_sqlite3():
            # sqlite3 doesn't have permissions for the test below to fail on...
            return

        # Test a user w/ no access trying to download a dataset's content.
        response = self.client.post(post_url, data=json.dumps({'sql': sql, 'downloads': 1}), content_type="application/json", **other_auth_headers)
        self.assertEqual(response.status_code, 200)
        download_url = response["Location"]
        response2 = self.client.get(download_url, content_type='application/json')
        self.assertEqual(response2.status_code, 200)

    def test_bad_download(self):
        """A download token that was never issued yields 404."""
        owner = "query_user1"
        self.remove_users.append(owner)

        # bad query id
        post_url = reverse("sqlshare_view_run_download", kwargs={'token': 'asd'})
        # NOTE(review): auth_headers is built but never sent — the GET below
        # is unauthenticated; confirm that is intended.
        auth_headers = self.get_auth_header_for_username(owner)
        response = self.client.get(post_url, content_type='application/json')
        self.assertEqual(response.status_code, 404)

    def test_bad_methods(self):
        """Wrong HTTP verbs are rejected: GET on init and POST on run -> 405."""
        owner = "query_user1"
        auth_headers = self.get_auth_header_for_username(owner)

        init_url = reverse("sqlshare_view_init_download")
        init_response = self.client.get(init_url, content_type='application/json', **auth_headers)
        self.assertEqual(init_response.status_code, 405)

        download_url = reverse("sqlshare_view_run_download", kwargs={ 'token' : 'asd1234'})
        download_response = self.client.post(download_url, content_type='application/json')
        self.assertEqual(download_response.status_code, 405)
|
nilq/baby-python
|
python
|
from rest_framework import status
from rest_framework.test import APITestCase
from .. import models
from .. import serializers
class DocumentTopicTestCase(APITestCase):
    """API tests for the document<->topic many-to-many link endpoint."""

    URL = '/v1/m2m/document/topic/'
    DOCUMENT_URL = '/v1/document/'

    def _build_fixtures(self):
        # One Document and one Topic, ready to be linked by the tests.
        document = models.Document.objects.create(
            title='Test',
            content='This is the test content',
        )
        topic = models.Topic.objects.create(
            short_descriptor='support',
            long_descriptor='support for developers',
        )
        return document, topic

    def test_create_document_topic(self):
        """POSTing a (document, topic) pair creates the link and exposes it on the document."""
        document, topic = self._build_fixtures()
        payload = {
            'document': str(document.id),
            'topic': str(topic.id)
        }
        res = self.client.post(self.URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # The response body must match the serialized link that was persisted.
        link = models.DocumentTopic.objects.filter(
            document__id=payload['document']
        ).first()
        self.assertDictEqual(res.data, serializers.DocumentTopicSerializer(link).data)
        # The document detail view must now list the topic.
        doc_res = self.client.get(f'{self.DOCUMENT_URL}{document.id}/')
        self.assertIn(topic.id, doc_res.data['topics'])

    def test_delete_document_topic(self):
        """DELETEing a link removes the topic from the document's topic list."""
        document, topic = self._build_fixtures()
        link = models.DocumentTopic.objects.create(document=document, topic=topic)
        res = self.client.delete(f'{self.URL}{link.id}/')
        self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
        doc_res = self.client.get(f'{self.DOCUMENT_URL}{document.id}/')
        self.assertNotIn(topic.id, doc_res.data['topics'])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
import numpy
from numpy import matrix
class Policy(object):
    """A POMDP policy loaded from an APPL-style ".policy" alpha-vector file.

    Attributes:
        actions: (num_vectors, 1) array; actions[i, 0] is the action id of
            the i-th alpha vector.
        policy: (num_vectors, num_states) array of alpha-vector values.
    """

    actions = None
    policy = None

    def __init__(self, num_states, num_actions, filename='policy/default.policy'):
        """Parse the policy file.

        Args:
            num_states: dimensionality of the belief / alpha vectors.
            num_actions: unused here; kept for interface compatibility.
            filename: path to the ".policy" XML-like file.

        Raises:
            IOError: if the file cannot be opened (previously a bare
                except printed a message and then crashed on the unbound
                file handle).
        """
        try:
            with open(filename, 'r') as f:
                lines = f.readlines()
        except IOError:
            print('\nError: unable to open file: ' + filename)
            raise
        # The first three lines are headers and the last line is the
        # closing tag; neither encodes alpha vectors.
        lines = lines[3:]
        self.actions = -1 * numpy.ones((len(lines) - 1, 1, ))
        self.policy = numpy.zeros((len(lines) - 1, num_states, ))
        for i in range(len(lines) - 1):
            if lines[i].find('/AlphaVector') >= 0:
                break
            # The action id is quoted: ... action="<id>" ...
            l = lines[i].find('"')
            r = lines[i].find('"', l + 1)
            self.actions[i] = int(lines[i][l + 1: r])
            # The vector values sit between the closing '>' and ' <'.
            ll = lines[i].find('>')
            rr = lines[i].find(' <')
            self.policy[i] = numpy.matrix(lines[i][ll + 1: rr])

    def select_action(self, b):
        """Return the greedy action for belief b (1-D array summing to 1)."""
        # Two-sided tolerance: the original `sum(b) - 1.0 > eps` missed
        # beliefs summing to less than 1, and its error message indexed a
        # scalar (`sum(b)[0]`), which would itself raise.
        if abs(numpy.sum(b) - 1.0) > 0.00001:
            print('Error: belief does not sum to 1, diff: ', numpy.sum(b) - 1.0)
            sys.exit()
        # Best alpha vector under b determines the action.
        return self.actions[numpy.argmax(numpy.dot(self.policy, b.T)), 0]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Credits: Benjamin Dartigues, Emmanuel Bouilhol, Hayssam Soueidan, Macha Nikolski
import pathlib
from loguru import logger
import constants
import plot
import numpy as np
import helpers
from image_set import ImageSet
from helpers import open_repo
from path import global_root_dir
import collections
def mrna_cytoplasmic_total_count(analysis_repo, keyorder):
    """Compute per-gene cytoplasmic mRNA spot counts and render bar + violin plots.

    Args:
        analysis_repo: image repository to pull mRNA image sets from.
        keyorder: gene names in the order they must appear on the plots.
    """
    counts, medians, errors, conf_intervals = {}, {}, {}, {}
    for gene in constants.analysis_config['MRNA_GENES']:
        logger.info("Running mrna cytoplasmic total count analysis for {}", gene)
        image_set = ImageSet(analysis_repo, ['mrna/%s/' % gene])
        counts[gene] = image_set.compute_cytoplasmic_spots_counts()
        medians[gene] = np.median(counts[gene])
        errors[gene] = helpers.sem(counts[gene], factor=0)
        low, high = helpers.median_confidence_interval(counts[gene])
        conf_intervals[gene] = [low, high]

    def _ordered(d):
        # Re-key a dict so genes appear in the caller-requested order.
        return collections.OrderedDict(sorted(d.items(), key=lambda item: keyorder.index(item[0])))

    medians = _ordered(medians)
    errors = _ordered(errors)
    conf_intervals = _ordered(conf_intervals)
    xlabels = constants.analysis_config['MRNA_GENES_LABEL']

    # Bar plot of medians with SEM and confidence intervals.
    tgt_image_name = constants.analysis_config['FIGURE_NAME_FORMAT'].format(molecule_type="mrna")
    tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
                          tgt_image_name)
    plot.bar_profile_median(medians, errors.values(), 'mrna', xlabels, tgt_fp,
                            conf_intervals, annot=False, data_to_annot=counts)

    # Violin plot of the raw per-image counts.
    tgt_image_name = constants.analysis_config['FIGURE_NAME_VIOLIN_FORMAT'].format(molecule_type="mrna")
    tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
                          tgt_image_name)
    plot.violin_profile(counts, tgt_fp, xlabels, rotation=0, annot=False)
def intensities_cytoplasmic_total_count(analysis_repo, keyorder):
    """Compute per-protein cytoplasmic intensities and render bar + violin plots.

    Args:
        analysis_repo: image repository to pull protein image sets from.
        keyorder: protein names in the order they must appear on the plots.
    """
    intensities, medians, errors, conf_intervals = {}, {}, {}, {}
    for gene in constants.analysis_config['PROTEINS']:
        logger.info("Running protein cytoplasmic total count analysis for {}", gene)
        image_set = ImageSet(analysis_repo, ['protein/%s/' % gene])
        intensities[gene] = image_set.compute_cytoplasmic_intensities()
        medians[gene] = np.median(intensities[gene])
        errors[gene] = helpers.sem(intensities[gene], factor=0)
        low, high = helpers.median_confidence_interval(intensities[gene])
        conf_intervals[gene] = [low, high]

    def _ordered(d):
        # Re-key a dict so proteins appear in the caller-requested order.
        return collections.OrderedDict(sorted(d.items(), key=lambda item: keyorder.index(item[0])))

    medians = _ordered(medians)
    errors = _ordered(errors)
    conf_intervals = _ordered(conf_intervals)
    xlabels = constants.analysis_config['PROTEINS_LABEL']

    # Bar plot of medians with SEM and confidence intervals.
    tgt_image_name = constants.analysis_config['FIGURE_NAME_FORMAT'].format(molecule_type="protein")
    tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
                          tgt_image_name)
    plot.bar_profile_median(medians, errors.values(), 'proteins', xlabels, tgt_fp,
                            conf_intervals, annot=False, data_to_annot=intensities)

    # Violin plot of the raw per-image intensities.
    tgt_image_name = constants.analysis_config['FIGURE_NAME_VIOLIN_FORMAT'].format(molecule_type="protein")
    tgt_fp = pathlib.Path(constants.analysis_config['FIGURE_OUTPUT_PATH'].format(root_dir=global_root_dir),
                          tgt_image_name)
    plot.violin_profile(intensities, tgt_fp, xlabels, rotation=0, annot=True)
'''
Figure 5A left panel: arhgdia and arhgdia prrc2c mRNA cytoplasmic total count
Figure 5A left panel: arhgdia and arhgdia prrc2c protein cytoplasmic total count
Figure S6A top left panel: arhgdia and arhgdia nocodazole mRNA cytoplasmic total count
Figure S6A top right panel: arhgdia and arhgdia nocodazole protein cytoplasmic total count
Figure S6A middle left panel: pard3 and pard3 nocodazole mRNA cytoplasmic total count
Figure S6A middle right panel: arhgdia and arhgdia CytoD protein cytoplasmic total count
Figure S6A bottom left panel: arhgdia cytod mRNA cytoplasmic total count
Figure S6A bottom right panel: arhgdia cytod protein cytoplasmic total count
'''
# Each configuration entry is:
#   [0] analysis-config JSON path, relative to global_root_dir
#   [1] key order used when plotting (gene/condition identifiers)
#   [2] comparison-axis label (not read by the __main__ loop below)
configurations = [
    ["src/analysis/cytoplasmic_total_count/config_prrc2c.json", ["arhgdia/control", "arhgdia/prrc2c_depleted"], "Timepoint"],
    ["src/analysis/cytoplasmic_total_count/config_nocodazole_arhgdia.json", ["arhgdia", "arhgdia_nocodazole"], "Gene"],
    ["src/analysis/cytoplasmic_total_count/config_nocodazole_pard3.json", ["pard3", "pard3_nocodazole"], "Gene"],
    ["src/analysis/cytoplasmic_total_count/config_cytod.json", ["arhgdia_control", "arhgdia_cytod"], "Gene"]
]
if __name__ == '__main__':
    # Run both analyses (mRNA counts, protein intensities) for every
    # experimental configuration listed above.
    for config_path, key_order, _axis in configurations:
        conf_full_path = pathlib.Path(global_root_dir, config_path)
        constants.init_config(analysis_config_js_path=conf_full_path)
        repo = open_repo()
        mrna_cytoplasmic_total_count(repo, key_order)
        intensities_cytoplasmic_total_count(repo, key_order)
|
nilq/baby-python
|
python
|
import matplotlib
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import networkx as nx
import numpy as np
import sklearn.metrics as metrics
import torch
import torch.nn as nn
from torch.autograd import Variable
import tensorboardX
from tensorboardX import SummaryWriter
import argparse
import os
import pickle
import random
import shutil
import time
import cross_test
import cross_val
import encoders
import gen.feat as featgen
import gen.data as datagen
from graph_sampler import GraphSampler
import load_data
import util
import copy
import math
'''
改动的地方
1.把数据集分割成train,valid,test(test_num在cross_val.prepare_val_data 调用的时候是写死的)
2.调用benchmark_task_val(prog_args, writer=writer,feat='node-feat')加了参数feat,默认是只用node_label的onehot编码
3.模型改动:(1)加入残差网络
'''
def evaluate(dataset, model, args, name='Validation', max_num_examples=None):
    """Run `model` over `dataset` and return binary-classification metrics.

    Args:
        dataset: iterable of batch dicts with 'adj', 'feats', 'label',
            'num_nodes', 'assign_feats' and 'res' tensors.
        model: graph classifier called as
            model(h0, adj, batch_num_nodes, assign_x=..., res_x=...).
        args: parsed arguments; only args.batch_size is read here.
        name: label used in the printed output.
        max_num_examples: if set, stop once roughly this many examples
            have been scored.

    Returns:
        dict with 'prec', 'recall', 'acc', 'F1', 'ravel' (confusion-matrix
        counts), 'auc' and 'mcc'. NOTE: class 0 is treated as the positive
        class throughout (pos_label=0).
    """
    model.eval()
    labels = []
    preds = []
    for batch_idx, data in enumerate(dataset):
        adj = Variable(data['adj'].float(), requires_grad=False)
        h0 = Variable(data['feats'].float())
        labels.append(data['label'].long().numpy())
        batch_num_nodes = data['num_nodes'].int().numpy()
        assign_input = Variable(data['assign_feats'].float(), requires_grad=False)
        res = Variable(data['res'].float(), requires_grad=False)
        ypred = model(h0, adj, batch_num_nodes, assign_x=assign_input, res_x=res)
        # Hard class prediction per graph (argmax over class scores).
        _, indices = torch.max(ypred, 1)
        preds.append(indices.cpu().data.numpy())
        if max_num_examples is not None:
            if (batch_idx+1)*args.batch_size > max_num_examples:
                break
    labels = np.hstack(labels)
    preds = np.hstack(preds)
    # NOTE(review): the ROC curve is computed from hard predictions, not
    # probability scores, so 'auc' is coarse — confirm this is intended.
    fpr, tpr, thresholds = metrics.roc_curve(labels, preds, pos_label=0)
    # With labels in {0, 1}, confusion_matrix().ravel() yields
    # (C[0,0], C[0,1], C[1,0], C[1,1]); since class 0 is positive here,
    # that order is (TP, FN, FP, TN) — matching the unpacking below.
    ravel = metrics.confusion_matrix(labels, preds).ravel()
    TP,FN ,FP,TN =ravel
    a = TP + FP
    b = TP + FN
    c = TN + FP
    d = TN + FN
    # Matthews correlation coefficient; the +0.0001 guards against a zero
    # denominator when a confusion-matrix margin is empty.
    mcc=((TP*TN)-(FP*FN))/(math.sqrt(float(a*b*c*d)+0.0001))
    result = {'prec': metrics.precision_score(labels, preds, pos_label=0, average='binary'),#macro
              'recall': metrics.recall_score(labels, preds, pos_label=0, average='binary'),#macro
              'acc': metrics.accuracy_score(labels, preds),
              'F1': metrics.f1_score(labels, preds, pos_label=0, average="binary"),#micro
              'ravel': ravel,
              'auc': metrics.auc(fpr, tpr),
              'mcc': mcc}
    print(name, " accuracy:", result['acc'])
    # print(name, ' tn, fp, fn, tp=', result['ravel'])
    # print(name, ' auc:', result['auc'])
    # print(name, ' mcc:', result['mcc'])
    # print(name, " recall:", result['recall'])
    return result
def gen_prefix(args):
    """Build the run-identifier string used for log dirs and figure names.

    The identifier starts with the benchmark name (or dataset name when no
    benchmark is given), then encodes method, layer counts, pooling setup,
    hidden/output dimensions and flags, joined by underscores.
    """
    parts = [args.bmname if args.bmname is not None else args.dataset]
    parts.append(args.method)
    if args.method == 'soft-assign':
        parts.append('l' + str(args.num_gc_layers) + 'x' + str(args.num_pool))
        parts.append('ar' + str(int(args.assign_ratio * 100)))
        if args.linkpred:
            parts.append('lp')
    else:
        parts.append('l' + str(args.num_gc_layers))
    parts.append('h' + str(args.hidden_dim) + '_o' + str(args.output_dim))
    if not args.bias:
        parts.append('nobias')
    if len(args.name_suffix) > 0:
        parts.append(args.name_suffix)
    return '_'.join(parts)
def gen_train_plt_name(args):
    """Path of the training-curve PNG for this run (under results/)."""
    return 'results/%s.png' % gen_prefix(args)
def log_assignment(assign_tensor, writer, epoch, batch_idx):
    """Render soft-assignment matrices for a few graphs and log to TensorBoard.

    Args:
        assign_tensor: batched node-to-cluster assignment tensor.
        writer: tensorboardX SummaryWriter.
        epoch: global step used for the image tag.
        batch_idx: indices (within the batch) of the graphs to visualize.
    """
    plt.switch_backend('agg')  # headless rendering
    fig = plt.figure(figsize=(8,6), dpi=300)
    # has to be smaller than args.batch_size
    for i in range(len(batch_idx)):
        plt.subplot(2, 2, i+1)
        plt.imshow(assign_tensor.cpu().data.numpy()[batch_idx[i]], cmap=plt.get_cmap('BuPu'))
        cbar = plt.colorbar()
        cbar.solids.set_edgecolor("face")
    plt.tight_layout()
    fig.canvas.draw()
    #data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    data = tensorboardX.utils.figure_to_image(fig)
    writer.add_image('assignment', data, epoch)
def log_graph(adj, batch_num_nodes, writer, epoch, batch_idx, assign_tensor=None):
    """Draw a few graphs from the batch and log them to TensorBoard.

    Writes two images: 'graphs' (with node labels) and 'graphs_colored'
    (nodes colored by their argmax cluster assignment).

    Args:
        adj: batched, padded dense adjacency tensor.
        batch_num_nodes: per-graph node counts, used to crop the padding.
        writer: tensorboardX SummaryWriter.
        epoch: global step used for the image tags.
        batch_idx: indices (within the batch) of the graphs to draw.
        assign_tensor: node-to-cluster soft assignment; required for the
            colored image (it is dereferenced unconditionally below).
    """
    plt.switch_backend('agg')  # headless rendering
    fig = plt.figure(figsize=(8,6), dpi=300)
    for i in range(len(batch_idx)):
        ax = plt.subplot(2, 2, i+1)
        num_nodes = batch_num_nodes[batch_idx[i]]
        # Crop the padded adjacency to the real node count.
        adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
        G = nx.from_numpy_matrix(adj_matrix)
        nx.draw(G, pos=nx.spring_layout(G), with_labels=True, node_color='#336699',
                edge_color='grey', width=0.5, node_size=300,
                alpha=0.7)
        ax.xaxis.set_visible(False)
    plt.tight_layout()
    fig.canvas.draw()
    #data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    data = tensorboardX.utils.figure_to_image(fig)
    writer.add_image('graphs', data, epoch)

    # log a label-less version
    #fig = plt.figure(figsize=(8,6), dpi=300)
    #for i in range(len(batch_idx)):
    #    ax = plt.subplot(2, 2, i+1)
    #    num_nodes = batch_num_nodes[batch_idx[i]]
    #    adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
    #    G = nx.from_numpy_matrix(adj_matrix)
    #    nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color='#336699',
    #            edge_color='grey', width=0.5, node_size=25,
    #            alpha=0.8)
    #plt.tight_layout()
    #fig.canvas.draw()
    #data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    #writer.add_image('graphs_no_label', data, epoch)

    # Second image: nodes colored according to their cluster assignment.
    assignment = assign_tensor.cpu().data.numpy()
    fig = plt.figure(figsize=(8,6), dpi=300)
    num_clusters = assignment.shape[2]
    all_colors = np.array(range(num_clusters))
    for i in range(len(batch_idx)):
        ax = plt.subplot(2, 2, i+1)
        num_nodes = batch_num_nodes[batch_idx[i]]
        adj_matrix = adj[batch_idx[i], :num_nodes, :num_nodes].cpu().data.numpy()
        # Hard cluster label per node = argmax of the soft assignment.
        label = np.argmax(assignment[batch_idx[i]], axis=1).astype(int)
        label = label[: batch_num_nodes[batch_idx[i]]]
        node_colors = all_colors[label]
        G = nx.from_numpy_matrix(adj_matrix)
        nx.draw(G, pos=nx.spring_layout(G), with_labels=False, node_color=node_colors,
                edge_color='grey', width=0.4, node_size=50, cmap=plt.get_cmap('Set1'),
                vmin=0, vmax=num_clusters-1,
                alpha=0.8)
    plt.tight_layout()
    fig.canvas.draw()
    #data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    #data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    data = tensorboardX.utils.figure_to_image(fig)
    writer.add_image('graphs_colored', data, epoch)
def train(dataset, model, args, same_feat=True, val_dataset=None, test_dataset=None, writer=None,
          mask_nodes=True, graphs=None, idx=0):
    """Train `model` on `dataset`, tracking the best validation snapshot.

    Args:
        dataset: training DataLoader of batch dicts ('adj', 'feats',
            'label', 'num_nodes', 'assign_feats', 'res').
        model: graph classifier with .loss(); soft-assign models may also
            expose .assign_tensor and .link_loss.
        args: parsed arguments (num_epochs, clip, method, linkpred, ...).
        same_feat: unused here; kept for interface compatibility.
        val_dataset / test_dataset: optional loaders evaluated each epoch.
        writer: optional tensorboardX SummaryWriter.
        mask_nodes: if True, pass per-graph node counts to the model.
        graphs: full graph list, re-sampled for the final best-model tests.
        idx: fold index, used only for logging.

    Returns:
        (model, val_accs): the trained model and the per-epoch validation
        accuracies.
    """
    writer_batch_idx = [0, 3, 6, 9]  # graphs visualized in TensorBoard images
    # Only optimize parameters that require gradients.
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001)
    iter = 0
    best_val_result = {
        'epoch': 0,
        'loss': 0,
        'acc': 0}
    test_result = {
        'epoch': 0,
        'loss': 0,
        'acc': 0}
    train_accs = []
    train_epochs = []
    best_val_accs = []
    best_val_epochs = []
    test_accs = []
    test_epochs = []
    val_accs = []
    for epoch in range(args.num_epochs):
        total_time = 0
        avg_loss = 0.0
        model.train()
        print('idx:', idx, 'Epoch: ', epoch)
        for batch_idx, data in enumerate(dataset):
            begin_time = time.time()
            model.zero_grad()
            adj = Variable(data['adj'].float(), requires_grad=False)
            h0 = Variable(data['feats'].float(), requires_grad=False)
            label = Variable(data['label'].long())
            batch_num_nodes = data['num_nodes'].int().numpy() if mask_nodes else None
            assign_input = Variable(data['assign_feats'].float(), requires_grad=False)
            res = Variable(data['res'].float(), requires_grad=False)
            ypred = model(h0, adj, batch_num_nodes, assign_x=assign_input, res_x=res)
            # Link-prediction auxiliary loss only applies to soft-assign
            # models with --linkpred.
            if not args.method == 'soft-assign' or not args.linkpred:
                loss = model.loss(ypred, label)
            else:
                loss = model.loss(ypred, label, adj, batch_num_nodes)
            loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            optimizer.step()
            iter += 1
            avg_loss += loss
            #if iter % 20 == 0:
            #    print('Iter: ', iter, ', loss: ', loss.data[0])
            elapsed = time.time() - begin_time
            total_time += elapsed

            # log once per XX epochs (mid-epoch batch, soft-assign only)
            if epoch % 10 == 0 and batch_idx == len(dataset) // 2 and args.method == 'soft-assign' and writer is not None:
                log_assignment(model.assign_tensor, writer, epoch, writer_batch_idx)
                if args.log_graph:
                    log_graph(adj, batch_num_nodes, writer, epoch, writer_batch_idx, model.assign_tensor)
        avg_loss /= batch_idx + 1
        if writer is not None:
            writer.add_scalar('loss/avg_loss', avg_loss, epoch)
            if args.linkpred:
                writer.add_scalar('loss/linkpred_loss', model.link_loss, epoch)
        print('Avg loss: ', avg_loss, '; epoch time: ', total_time)
        # Training accuracy is estimated on at most ~100 examples.
        result = evaluate(dataset, model, args, name='Train', max_num_examples=100)
        train_accs.append(result['acc'])
        train_epochs.append(epoch)
        '''
        'prec': metrics.precision_score(labels, preds, average='macro'),
        'recall': metrics.recall_score(labels, preds, average='macro'),
        'acc': metrics.accuracy_score(labels, preds),
        'F1': metrics.f1_score(labels,
        '''
        if val_dataset is not None:
            val_result = evaluate(val_dataset, model, args, name='Validation')
            val_accs.append(val_result['acc'])
            print('Val result:', val_result)
            # Snapshot the model whenever validation accuracy improves
            # (the -1e-7 slack also accepts exact ties).
            if val_result['acc'] > best_val_result['acc'] - 1e-7:
                best_val_result['acc'] = val_result['acc']
                best_val_result['epoch'] = epoch
                best_val_result['loss'] = avg_loss
                best_val_result['model'] = copy.deepcopy(model.state_dict())
                print('Best val result: ', 'acc:', best_val_result['acc'], 'epoch:', best_val_result['epoch'], 'loss:',
                      best_val_result['loss'])
                # print('acc:')
                # print(best_val_result['acc'])
                # print('epoch:')
                # print(best_val_result['epoch'])
                # print('loss:')
                # print(best_val_result['loss'])
        if test_dataset is not None:
            test_result = evaluate(test_dataset, model, args, name='Test')
            test_result['epoch'] = epoch
        # NOTE(review): this block reads val_result, which only exists when
        # val_dataset was provided — confirm callers always pass one.
        if writer is not None:
            writer.add_scalar('acc/train_acc', result['acc'], epoch)
            writer.add_scalar('acc/val_acc', val_result['acc'], epoch)
            writer.add_scalar('loss/best_val_loss', best_val_result['loss'], epoch)
            if test_dataset is not None:
                writer.add_scalar('acc/test_acc', test_result['acc'], epoch)
        best_val_epochs.append(best_val_result['epoch'])
        best_val_accs.append(best_val_result['acc'])
        if test_dataset is not None:
            print('Test result: ', test_result)
            test_epochs.append(test_result['epoch'])
            test_accs.append(test_result['acc'])
    # Evaluate the best validation snapshot on 10 freshly sampled test sets.
    for i in range(10):
        test_loader = cross_test.prepare_test_data(graphs=graphs, args=args, max_nodes=args.max_nodes)
        model.load_state_dict(best_val_result['model'])
        evaluate(test_loader, model, args, name='Best model Test')
    # Plot the training curve (smoothed) with best-val / test markers.
    matplotlib.style.use('seaborn')
    plt.switch_backend('agg')
    plt.figure()
    plt.plot(train_epochs, util.exp_moving_avg(train_accs, 0.85), '-', lw=1)
    if test_dataset is not None:
        plt.plot(best_val_epochs, best_val_accs, 'bo', test_epochs, test_accs, 'go')
        plt.legend(['train', 'val', 'test'])
    else:
        plt.plot(best_val_epochs, best_val_accs, 'bo')
        plt.legend(['train', 'val'])
    plt.savefig(gen_train_plt_name(args), dpi=600)
    plt.close()
    matplotlib.style.use('default')
    return model, val_accs
def prepare_data(graphs, args, test_graphs=None, max_nodes=0):
    """Shuffle `graphs`, split into train/val(/test) and wrap each split in a DataLoader.

    Args:
        graphs: list of networkx graphs; NOTE: shuffled in place.
        args: parsed arguments (train_ratio, test_ratio, feature_type,
            batch_size, num_workers).
        test_graphs: optional explicit test split; if None, one is carved
            out of `graphs` using args.test_ratio.
        max_nodes: padding size passed to GraphSampler (0 = let it decide).

    Returns:
        (train_loader, val_loader, test_loader, max_num_nodes, feat_dim,
        assign_feat_dim); the three dimensions are taken from the sampler
        of the *test* split, matching the original behavior.
    """
    random.shuffle(graphs)  # in-place: mutates the caller's list
    if test_graphs is None:
        train_idx = int(len(graphs) * args.train_ratio)
        test_idx = int(len(graphs) * (1 - args.test_ratio))
        train_graphs = graphs[:train_idx]
        val_graphs = graphs[train_idx: test_idx]
        test_graphs = graphs[test_idx:]
    else:
        train_idx = int(len(graphs) * args.train_ratio)
        train_graphs = graphs[:train_idx]
        val_graphs = graphs[train_idx:]
    print('Num training graphs: ', len(train_graphs),
          '; Num validation graphs: ', len(val_graphs),
          '; Num testing graphs: ', len(test_graphs))
    print('Number of graphs: ', len(graphs))
    print('Number of edges: ', sum([G.number_of_edges() for G in graphs]))
    print('Max, avg, std of graph size: ',
          max([G.number_of_nodes() for G in graphs]), ', '
          "{0:.2f}".format(np.mean([G.number_of_nodes() for G in graphs])), ', '
          "{0:.2f}".format(np.std([G.number_of_nodes() for G in graphs])))

    def _make_loader(split, shuffle):
        # One sampler + loader per split; factored out of three copy-pasted
        # construction blocks.
        sampler = GraphSampler(split, normalize=False, max_num_nodes=max_nodes,
                               features=args.feature_type)
        loader = torch.utils.data.DataLoader(
            sampler,
            batch_size=args.batch_size,
            shuffle=shuffle,
            num_workers=args.num_workers)
        return loader, sampler

    train_dataset_loader, _ = _make_loader(train_graphs, True)
    val_dataset_loader, _ = _make_loader(val_graphs, False)
    test_dataset_loader, test_sampler = _make_loader(test_graphs, False)
    return train_dataset_loader, val_dataset_loader, test_dataset_loader, \
        test_sampler.max_num_nodes, test_sampler.feat_dim, test_sampler.assign_feat_dim
def syn_community1v2(args, writer=None, export_graphs=False):
    """Binary classification on synthetic data: single-community BA graphs
    (label 0) vs. two-community BA graphs (label 1)."""
    # Class 0: plain Barabasi-Albert graphs with constant node features.
    graphs1 = datagen.gen_ba(range(40, 60), range(4, 5), 500,
                             featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float)))
    for G in graphs1:
        G.graph['label'] = 0
    if export_graphs:
        util.draw_graph_list(graphs1[:16], 4, 4, 'figs/ba')

    # Class 1: two loosely inter-connected BA communities.
    graphs2 = datagen.gen_2community_ba(range(20, 30), range(4, 5), 500, 0.3,
                                        [featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))])
    for G in graphs2:
        G.graph['label'] = 1
    if export_graphs:
        util.draw_graph_list(graphs2[:16], 4, 4, 'figs/ba2')

    graphs = graphs1 + graphs2
    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = prepare_data(graphs, args)

    # Select the encoder variant requested on the command line.
    if args.method == 'soft-assign':
        print('Method: soft-assign')
        model = encoders.SoftPoolingGcnEncoder(
            max_num_nodes,
            input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
            args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
            bn=args.bn, linkpred=args.linkpred, assign_input_dim=assign_input_dim)
    elif args.method == 'base-set2set':
        print('Method: base-set2set')
        model = encoders.GcnSet2SetEncoder(input_dim, args.hidden_dim, args.output_dim, 2,
                                           args.num_gc_layers, bn=args.bn)
    else:
        print('Method: base')
        model = encoders.GcnEncoderGraph(input_dim, args.hidden_dim, args.output_dim, 2,
                                         args.num_gc_layers, bn=args.bn)

    train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
          writer=writer)
def syn_community2hier(args, writer=None):
    """Three-way classification on synthetic hierarchical-community graphs:
    2x4 hierarchy (label 0), 3x3 hierarchy (label 1), 2-community BA (label 2)."""
    feat_gen = [featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))]
    graphs1 = datagen.gen_2hier(1000, [2, 4], 10, range(4, 5), 0.1, 0.03, feat_gen)
    graphs2 = datagen.gen_2hier(1000, [3, 3], 10, range(4, 5), 0.1, 0.03, feat_gen)
    graphs3 = datagen.gen_2community_ba(range(28, 33), range(4, 7), 1000, 0.25, feat_gen)
    # Assign one class label per generator batch.
    for label, batch in enumerate((graphs1, graphs2, graphs3)):
        for G in batch:
            G.graph['label'] = label

    graphs = graphs1 + graphs2 + graphs3
    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = prepare_data(graphs, args)

    # Select the encoder variant requested on the command line.
    if args.method == 'soft-assign':
        print('Method: soft-assign')
        model = encoders.SoftPoolingGcnEncoder(
            max_num_nodes,
            input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
            args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
            bn=args.bn, linkpred=args.linkpred, args=args, assign_input_dim=assign_input_dim)
    elif args.method == 'base-set2set':
        print('Method: base-set2set')
        model = encoders.GcnSet2SetEncoder(input_dim, args.hidden_dim, args.output_dim, 2,
                                           args.num_gc_layers, bn=args.bn, args=args, assign_input_dim=assign_input_dim)
    else:
        print('Method: base')
        model = encoders.GcnEncoderGraph(input_dim, args.hidden_dim, args.output_dim, 2,
                                         args.num_gc_layers, bn=args.bn, args=args)

    train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
          writer=writer)
def pkl_task(args, feat=None):
    """Train and evaluate on graphs loaded from a pickle file.

    The pickle is expected to hold (train_graphs, train_labels,
    test_graphs, test_labels). When `feat` is None, constant node
    features are generated for every graph.
    """
    with open(os.path.join(args.datadir, args.pkl_fname), 'rb') as pkl_file:
        data = pickle.load(pkl_file)
    graphs = data[0]
    labels = data[1]
    test_graphs = data[2]
    test_labels = data[3]

    # Attach graph-level labels.
    for i in range(len(graphs)):
        graphs[i].graph['label'] = labels[i]
    for i in range(len(test_graphs)):
        test_graphs[i].graph['label'] = test_labels[i]

    if feat is None:
        featgen_const = featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))
        for G in graphs:
            featgen_const.gen_node_features(G)
        for G in test_graphs:
            featgen_const.gen_node_features(G)

    # BUG FIX: prepare_data returns 6 values (train/val/test loaders plus
    # three dimensions); the old 3-way unpacking raised ValueError at runtime.
    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = \
        prepare_data(graphs, args, test_graphs=test_graphs)
    model = encoders.GcnEncoderGraph(
        args.input_dim, args.hidden_dim, args.output_dim, args.num_classes,
        args.num_gc_layers, bn=args.bn)
    train(train_dataset, model, args, test_dataset=test_dataset)
    evaluate(test_dataset, model, args, 'Validation')
def benchmark_task(args, writer=None, feat='node-label'):
    """Train and evaluate once on a benchmark dataset (no cross-validation).

    Node features are chosen by `feat`: 'node-feat' uses precomputed
    features, 'node-label' one-hot node labels, anything else constant
    features.
    """
    graphs = load_data.read_graphfile(args.datadir, args.bmname, max_nodes=args.max_nodes)

    if feat == 'node-feat' and 'feat_dim' in graphs[0].graph:
        print('Using node features')
        input_dim = graphs[0].graph['feat_dim']
    # CONSISTENCY FIX: use util.node_dict (as benchmark_task_val already
    # does) instead of the networkx `G.node[...]` accessor, which was
    # removed in networkx 2.4.
    elif feat == 'node-label' and 'label' in util.node_dict(graphs[0])[0]:
        print('Using node labels')
        for G in graphs:
            for u in G.nodes():
                util.node_dict(G)[u]['feat'] = np.array(util.node_dict(G)[u]['label'])
    else:
        print('Using constant labels')
        featgen_const = featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))
        for G in graphs:
            featgen_const.gen_node_features(G)

    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = \
        prepare_data(graphs, args, max_nodes=args.max_nodes)

    # Select the encoder variant requested on the command line.
    if args.method == 'soft-assign':
        print('Method: soft-assign')
        model = encoders.SoftPoolingGcnEncoder(
            max_num_nodes,
            input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
            args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
            bn=args.bn, dropout=args.dropout, linkpred=args.linkpred, args=args,
            assign_input_dim=assign_input_dim)
    elif args.method == 'base-set2set':
        print('Method: base-set2set')
        model = encoders.GcnSet2SetEncoder(
            input_dim, args.hidden_dim, args.output_dim, args.num_classes,
            args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args)
    else:
        print('Method: base')
        model = encoders.GcnEncoderGraph(
            input_dim, args.hidden_dim, args.output_dim, args.num_classes,
            args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args)

    train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
          writer=writer)
    evaluate(test_dataset, model, args, 'Validation')
# benchmark: a baseline / standard reference evaluation procedure
def benchmark_task_val(args, writer=None, feat='node-label'):
    """Cross-validation-style training on a benchmark dataset.

    Loads graphs, attaches node features according to `feat`
    ('node-feat' = precomputed features, 'node-label' = one-hot labels,
    otherwise constant features), trains one model per fold and prints the
    epoch-wise mean validation accuracy across folds.
    """
    all_vals = []
    graphs = load_data.read_graphfile(args.datadir, args.bmname, max_nodes=args.max_nodes)
    example_node = util.node_dict(graphs[0])[0]

    if feat == 'node-feat' and 'feat_dim' in graphs[0].graph:
        print('Using node features')
        input_dim = graphs[0].graph['feat_dim']
    elif feat == 'node-label' and 'label' in example_node:
        print('Using node labels')
        for G in graphs:
            for u in G.nodes():
                util.node_dict(G)[u]['feat'] = np.array(util.node_dict(G)[u]['label'])
    else:
        print('Using constant labels')
        featgen_const = featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))
        for G in graphs:
            featgen_const.gen_node_features(G)

    # NOTE(review): only a single fold is run (range(1)) and train_num=500
    # is hard-coded in the prepare_val_data call — confirm both are intended.
    for i in range(1):
        train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim, res_dim = \
            cross_val.prepare_val_data(graphs, args, i, max_nodes=args.max_nodes, train_num=500)  # 938
        # Select the encoder variant requested on the command line.
        if args.method == 'soft-assign':
            print('Method: soft-assign')
            model = encoders.SoftPoolingGcnEncoder(
                max_num_nodes,
                input_dim, args.hidden_dim, args.output_dim, args.num_classes, args.num_gc_layers,
                args.hidden_dim, assign_ratio=args.assign_ratio, num_pooling=args.num_pool,
                bn=args.bn, dropout=args.dropout, linkpred=args.linkpred, args=args,
                assign_input_dim=assign_input_dim, res_dim=res_dim)
        elif args.method == 'base-set2set':
            print('Method: base-set2set')
            model = encoders.GcnSet2SetEncoder(
                input_dim, args.hidden_dim, args.output_dim, args.num_classes,
                args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args)
        else:
            print('Method: base')
            model = encoders.GcnEncoderGraph(
                input_dim, args.hidden_dim, args.output_dim, args.num_classes,
                args.num_gc_layers, bn=args.bn, dropout=args.dropout, args=args, assign_input_dim=assign_input_dim, res_dim=res_dim)
        _, val_accs = train(train_dataset, model, args, val_dataset=val_dataset, test_dataset=test_dataset,
                            writer=writer, graphs=graphs, idx=i)
        all_vals.append(np.array(val_accs))

    # Mean validation accuracy per epoch across folds; report the best.
    all_vals = np.vstack(all_vals)
    all_vals = np.mean(all_vals, axis=0)
    print(all_vals)
    print(np.max(all_vals))
    print(np.argmax(all_vals))
def arg_parse():
    """Build the command-line parser and return the parsed arguments.

    Input sources (--dataset / --bmname / --pkl) are mutually exclusive;
    set_defaults below supplies values for every option so the script also
    runs with no flags at all.
    """
    parser = argparse.ArgumentParser(description='GraphPool arguments.')
    # add_mutually_exclusive_group: at most one of these input sources
    # may be supplied on the command line.
    io_parser = parser.add_mutually_exclusive_group(required=False)
    io_parser.add_argument('--dataset', dest='dataset',
                           help='Input dataset.')
    # add_argument_group: group related options together in --help output.
    benchmark_parser = io_parser.add_argument_group()
    benchmark_parser.add_argument('--bmname', dest='bmname',
                                  help='Name of the benchmark dataset')
    io_parser.add_argument('--pkl', dest='pkl_fname',
                           help='Name of the pkl data file')

    # Another group: soft-assign pooling options.
    softpool_parser = parser.add_argument_group()
    softpool_parser.add_argument('--assign-ratio', dest='assign_ratio', type=float,
                                 help='ratio of number of nodes in consecutive layers')
    softpool_parser.add_argument('--num-pool', dest='num_pool', type=int,
                                 help='number of pooling layers')
    parser.add_argument('--linkpred', dest='linkpred', action='store_const',
                        const=True, default=False,
                        help='Whether link prediction side objective is used')

    parser.add_argument('--datadir', dest='datadir',
                        help='Directory where benchmark is located')
    parser.add_argument('--logdir', dest='logdir',
                        help='Tensorboard log directory')
    parser.add_argument('--cpu', dest='cpu',
                        help='CPU.')
    # parser.add_argument('--cuda', dest='cuda',
    #                     help='CUDA.')
    parser.add_argument('--max-nodes', dest='max_nodes', type=int,
                        help='Maximum number of nodes (ignore graghs with nodes exceeding the number.')
    parser.add_argument('--lr', dest='lr', type=float,
                        help='Learning rate.')
    parser.add_argument('--clip', dest='clip', type=float,
                        help='Gradient clipping.')
    parser.add_argument('--batch-size', dest='batch_size', type=int,
                        help='Batch size.')
    parser.add_argument('--epochs', dest='num_epochs', type=int,
                        help='Number of epochs to train.')
    parser.add_argument('--train-ratio', dest='train_ratio', type=float,
                        help='Ratio of number of graphs training set to all graphs.')
    parser.add_argument('--num_workers', dest='num_workers', type=int,
                        help='Number of workers to load data.')
    parser.add_argument('--feature', dest='feature_type',
                        help='Feature used for encoder. Can be: id, deg')
    parser.add_argument('--input-dim', dest='input_dim', type=int,
                        help='Input feature dimension')
    parser.add_argument('--hidden-dim', dest='hidden_dim', type=int,
                        help='Hidden dimension')
    parser.add_argument('--output-dim', dest='output_dim', type=int,
                        help='Output dimension')
    parser.add_argument('--num-classes', dest='num_classes', type=int,
                        help='Number of label classes')
    parser.add_argument('--num-gc-layers', dest='num_gc_layers', type=int,
                        help='Number of graph convolution layers before each pooling')
    parser.add_argument('--nobn', dest='bn', action='store_const',
                        const=False, default=True,
                        help='Whether batch normalization is used')
    parser.add_argument('--dropout', dest='dropout', type=float,
                        help='Dropout rate.')
    parser.add_argument('--nobias', dest='bias', action='store_const',
                        const=False, default=True,
                        help='Whether to add bias. Default to True.')
    parser.add_argument('--no-log-graph', dest='log_graph', action='store_const',
                        const=False, default=True,
                        help='Whether disable log graph')

    parser.add_argument('--method', dest='method',
                        help='Method. Possible values: base, base-set2set, soft-assign')
    parser.add_argument('--name-suffix', dest='name_suffix',
                        help='suffix added to the output filename')

    # NOTE(review): test_ratio, device and res are set here without a
    # matching add_argument, so they are defaults-only — confirm intended.
    parser.set_defaults(datadir='data',
                        logdir='log',
                        dataset='syn1v2',#syn1v2
                        max_nodes=500,#1000
                        device='0',
                        # cuda='1',
                        feature_type='default',
                        lr=0.001,
                        clip=2.0,
                        batch_size=20,
                        num_epochs=10,#1000
                        train_ratio=0.8,
                        test_ratio=0.1,
                        num_workers=1,
                        input_dim=10,
                        hidden_dim=20,
                        output_dim=20,
                        num_classes=2,
                        num_gc_layers=3,
                        dropout=0.0,
                        method='base',
                        name_suffix='',
                        assign_ratio=0.1,#0.1
                        num_pool=1,#1
                        res=True
                        )
    return parser.parse_args()
def main():
    """Entry point: parse CLI args, reset the TensorBoard log directory,
    and dispatch to the benchmark / pickle / synthetic-dataset task runner.
    """
    prog_args = arg_parse()
    # export scalar data to JSON for external processing
    path = os.path.join(prog_args.logdir, gen_prefix(prog_args))
    if os.path.isdir(path):
        # Start every run with a clean log directory.
        print('Remove existing log dir: ', path)
        shutil.rmtree(path)
    writer = SummaryWriter(path)
    #writer = None
    # os.environ['CUDA_VISIBLE_DEVICES'] = prog_args.cuda
    # print('CUDA', prog_args.cuda)
    if prog_args.bmname is not None:
        # A named benchmark dataset takes precedence over the other modes.
        benchmark_task_val(prog_args, writer=writer,feat='node-feat')#,feat='node-feat'
    elif prog_args.pkl_fname is not None:
        pkl_task(prog_args)
    elif prog_args.dataset is not None:
        if prog_args.dataset == 'syn1v2':
            syn_community1v2(prog_args, writer=writer)
        if prog_args.dataset == 'syn2hier':
            syn_community2hier(prog_args, writer=writer)
    writer.close()
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import pandas as pd
def cyclists():
    """Load the Helsinki cyclist-count CSV and drop fully empty rows/columns."""
    raw = pd.read_csv('src/Helsingin_pyorailijamaarat.csv', sep=';')
    # Rows first, then columns, that contain no data at all.
    cleaned = raw.dropna(how='all').dropna(how='all', axis=1)
    return cleaned
def main():
    # Print the cleaned cyclist DataFrame for a quick visual check.
    print(cyclists())
if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
def calc_fitness(pop):
    """Replace each chromosome in *pop* with [fitness, chromosome], in place.

    The fitness is Schaffer's F6 evaluated on the decoded (x, y) pair.
    Communicates entirely through mutation of *pop*; always returns 0.
    """
    from to_decimal import to_decimal
    from math import sin, sqrt
    for pos, chromosome in enumerate(pop):
        x = to_decimal(chromosome[0])
        y = to_decimal(chromosome[1])
        r2 = x ** 2 + y ** 2
        fitness = 0.5 - (sin(sqrt(r2)) ** 2 - 0.5) / (1 + 0.001 * r2) ** 2
        pop[pos] = [fitness, chromosome]
    return 0
# populacao = [[0,0],[-3,1]]
# calc_fitness(pop=populacao)
# print(populacao)
|
nilq/baby-python
|
python
|
import os
from typing import Tuple, List
from tokenizers import BertWordPieceTokenizer, Tokenizer
import sentencepiece as spm
from enums.configuration import Configuration
from services.arguments.pretrained_arguments_service import PretrainedArgumentsService
from services.file_service import FileService
class BaseTokenizeService:
    """Abstract interface for tokenizer backends.

    Concrete subclasses implement the encode/decode methods; this base
    class only fixes the contract and provides trivial defaults.
    """

    def __init__(self):
        pass

    def encode_tokens(self, tokens: List[str]) -> List[int]:
        """Map already-split tokens to vocabulary ids."""
        pass

    def decode_tokens(self, character_ids: List[int]) -> List[str]:
        """Map vocabulary ids back to token strings."""
        pass

    def decode_string(self, character_ids: List[int]) -> List[str]:
        """Decode vocabulary ids into text."""
        pass

    def id_to_token(self, character_id: int) -> str:
        """Return the token string for a single vocabulary id."""
        pass

    def encode_sequence(self, sequence: str) -> Tuple[List[int], List[str], List[Tuple[int,int]], List[int]]:
        """Encode one string into (ids, tokens, offsets, special-token mask)."""
        pass

    def encode_sequences(self, sequences: List[str]) -> List[Tuple[List[int], List[str], List[Tuple[int,int]], List[int]]]:
        """Encode a batch of strings; one tuple per input sequence."""
        pass

    def tokenize_sequences(self, sequences: List[str]) -> List[List[str]]:
        """Split each input string into its token strings."""
        pass

    @property
    def vocabulary_size(self) -> int:
        """Size of the backing vocabulary; 0 for the abstract base."""
        return 0

    @property
    def mask_token(self) -> str:
        """Token used for masked positions."""
        return '[MASK]'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import fileinput
import hashlib
salt = ''
# The puzzle salt is the last line of the input (stdin or a file argument).
for line in fileinput.input():
    salt = line.strip()
def md5(i):
    # Hex digest of salt + index; ``salt`` is read from input at module load.
    return hashlib.md5((salt + str(i)).encode('utf-8')).hexdigest()
def checkNple(s, n):
    """Return the first character of *s* that repeats n+ times consecutively.

    Returns False when no run of length *n* exists (including for empty s).
    """
    run_char = None
    run_len = 0
    for ch in s:
        if ch == run_char:
            run_len += 1
        else:
            run_char = ch
            run_len = 1
        if run_len >= n:
            return run_char
    return False
def checkTriple(s):
    # First character repeated 3+ times in a row, or False.
    return checkNple(s, 3)
def checkPentuple(s):
    # First character repeated 5+ times in a row, or False.
    return checkNple(s, 5)
def checkKey(n):
    """Return True if hash *n* is a one-time-pad key (AoC 2016 day 14):
    its first tripled character must appear five times in a row in at
    least one of the next 1000 hashes.
    """
    char = checkTriple(md5(n))
    if char != False:
        # NOTE(review): md5(i) is recomputed for heavily overlapping
        # windows; caching digests by index would speed this up a lot.
        for i in range(n + 1, n + 1001):
            if char * 5 in md5(i):
                return True
    return False
i = 0
keysFound = 0
# Advance through indices until 64 keys have been found; the answer is
# the index that produced the 64th key.
while keysFound < 64:
    if checkKey(i):
        keysFound += 1
    i += 1
print(i - 1)
|
nilq/baby-python
|
python
|
# Python 2 script: compares the frequency of short bit patterns in an
# ASCII text's bit stream, then tallies per-bit-position distributions of
# bytes read from 'vid.mp4'.
txt = ''.join(format(ord(x), 'b') for x in 'my foo is bar and baz')
print txt
from collections import Counter
# Candidate bit patterns, keyed by a label; each value is searched for at
# every offset of the text's bit stream below.
secs = {
    '60': '111100',
    '30': '11110',
    '20': '10100',
    '12': '1100',
    '10': '1010',
    '6': '110',
    '5': '101',
    '3': '11',
    '2': '10',
    '1': '1',
    '0': '0',
}
for sz, bitr in secs.iteritems():
    cnt = 0
    # Count occurrences of the pattern at every starting offset (overlaps
    # included).
    for i in range(len(txt)):
        if txt[i:].startswith(bitr):
            cnt += 1
    print sz, str(cnt)
txt_bin = ''
# bin1..bin8 collect bit k of every byte read from the file (MSB first).
bin1 = []
bin2 = []
bin3 = []
bin4 = []
bin5 = []
bin6 = []
bin7 = []
bin8 = []
with open('vid.mp4', 'rb') as f:
    for l in f:
        # print '{!r}'.format(l)
        # break
        for c in l:
            # print '{}'.format(c)
            c_b = ''.join(format(ord(b), '08b') for b in c)
            bin1.append(c_b[0])
            bin2.append(c_b[1])
            bin3.append(c_b[2])
            bin4.append(c_b[3])
            bin5.append(c_b[4])
            bin6.append(c_b[5])
            bin7.append(c_b[6])
            bin8.append(c_b[7])
            txt_bin += c_b
            # NOTE(review): this break only exits the inner per-byte loop;
            # the outer file loop keeps appending one byte per remaining
            # line — presumably the intent was to stop reading entirely.
            if len(txt_bin) > 10000000:
                break
# print txt_bin
bin1_counted = Counter(bin1)
bin2_counted = Counter(bin2)
bin3_counted = Counter(bin3)
bin4_counted = Counter(bin4)
bin5_counted = Counter(bin5)
bin6_counted = Counter(bin6)
bin7_counted = Counter(bin7)
bin8_counted = Counter(bin8)
print bin1_counted
print bin2_counted
print bin3_counted
print bin4_counted
print bin5_counted
print bin6_counted
# NOTE(review): bin8_counted is computed but never printed — likely an
# oversight.
print bin7_counted
|
nilq/baby-python
|
python
|
#############################################
# --- Day 8: I Heard You Like Registers --- #
#############################################
import AOCUtils
class Instruction:
    """One parsed register instruction, e.g. ``"b inc 5 if a > 1"``."""

    # Comparison operators shared by all instances.  Built once at class
    # creation instead of rebuilding the dict on every meetsCondition()
    # call, which the original did.
    _CONDITIONS = {">": lambda x, y: x > y,
                   "<": lambda x, y: x < y,
                   ">=": lambda x, y: x >= y,
                   "<=": lambda x, y: x <= y,
                   "==": lambda x, y: x == y,
                   "!=": lambda x, y: x != y}

    def __init__(self, inst):
        """Parse "<reg> inc|dec <val> if <condReg> <cond> <condVal>"."""
        inst = inst.split()
        self.reg = inst[0]
        # 'inc' adds, 'dec' subtracts; stored as a +1/-1 multiplier.
        self.mul = {"inc": 1, "dec": -1}[inst[1]]
        self.val = int(inst[2])
        self.condReg = inst[4]
        self.cond = inst[5]
        self.condVal = int(inst[6])

    def meetsCondition(self, regs):
        """Return True if this instruction's condition holds for *regs*.

        Registers missing from the *regs* dict are treated as 0.
        """
        return self._CONDITIONS[self.cond](regs.get(self.condReg, 0), self.condVal)
#############################################
instructions = [Instruction(inst) for inst in AOCUtils.loadInput(8)]
registers = dict()  # register name -> current value (missing means 0)
maxEver = 0  # highest value any register ever held (part 2)
# Apply each conditional instruction; track the running maximum as we go.
for inst in instructions:
    if inst.meetsCondition(registers):
        registers[inst.reg] = registers.get(inst.reg, 0) + inst.mul*inst.val
        maxEver = max(maxEver, registers[inst.reg])
print("Part 1: {}".format(max(registers.values())))
print("Part 2: {}".format(maxEver))
AOCUtils.printTimeTaken()
|
nilq/baby-python
|
python
|
import datetime
import geospacelab.express.eiscat_dashboard as eiscat
# Time window for the quicklook plot.
dt_fr = datetime.datetime.strptime('20201209' + '1800', '%Y%m%d%H%M')
dt_to = datetime.datetime.strptime('20201210' + '0600', '%Y%m%d%H%M')
# check the eiscat-hdf5 filename from the EISCAT schedule page, e.g., "EISCAT_2020-12-10_beata_60@uhfa.hdf5"
site = 'UHF'
antenna = 'UHF'
# NOTE(review): load_mode below is unused — the constructor is passed the
# literal 'AUTO' instead of this variable.
modulation = '60'
load_mode = 'AUTO'
# The code will download and load the data automatically as long as the parameters above are set correctly.
viewer = eiscat.EISCATDashboard(
    dt_fr, dt_to, site=site, antenna=antenna, modulation=modulation, load_mode='AUTO'
)
viewer.quicklook()
|
nilq/baby-python
|
python
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.auth.models import User
from django.conf import settings
from postgresqleu.util.fields import LowercaseEmailField
from postgresqleu.countries.models import Country
from postgresqleu.invoices.models import Invoice, InvoicePaymentMethod
from postgresqleu.membership.util import country_validator_choices
from datetime import date, datetime, timedelta
class MembershipConfiguration(models.Model):
    """Site-wide membership settings.

    Stored as a single row with id=1; fetched via :func:`get_config`.
    """
    id = models.IntegerField(null=False, blank=False, primary_key=True)
    sender_email = LowercaseEmailField(null=False, blank=False)
    membership_years = models.IntegerField(null=False, blank=False, default=1,
                                           validators=[MinValueValidator(1), MaxValueValidator(10)],
                                           verbose_name="Membership length",
                                           help_text="Membership length in years")
    membership_cost = models.IntegerField(null=False, blank=False, default=10,
                                          validators=[MinValueValidator(1), ],
                                          verbose_name="Membership cost")
    country_validator = models.CharField(max_length=100, null=False, blank=True,
                                         verbose_name="Country validator",
                                         help_text="Validate member countries against this rule",
                                         choices=country_validator_choices)
    paymentmethods = models.ManyToManyField(InvoicePaymentMethod, blank=False, verbose_name='Invoice payment methods')
def get_config():
    """Return the singleton MembershipConfiguration row (id=1)."""
    return MembershipConfiguration.objects.get(id=1)
class Member(models.Model):
    """A (current or past) member, keyed one-to-one to the auth user."""
    user = models.OneToOneField(User, null=False, blank=False, primary_key=True, on_delete=models.CASCADE)
    fullname = models.CharField(max_length=500, null=False, blank=False,
                                verbose_name='Full name')
    country = models.ForeignKey(Country, null=False, blank=False, on_delete=models.CASCADE)
    listed = models.BooleanField(null=False, blank=False, default=True,
                                 verbose_name='Listed in the public membership list')
    paiduntil = models.DateField(null=True, blank=True, verbose_name='Paid until')
    membersince = models.DateField(null=True, blank=True, verbose_name='Member since')
    # If there is a currently active invoice, link to it here so we can
    # easily render the information on the page.
    activeinvoice = models.ForeignKey(Invoice, null=True, blank=True, on_delete=models.CASCADE)
    # When a membership expiry warning was last sent, so we don't keep
    # sending them over and over again
    expiry_warning_sent = models.DateTimeField(null=True, blank=True, verbose_name='Expiry warning sent')
    country_exception = models.BooleanField(null=False, blank=False, default=False, help_text="Enable to allow member to bypass country validation")
    # WARNING! New fields should most likely be added to the exclude list
    # in MemberForm!!!
    @property
    def expiressoon(self):
        """True when membership is unpaid or expires within 60 days."""
        if self.paiduntil:
            if self.paiduntil < date.today() + timedelta(60):
                return True
            else:
                return False
        else:
            return True
    def __str__(self):
        return "%s (%s)" % (self.fullname, self.user.username)
class MemberLog(models.Model):
    """Free-form, timestamped audit log entries for a member."""
    member = models.ForeignKey(Member, null=False, blank=False, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(null=False)
    message = models.TextField(null=False, blank=False)
    def __str__(self):
        return "%s: %s" % (self.timestamp, self.message)
class Meeting(models.Model):
    """A members' meeting, optionally restricted to a subset of members."""
    name = models.CharField(max_length=100, null=False, blank=False)
    dateandtime = models.DateTimeField(null=False, blank=False)
    # When True the meeting is open to all members, regardless of the
    # explicit members list below.
    allmembers = models.BooleanField(null=False, blank=False)
    members = models.ManyToManyField(Member, blank=True)
    botname = models.CharField(max_length=50, null=False, blank=False)
    def __str__(self):
        return "%s (%s)" % (self.name, self.dateandtime)
    class Meta:
        ordering = ['-dateandtime', ]
    @property
    def joining_active(self):
        """True once we are within 4 hours of the meeting start."""
        if datetime.now() > self.dateandtime - timedelta(hours=4):
            return True
        return False
    def get_key_for(self, member):
        """Return the member's key for this meeting, or None if absent."""
        try:
            return MemberMeetingKey.objects.get(meeting=self, member=member)
        except MemberMeetingKey.DoesNotExist:
            return None
class MemberMeetingKey(models.Model):
    """Per-member access key for one meeting, with optional proxy info."""
    member = models.ForeignKey(Member, null=False, blank=False, on_delete=models.CASCADE)
    meeting = models.ForeignKey(Meeting, null=False, blank=False, on_delete=models.CASCADE)
    key = models.CharField(max_length=100, null=False, blank=False)
    proxyname = models.CharField(max_length=200, null=True, blank=False)
    proxyaccesskey = models.CharField(max_length=100, null=True, blank=False)
    class Meta:
        # A member holds at most one key per meeting.
        unique_together = (('member', 'meeting'), )
|
nilq/baby-python
|
python
|
import numpy as np
def readLine(line):
    """Parse a line of space-separated integers into a list."""
    return list(map(int, line.split(' ')))
def schedule(activities):
    """Greedily assign activities to 'C' or 'J' so neither has an overlap.

    *activities* is a list of [original_index, start, end].  Returns the
    assignment string ordered by original index, or 'IMPOSSIBLE' when an
    activity fits neither person.
    """
    busy_until = {'C': 0, 'J': 0}
    by_start = sorted(activities, key=lambda act: act[1])
    picks = []
    for _, start, end in by_start:
        # Prefer C whenever free, otherwise J, otherwise give up.
        if start >= busy_until['C']:
            picks.append('C')
            busy_until['C'] = end
        elif start >= busy_until['J']:
            picks.append('J')
            busy_until['J'] = end
        else:
            return 'IMPOSSIBLE'
    # Report assignments in the activities' original input order.
    ordered = [''] * len(picks)
    for assigned, activity in zip(picks, by_start):
        ordered[activity[0]] = assigned
    return ''.join(ordered)
def play():
    """Read Code Jam style input from stdin; print one result per case."""
    cases = int(input())
    for i in range(cases):
        numAct = int(input())
        activities = []
        for n in range(numAct):
            # Tag each activity with its input position so the schedule
            # can be reported in input order.
            activities.append([n] + readLine(input()))
        s = schedule(activities)
        print('Case #{}: {}'.format(i+1, s))
play()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# TF-Slim is a lightweight API for defining models concisely (introduced in 2016).
slim = tf.contrib.slim
# 5 x Inception-Resnet-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Build the 35 x 35 Inception-ResNet-A residual block.

    Three parallel conv branches are concatenated, projected back to the
    input depth with a 1x1 conv, scaled by *scale*, and added to *net*.
    """
    # variable_scope manages variable names within the graph and avoids
    # naming collisions between blocks.
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat(axis=3,values=[tower_conv,tower_conv1_1,tower_conv2_2])
        # 1x1 projection back to the input depth so the residual add is valid.
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        # Residual connection.
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
# 10 x Inception-Resnet-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Build the 17 x 17 Inception-ResNet-B residual block.

    Two branches (1x1 and factorized 1x7 + 7x1 convs) are concatenated,
    projected to the input depth, scaled and added to *net*.
    """
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1,7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7,1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat(axis=3, values=[tower_conv,tower_conv1_2])
        up = slim.conv2d(mixed,net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        # Residual connection.
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
# 5 x Inception-Resnet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Build the 8 x 8 Inception-ResNet-C residual block.

    Two branches (1x1 and factorized 1x3 + 3x1 convs) are concatenated,
    projected to the input depth, scaled and added to *net*.
    """
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1,3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1,256, [3,1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat(axis=3, values=[tower_conv,tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        # Residual connection.
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
# Inception-ResNet-v2 base network (the original comment said V1, but the scope below builds InceptionResnetV2)
def inception_resnet_v2_base(inputs,
                             final_endpoint='Conv2d_7b_1x1',
                             output_stride=16,
                             align_feature_maps=False,
                             scope=None):
    """Build the Inception-ResNet-v2 stem up to *final_endpoint*.

    Returns (net, end_points) where end_points maps endpoint names to
    intermediate tensors; construction stops early once the requested
    final endpoint has been produced.

    NOTE(review): this function appears truncated — it ends at the first
    slim.repeat call with no return on that path.
    """
    if output_stride != 8 and output_stride != 16:
        raise ValueError('output_stride must be 8 or 16.')
    # 'SAME' keeps feature maps spatially aligned across strides.
    padding = 'SAME' if align_feature_maps else 'VALID'
    end_points = {}
    def add_and_check_final(name, net):
        # Record the endpoint; True signals the caller to return early.
        end_points[name] = net
        return name == final_endpoint
    with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                            stride=1, padding='SAME'):
            # 149 x 149 x 32
            net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
                              scope='Conv2d_1a_3x3')
            if add_and_check_final('Conv2d_1a_3x3',net):return net, end_points
            # 147 x 147 x 32
            net = slim.conv2d(net, 32, 3, padding=padding,
                              scope='Conv2d_2a_3x3')
            # NOTE(review): this endpoint is registered as 'Conv2d_2b_3x3'
            # although the layer scope is Conv2d_2a_3x3 — the next layer
            # then overwrites the same key; looks like a copy/paste slip.
            if add_and_check_final('Conv2d_2b_3x3',net):return net, end_points
            # 147 x 147 x 64
            net = slim.conv2d(net, 64, 3, padding=padding,
                              scope='Conv2d_2b_3x3')
            if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
            # 73 x 73 x 64
            # NOTE(review): slim pooling layers take `stride=`, not
            # `strides=` — this keyword looks like it would raise a
            # TypeError; confirm against the slim version in use.
            net = slim.max_pool2d(net, 3, strides=2, padding=padding,
                                  scope='MaxPool_3a_3x3')
            # NOTE(review): endpoint name 'Conv2d_3a_3x3' does not match
            # the MaxPool_3a_3x3 scope above.
            if add_and_check_final('Conv2d_3a_3x3', net):return net, end_points
            # 73 x 73 x 80
            net = slim.conv2d(net, 80, 1, padding=padding,
                              scope='Conv2d_3b_1x1')
            if add_and_check_final('Conv2d_3b_1x1',net):return net, end_points
            # 71 x 71 x 192
            net = slim.conv2d(net, 192, 3, padding=padding,
                              scope='Conv2d_4a_3x3')
            if add_and_check_final('Conv2d_4a_3x3',net):return net,end_points
            # 35 x 35 x 192
            net = slim.max_pool2d(net, 3, strides=2, padding=padding,
                                  scope='MaxPool_5a_3x3')
            if add_and_check_final('MaxPool_5a_3x3',net):return net, end_points
            # 35 x 35 x 320
            with tf.variable_scope('Mixed_5b'):
                with tf.variable_scope('Branch_0'):
                    tower_conv = slim.conv2d(net, 96,1,scope='Conv2d_1x1')
                with tf.variable_scope('Branch_1'):
                    tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
                    tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
                                                scope='Conv2d_0b_5x5')
                with tf.variable_scope('Branch_2'):
                    tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
                    tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
                                                scope='Conv2d_0b_3x3')
                    tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
                                                scope='Conv2d_0c_3x3')
                with tf.variable_scope('Branch_3'):
                    tower_pool = slim.avg_pool2d(net, 3, strides=1, padding='SAME',
                                                 scope='Avgpool_0a_3x3')
                    tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
                                               scope='Conv2d_ob_1x1')
                net = tf.concat(
                    [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1],3
                )
            if add_and_check_final('Mixed_5b', net):return net, end_points
            # TODO(alemi):Register intermediate endpoints
            # NOTE(review): slim.repeat expects the callable itself
            # (block35), but block35() is *called* here with no arguments
            # — confirm; as written this looks like a TypeError.
            net = slim.repeat(net, 10, block35(), scale=0.17)
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------------------
# function: classes,type = dbscan(x, k, Eps)
# -------------------------------------------------------------------------
# Objective:
# Clustering the data with Density - Based Scan Algorithm with Noise (DBSCAN)
# -------------------------------------------------------------------------
# Input:
# x - dataset(m, n) m - objects, n - variables
# k - number of objects in a neighborhood of an object
# (minimal number of objects considered as a cluster)
# Eps - neighborhood radius, if not known avoid this parameter or put[]
# -------------------------------------------------------------------------
# Output:
# classes - vector specifying assignment of the i-th object to certain
# cluster(m, 1)
# type - vector specifying type of the i-th object
# (core: 1, border: 0, outlier: -1)
# -------------------------------------------------------------------------
# Example of use:
# x = [randn(30, 2)*.4 randn(40, 2)*.5 + ones(40, 1)*[4 4]]
# classes,type = dbscan(x, 5, [])
# -------------------------------------------------------------------------
# References:
# [1] M.Ester, H.Kriegel, J.Sander, X.Xu, A density - based algorithm for
# discovering clusters in large spatial databases with noise, proc.
# 2nd Int.Conf.on Knowledge Discovery and Data Mining, Portland, OR, 1996,
# p.226, available from:
# www.dbs.informatik.uni - muenchen.de / cgi - bin / papers?query = --CO
# [2] M.Daszykowski, B.Walczak, D.L.Massart, Looking for
# Natural Patterns in Data.Part 1: Density Based Approach,
# Chemom.Intell.Lab.Syst. 56(2001) 83 - 92
# -------------------------------------------------------------------------
# Written by Michal Daszykowski
# Department of Chemometrics, Institute of Chemistry,
# The University of Silesia
# December 2004
# http://www.chemometria.us.edu.pl
from numpy import min, abs, max, sqrt, arange, prod, pi, ones, insert, sum, zeros, argwhere, empty, append, delete
from scipy.special import gamma
# ...........................................
def epsilon(x, k):
    """Analytically estimate the DBSCAN neighborhood radius Eps.

    x : (m, n) data matrix (m points, n dimensions)
    k : minimal number of points considered a cluster

    Assumes the points are uniformly spread over their bounding box and
    solves for the radius of an n-ball expected to contain k points.
    """
    m, n = x.shape
    extent = max(x, axis = 0) - min(x, axis = 0)
    bounding_volume = prod(extent)
    return ((bounding_volume * k * gamma(0.5 * n + 1)) / (m * sqrt(pi ** n))) ** (1. / n)
# ............................................
def edist(i, x):
    """Euclidean distances between sample vector *i* and every row of *x*.

    i : sample vector of length n
    x : (m, n) sample matrix

    Returns shape (m, 1) in the one-dimensional case and shape (m,)
    otherwise, matching the original implementation.
    """
    m, n = x.shape
    # Broadcast i against every row of x.
    diff = ones((m, 1)) * i - x
    if n == 1:
        return abs(diff)
    return sqrt(sum(diff ** 2, axis = 1))
def dbscan(x, k, Eps):
    """Cluster the rows of *x* with DBSCAN.

    x   : (m, n) data matrix
    k   : minimal neighborhood size for a core point
    Eps : neighborhood radius as a sequence; pass [] to have it estimated
          via epsilon().  NOTE(review): a bare scalar Eps would make
          len(Eps) raise TypeError — callers must pass a sequence.

    Returns (classes, type, no): cluster label per point (1 x m), point
    type per point (core 1 / border 0 / outlier -1), and cluster count.
    """
    m, n = x.shape
    if len(Eps) != 1: Eps = epsilon(x, k)
    # Prepend each row's original index as column 0 so points can be
    # identified after x is re-read inside the expansion loop.
    x = insert(x, 0, arange(m), 1)
    m, n = x.shape
    p_type = zeros((m, 1))        # core/border/outlier flag per point
    classes = -ones((m, 1))       # cluster id per point (-1 = unassigned)
    no = 1                        # id of the next cluster to create
    touched = zeros((m, 1))       # 1 once a point has been processed
    for i in range(m):
        if touched[i] == 0:
            ob = x[i, :]
            D = edist(ob[1:n], x[:, 1:n])
            ind = argwhere(D <= Eps)
            if len(ind) > 1 and len(ind) < k + 1:  # do not deal with
                p_type[i] = 0
                classes[i] = 0
            if len(ind) == 1:  # this is noise
                p_type[i] = -1
                classes[i] = -1
                touched[i] = 1
            if len(ind) >= k + 1:  # make clustering
                p_type[i] = 1
                # NOTE(review): max(no) only works because max is numpy's
                # (imported above); the builtin max would reject a scalar.
                for j in range(len(ind)):
                    classes[ind[j]] = max(no)
                # Breadth-first expansion of the current cluster: ind is
                # the frontier of points still to examine.
                while len(ind) >= 1:
                    ob = x[int(ind[0]), :]
                    touched[int(ind[0])] = 1
                    ind = ind[1:len(ind)]
                    D = edist(ob[1:n], x[:, 1:n])
                    i1 = argwhere(D <= Eps)
                    if len(i1) > 1:
                        for j in range(len(i1)):
                            classes[i1[j]] = no
                        if len(i1) >= k + 1:
                            p_type[int(ob[0])] = 1
                        else:
                            p_type[int(ob[0])] = 0
                        for i in range(len(i1)):
                            if touched[i1[i]] == 0:
                                touched[i1[i]] = 1
                                ind = append(ind, i1[i])
                                classes[i1[i]] = no
                no = no + 1
    no = no - 1
    # Points marked 0 above never joined a cluster: reclassify as noise.
    i1 = argwhere(classes == 0)
    classes[i1] = -1
    cl = classes.transpose()
    p_type[i1] = -1
    return cl, p_type, no
|
nilq/baby-python
|
python
|
# form test
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Test procedure:
1. Run s1, s2, s3 to make sure the document is created with value = 0.
2. Open two terminals and enter `python e3_concurrent_update.py` in each.
3. Quickly press enter in both terminals. Each run increments value 1000
   times, so the two concurrent runs contend with each other.
4. Run s4 and check whether value equals 2000.
Details on how ES handles concurrency: https://www.elastic.co/guide/en/elasticsearch/reference/current/optimistic-concurrency-control.html
"""
from rich import print
from learn_elasticsearch.tests import (
es_sanhe_dev as es,
create_index, delete_index,
)
index = "concurrent_update_test"
id_ = 1
def s1_create_initial_doc():
    # Seed the test document with value = 0 so the counter starts clean.
    print(es.index(index=index, id=id_, body={"value": 0}))
def s2_inspect_doc():
    # Fetch and print the document for manual inspection.
    print(es.get(index=index, id=id_))
def s3_update():
    """Increment doc.value 1000 times with a painless update script.

    retry_on_conflict makes Elasticsearch retry when a concurrent writer
    bumps the document version between read and write (optimistic
    concurrency control).
    """
    for i in range(1000):
        print(f"{i}th update ...")
        body = {
            "script": {
                "source": "ctx._source.value = ctx._source.value + params.increment",
                "lang": "painless",
                "params": {
                    "increment": 1,
                }
            }
        }
        res = es.update(index=index, id=id_, body=body, retry_on_conflict=5)
        print(res)
if __name__ == "__main__":
# delete_index(es, index)
# create_index(es, index)
# s1_create_initial_doc()
# s2_inspect_doc()
# s3_update()
pass
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
from skued import azimuthal_average, powdersim
from crystals import Crystal
import unittest
from skimage.filters import gaussian
np.random.seed(23)
def circle_image(shape, center, radii, intensities):
    """Create an image with rings of thickness 2, then Gaussian-blur it.

    shape       : (rows, cols) of the output image
    center      : (row, col) ring center
    radii       : ring radii, paired element-wise with *intensities*
    intensities : pixel value written inside each ring band
    """
    # np.float was removed in NumPy 1.24; the builtin float is the
    # documented equivalent (float64).
    im = np.zeros(shape=shape, dtype=float)
    xx, yy = np.ogrid[0 : shape[0], 0 : shape[1]]
    xx, yy = xx - center[0], yy - center[1]
    # The radial distance grid does not depend on the ring being drawn,
    # so compute it once instead of once per radius.
    rr = np.sqrt(xx ** 2 + yy ** 2)
    for radius, intensity in zip(radii, intensities):
        im[np.logical_and(rr < radius + 1, rr > radius - 1)] = intensity
    im[:] = gaussian(im, 5)
    return im
class TestAzimuthalAverage(unittest.TestCase):
    """Tests for skued.azimuthal_average.

    The deprecated ``np.float``/``np.bool`` aliases were removed in
    NumPy 1.24; they are replaced with the builtin ``float``/``bool``
    throughout (identical dtypes, no behavior change).
    """

    def test_trivial_array(self):
        """ Test azimuthal_average on an array of zeroes """
        image = np.zeros(shape=(256, 256), dtype=float)
        center = (image.shape[0] / 2, image.shape[1] / 2)
        radius, intensity = azimuthal_average(image, center)
        self.assertTrue(intensity.sum() == 0)
        self.assertSequenceEqual(intensity.shape, radius.shape)

    def test_ring(self):
        """ Test azimuthal_average on an image with a wide ring """
        image = np.zeros(shape=(256, 256), dtype=float)
        center = (image.shape[0] / 2, image.shape[1] / 2)
        xc, yc = center
        # Create an image with a wide ring
        extent = np.arange(0, image.shape[0])
        xx, yy = np.meshgrid(extent, extent)
        rr = np.sqrt((xx - xc) ** 2 + (yy - yc) ** 2)
        image[np.logical_and(24 < rr, rr < 26)] = 1
        radius, intensity = azimuthal_average(image, center)
        self.assertEqual(intensity.max(), image.max())
        self.assertSequenceEqual(radius.shape, intensity.shape)

    def test_angular_bounds(self):
        """ Test azimuthal_average with a restrictive angular_bounds argument """
        image = np.zeros(shape=(256, 256), dtype=float)
        center = (image.shape[0] / 2, image.shape[1] / 2)
        xc, yc = center
        # Create an image with intensity only in a 60-degree wedge
        extent = np.arange(0, image.shape[0])
        xx, yy = np.meshgrid(extent, extent)
        rr = np.sqrt((xx - xc) ** 2 + (yy - yc) ** 2)
        angles = np.rad2deg(np.arctan2(yy - yc, xx - xc)) + 180
        image[np.logical_and(0 <= angles, angles <= 60)] = 1
        with self.subTest("0 - 360"):
            radius, intensity = azimuthal_average(image, center, angular_bounds=None)
            r360, int360 = azimuthal_average(image, center, angular_bounds=(0, 360))
            self.assertTrue(np.allclose(intensity, int360))
        with self.subTest("Inside angle bounds"):
            radius, intensity = azimuthal_average(image, center, angular_bounds=(0, 60))
            self.assertTrue(np.allclose(intensity, np.ones_like(intensity)))
        with self.subTest("Overlapping bounds"):
            radius, intensity = azimuthal_average(
                image, center, angular_bounds=(15, 75)
            )
            self.assertFalse(np.all(intensity < np.ones_like(intensity)))
        with self.subTest("Outside angle bounds"):
            radius, intensity = azimuthal_average(
                image, center, angular_bounds=(60, 360)
            )
            self.assertTrue(np.allclose(intensity, np.zeros_like(intensity)))
        with self.subTest("Inside angle bounds with 360deg rollover"):
            radius, intensity = azimuthal_average(
                image, center, angular_bounds=(60 + 360, 360 + 360)
            )
            self.assertTrue(np.allclose(intensity, np.zeros_like(intensity)))

    def test_ring_with_mask(self):
        """ Test azimuthal_average on an image with a wide ring and a mask """
        image = np.zeros(shape=(256, 256), dtype=float)
        center = (image.shape[0] / 2, image.shape[1] / 2)
        xc, yc = center
        mask = np.ones_like(image, dtype=bool)
        mask[120:140, 0:140] = False
        # Create an image with a wide ring
        extent = np.arange(0, image.shape[0])
        xx, yy = np.meshgrid(extent, extent)
        rr = np.sqrt((xx - xc) ** 2 + (yy - yc) ** 2)
        image[np.logical_and(24 < rr, rr < 26)] = 1
        radius, intensity = azimuthal_average(image, center, mask=mask)
        self.assertEqual(intensity.max(), image.max())
        self.assertSequenceEqual(radius.shape, intensity.shape)

    def test_trim_and_mask(self):
        """ Test that regions that only have masks contributions are not present
        in the angular average """
        image = np.ones(shape=(256, 256), dtype=float)
        center = (image.shape[0] / 2, image.shape[1] / 2)
        xc, yc = center
        # Mask out a disk of radius 20 around the center
        extent = np.arange(0, image.shape[0])
        xx, yy = np.meshgrid(extent, extent)
        rr = np.hypot(xx - xc, yy - yc)
        mask = np.ones_like(image, dtype=bool)
        mask[rr < 20] = False
        # image[rr < 20] = 0
        radius, intensity = azimuthal_average(image, center, mask=mask, trim=False)
        self.assertEqual(radius.min(), 0)
        radius_trimmed, intensity_trimmed = azimuthal_average(
            image, center, mask=mask, trim=True
        )
        self.assertEqual(radius_trimmed.min(), 20)

    def test_mask_and_nan(self):
        """ Test that azimuthal_average with masks does not yield NaNs. This can happen for large masks. """
        image = np.ones(shape=(256, 256), dtype=np.int16)
        mask = np.zeros_like(image, dtype=bool)
        mask[100:156, 100:156] = True
        _, av = azimuthal_average(image, center=(128, 128), mask=mask, trim=False)
        self.assertFalse(np.any(np.isnan(av)))
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# Copyright 2018 Luddite Labs Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides unknown (non-registered) roles support.
It injects a hook to the docutils workflow by replacing
:func:`docutils.parsers.rst.roles.role` and returns
:func:`common_role` handler if no role is found.
:func:`common_role` creates special document node :class:`autodoc_unknown_role`
which is handled by the :class:`CommonTranslator`.
"""
from docutils.parsers.rst import nodes, roles
# List of known but not registered roles.
# They are from the Sphinx.
# http://www.sphinx-doc.org/en/stable/markup/inline.html
# sphinx/roles.py
known_roles = ['any', 'download', 'doc', 'guilabel', 'menuselection',
'file', 'samp', 'abbr', 'index', 'command', 'dfn',
'kbd', 'mailheader', 'makevar', 'manpage', 'mimetype',
'newsgroup', 'program', 'regexp', 'ref', 'numref',
'envvar', 'token', 'keyword', 'option', 'term',
'index', 'attr', 'attribute', 'class', 'meth', 'method', 'obj',
'func', 'exc', 'mod']
class autodoc_unknown_role(nodes.Inline, nodes.TextElement):
    """Document node standing in for a non-registered role.

    Created by :func:`common_role`; handled by ``CommonTranslator``.
    """
    pass
def common_role(role, rawtext, text, lineno, inliner, options=None,
                content=None):
    """Handler installed for unknown roles.

    Wraps the raw text in an :class:`autodoc_unknown_role` node (with the
    role name and source position attached) so the translator can render
    or report it later.  Returns the usual (nodes, messages) pair.
    """
    options = options if options is not None else {}
    roles.set_classes(options)
    # Keep the original interpreted text available on the node.
    options['attributes'] = {'text': text}
    node = autodoc_unknown_role(rawtext, rawtext, **options)
    node.role_name = role
    node.source, node.line = inliner.reporter.get_source_and_line(lineno)
    return [node], []
# This role is applied to interpreted text without a role: `text`.
def default_role(role, rawtext, *args, **kwargs):
    """Handle role-less interpreted text (`text`) as plain emphasis."""
    stripped = rawtext.strip('`')
    node = nodes.emphasis(stripped, stripped, default_role=True)
    return [node], []
def register_roles():
    # Map every known (Sphinx-style) role name to the generic handler.
    for name in known_roles:
        roles.register_local_role(name, common_role)
def set_default_role():
    """Set custom default role.

    By default::

        `text` -> :title:`text`

    we override with our role::

        `text` -> `text`

    See Also:
        :attr:`roles.DEFAULT_INTERPRETED_ROLE`.
    """
    # The empty-string key is docutils' default-role slot; only install
    # our handler if it is not already there.
    if roles._roles.get('') != default_role:
        roles._roles[''] = default_role
def role_hook(role_name, language_module, lineno, reporter):
    """Hook to provide common role if nothing is found."""
    role_fn, messages = role_hook.orig(role_name, language_module, lineno,
                                       reporter)
    # Fall back to common_role when docutils has no registered handler.
    return role_fn or common_role, messages
def set_role_hook():
    """Replace :func:`roles.role` with custom function.

    It returns common role node for all nonexistent roles.
    Returns True when the hook was already installed.
    """
    registered = hasattr(role_hook, 'orig')
    if not registered:
        # Remember the original resolver so role_hook can delegate to it.
        role_hook.orig = roles.role
        roles.role = role_hook
    return registered
def init():
    # Idempotent: set_role_hook() reports whether the hook was already
    # in place, so repeated init() calls do not re-register anything.
    already_init = set_role_hook()
    if not already_init:
        set_default_role()
        register_roles()
|
nilq/baby-python
|
python
|
import os
import io
import sys
import time
import string
import random
import pstats
import unittest
import cProfile
import itertools
import statistics
from unittest.mock import patch, MagicMock
import bucky3.statsd as statsd
class RoughFloat(float):
    """Float that compares equal to other floats up to 2 decimal places.

    Non-float operands fall back to exact float equality, so comparisons
    against ints behave like a plain float.
    """

    def __eq__(self, other):
        if not isinstance(other, float):
            return super().__eq__(other)
        return round(self, 2) == round(other, 2)

    def __hash__(self):
        # Defining __eq__ alone sets __hash__ = None and makes instances
        # unhashable.  Hash the rounded value so objects that compare
        # equal (same 2-decimal rounding) also hash equal.
        return hash(round(self, 2))
def statsd_verify(output_pipe, expected_values):
    """Assert that *output_pipe* received exactly *expected_values*.

    Flattens every payload passed to output_pipe.send(), matches each
    item against the expected list (consuming matches), fails on an
    unexpected or missing item, and resets the mock afterwards.
    """
    received = []
    for call in output_pipe.send.call_args_list:
        received.extend(call[0][0])
    for item in received:
        if item not in expected_values:
            assert False, str(item) + " was not expected"
        expected_values.remove(item)
    if expected_values:
        assert False, "missing " + str(expected_values.pop())
    output_pipe.reset_mock()
def statsd_setup(timestamps, **extra_cfg):
    """Decorator factory wiring a StatsDServer with mocked clocks into a test.

    Usable two ways: ``@statsd_setup`` directly (then *timestamps* is the
    decorated test function) or ``@statsd_setup(timestamps=..., **cfg)``.
    The wrapped test receives (self, statsd_module) and may return the
    expected output for statsd_verify(), or None to do its own checks.

    NOTE(review): in the bare-decorator branch below *timestamps* is
    rebound to None, so run() would call itertools.tee(None) — confirm
    whether that path is ever exercised.
    """
    def run(fun, self):
        # Patch both clocks so the statsd module sees a scripted timeline.
        with patch('time.monotonic') as monotonic_time, \
                patch('time.time') as system_time:
            if callable(timestamps):
                system_time_mock, monotonic_time_mock = itertools.tee((t for t in timestamps()), 2)
            else:
                system_time_mock, monotonic_time_mock = itertools.tee(timestamps, 2)
            system_time_mock, monotonic_time_mock = iter(system_time_mock), iter(monotonic_time_mock)
            monotonic_time0 = next(monotonic_time_mock)
            # Statsd module consumes one monotonic tick for self.init_timestamp, we need to inject it
            monotonic_time_mock = itertools.chain(iter([monotonic_time0]), iter([monotonic_time0]), monotonic_time_mock)
            system_time.side_effect = system_time_mock
            monotonic_time.side_effect = monotonic_time_mock
            cfg = dict(
                # log_level=INFO triggers a log line in src module and that calls the mocked system_time
                # which consumes one tick and fails the tests. So up the log_level, really ugly.
                log_level='WARN',
                flush_interval=1,
                add_timestamps=True,
                timers_bucket="stats_timers",
                histograms_bucket="stats_histograms",
                sets_bucket="stats_sets",
                gauges_bucket="stats_gauges",
                counters_bucket="stats_counters",
                destination_modules=(),
            )
            cfg.update(**extra_cfg)
            output_pipe = MagicMock()
            statsd_module = statsd.StatsDServer('statsd_test', cfg, [output_pipe])
            statsd_module.init_cfg()
            expected_output = fun(self, statsd_module)
            # A None return means the test did its own verification.
            if expected_output is None:
                return
            statsd_module.tick()
            statsd_verify(output_pipe, expected_output)
    if callable(timestamps):
        # Bare-decorator form: @statsd_setup
        fun = timestamps
        timestamps = None
        return lambda self: run(fun, self)
    else:
        # Parameterized form: @statsd_setup(timestamps=..., **cfg)
        def wrapper(fun):
            return lambda self: run(fun, self)
        return wrapper
def single_histogram_1_bucket(x):
    """One-bucket selector: 'under_300' below 300, otherwise None (dropped)."""
    return 'under_300' if x < 300 else None
def single_histogram_3_buckets(x):
    """Three-bucket selector split at 100 and 300."""
    if x < 100:
        return 'under_100'
    return 'under_300' if x < 300 else 'over_300'
def single_histogram_10_buckets(x):
    """Ten-bucket selector: 100-wide buckets up to 900, then 'over_900'."""
    for limit in range(100, 1000, 100):
        if x < limit:
            return 'under_%d' % limit
    return 'over_900'
def multiple_histogram_selector(key):
    """Return a per-metric bucket selector based on ``key['name']``.

    'gorm' and 'gurm' get dedicated selectors; any other name yields None
    (the metric has no histogram and is dropped).
    """
    def gorm_buckets(value):
        return 'gorm_under_100' if value < 100 else 'gorm_over_100'

    def gurm_buckets(value):
        if value < 300:
            return 'gurm_under_300'
        return 'gurm_under_1000' if value < 1000 else 'gurm_over_1000'

    return {'gorm': gorm_buckets, 'gurm': gurm_buckets}.get(key['name'])
class TestStatsDServer(unittest.TestCase):
    def malformed_entries(self, statsd_module, entry_type, check_numeric=True, check_rate=False):
        """Feed malformed samples of *entry_type* ('c', 'g', ...) and assert they are dropped.

        After each malformed line plus one tick, the output pipe must not
        have been touched at all.
        """
        mock_pipe = statsd_module.dst_pipes[0]

        def test(s):
            statsd_module.handle_packet(s.encode("utf-8"))
            statsd_module.tick()
            assert not mock_pipe.called
            assert not mock_pipe.send.called
            mock_pipe.reset_mock()

        test(":1|" + entry_type)            # empty metric name
        test("_gorm:1|" + entry_type)       # name starting with underscore
        test("g.o.r.m:1|" + entry_type)     # dots in the name
        test("gorm:|" + entry_type)         # missing value
        if check_numeric:
            test("gorm:abc|" + entry_type)  # non-numeric value
        if check_rate:
            test("gorm:1|" + entry_type + "|@")      # empty sample rate
            test("gorm:1|" + entry_type + "|@0")     # zero rate
            test("gorm:1|" + entry_type + "|@1.1")   # rate above 1
            test("gorm:1|" + entry_type + "|@-0.3")  # negative rate
def malformed_metadata(self, statsd_module, entry):
mock_pipe = statsd_module.dst_pipes[0]
legal_name_chars = string.ascii_letters
illegal_name_chars = string.punctuation.replace('_', '').replace(':', '').replace('=', '')
illegal_value_chars = ','
legal_value_chars = ''.join(
set(string.ascii_letters + string.punctuation + string.digits + ' ') - set(',')
)
def get_random_word(chars, min_len=1, max_len=5):
return ''.join(random.choice(chars) for i in range(random.randint(min_len, max_len)))
def get_token(first_chars, legal_chars, illegal_char=None):
n = get_random_word(first_chars, 1, 1) + get_random_word(legal_chars)
if illegal_char:
n = n + get_random_word(illegal_char, 1, 1) + get_random_word(legal_chars)
return n
i = 0
for c in illegal_name_chars:
name = get_token(legal_name_chars, legal_name_chars, c)
value = get_token(legal_value_chars, legal_value_chars)
statsd_module.handle_line(i, entry + '|#' + name + '=' + value)
statsd_module.tick()
assert not mock_pipe.called, "Failed to k=" + name + " and v=" + value
assert not mock_pipe.send.called, "Failed to k=" + name + " and v=" + value
mock_pipe.reset_mock()
i += 1
for c in illegal_value_chars:
name = get_token(legal_name_chars, legal_name_chars)
value = get_token(legal_value_chars, legal_value_chars, c)
statsd_module.handle_line(i, entry + '|#' + name + '=' + value)
statsd_module.tick()
assert not mock_pipe.called
assert not mock_pipe.send.called
mock_pipe.reset_mock()
i += 1
    def timestamped_metadata(self, statsd_module, entry):
        """Check the special 'timestamp' metadata tag on *entry*.

        Offsets within a 10-minute window of the mocked clock are accepted
        (a metric is emitted); empty, non-numeric, or out-of-window values
        cause the sample to be dropped.
        """
        mock_pipe = statsd_module.dst_pipes[0]

        def test(condition, s):
            statsd_module.handle_packet((entry + "|#timestamp=" + s).encode("ascii"))
            statsd_module.tick()
            assert not mock_pipe.called
            assert mock_pipe.send.called == condition
            mock_pipe.reset_mock()

        test(False, "")
        test(False, "not-a-timestamp")
        test(False, "-1000")  # Beyond 10min window
        test(False, "1000")   # Beyond 10min window
        test(True, "-123")    # Within 10min window
        test(True, "123.4")   # Within 10min window
    def bucketed_metadata(self, statsd_module, entry, expected_metadata_size=2):
        """Check the special 'bucket' metadata tag on *entry*.

        A valid bucket name redirects the metric to that bucket (payload[0])
        and is stripped from the remaining metadata, which must then have
        *expected_metadata_size* entries; invalid names drop the sample.
        """
        mock_pipe = statsd_module.dst_pipes[0]

        def test(condition, s):
            statsd_module.handle_packet((entry + "|#hello=world,bucket=" + s).encode("ascii"))
            statsd_module.tick()
            assert not mock_pipe.called
            assert mock_pipe.send.called == condition
            if condition:
                args, kwargs = mock_pipe.send.call_args
                assert len(args) == 1
                payload = args[0]
                assert len(payload) == 1
                payload = payload[0]
                # The bucket tag overrides the destination bucket name.
                assert payload[0] == s
                assert len(payload[3]) == expected_metadata_size
            mock_pipe.reset_mock()

        test(False, "")
        test(False, "not-a-bucket-name")
        test(True, "valid_bucket_name")
@statsd_setup(timestamps=(2, 4, 6, 8, 10, 12, 14))
def test_counters(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gurm:1|c|@0.1")
statsd_module.handle_line(0, "gorm:3|c")
statsd_module.handle_line(0, "gorm:0.5|c")
statsd_module.handle_line(0, "form:10|c|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=2.5, count=5), 2, dict(name='gorm')),
('stats_counters', dict(rate=5.0, count=10), 2, dict(name='gurm')),
('stats_counters', dict(rate=25.0, count=50), 2, dict(name='form'))
])
statsd_module.handle_line(2, "gorm:1|c")
statsd_module.handle_line(2, "gurm:1.3|c|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=0.5, count=1), 4, dict(name='gorm')),
('stats_counters', dict(rate=3.25, count=6.5), 4, dict(name='gurm'))
])
statsd_module.handle_line(4, "gurm:3|c|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=7.5, count=15), 6, dict(name='gurm'))
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_counters_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gorm:2.0|c|#a=b")
statsd_module.handle_line(0, "gorm:2.5|c|#a=b,c=5")
statsd_module.handle_line(0, "gorm:3.0|c|#a=z,c=5")
statsd_module.handle_line(0, "gorm:3.5|c|#c=5,a=b")
statsd_module.handle_line(0, "pi:3.14|c|#a=,b=c")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='b')),
('stats_counters', dict(rate=6.0, count=6.0), 1, dict(name='gorm', a='b', c='5')),
('stats_counters', dict(rate=3.0, count=3.0), 1, dict(name='gorm', a='z', c='5')),
('stats_counters', dict(rate=3.14, count=3.14), 1, dict(name='pi', a='', b='c')),
])
statsd_module.handle_line(1, "gorm:4.0|c|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=4.0, count=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_counters(self, statsd_module):
self.malformed_entries(statsd_module, 'c', check_rate=True)
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_counters_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|c")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_counters_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|c")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_counters_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|c")
@statsd_setup(timestamps=(1, 2, 3, 4, 5, 6, 7, 8))
def test_gauges(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:6.7|g")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=6.7), 1, dict(name='gorm'))
])
statsd_module.handle_line(1, "gorm:3|g|@0.5")
statsd_module.handle_line(1, "gorm:8.1|g")
statsd_module.handle_line(1, "gurm:123|g|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=8.1), 2, dict(name='gorm')),
('stats_gauges', dict(value=123), 2, dict(name='gurm'))
])
statsd_module.handle_line(2, "gurm:12|g|@0.5")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=12), 3, dict(name='gurm')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_gauges_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|g")
statsd_module.handle_line(0, "gorm:2.0|g|#a=b")
statsd_module.handle_line(0, "gorm:2.5|g|#a=b,c=5")
statsd_module.handle_line(0, "gorm:3.0|g|#a=z,c=5")
statsd_module.handle_line(0, "gorm:3.5|g|#c=5,a=b")
statsd_module.handle_line(0, "pi:3.14|g|#a=,b=c")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=1.5), 1, dict(name='gorm')),
('stats_gauges', dict(value=2.0), 1, dict(name='gorm', a='b')),
('stats_gauges', dict(value=3.5), 1, dict(name='gorm', a='b', c='5')),
('stats_gauges', dict(value=3.0), 1, dict(name='gorm', a='z', c='5')),
('stats_gauges', dict(value=3.14), 1, dict(name='pi', a='', b='c')),
])
statsd_module.handle_line(1, "gorm:4.0|g|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_gauges', dict(value=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_gauges(self, statsd_module):
self.malformed_entries(statsd_module, 'g')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_gauges_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_gauges_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_gauges_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|g")
@statsd_setup(timestamps=(1, 2, 3, 4, 5, 6, 7, 8))
def test_sets(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:abc|s|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1.0), 1, dict(name='gorm'))
])
statsd_module.handle_line(1, "gurm:x|s")
statsd_module.handle_line(1, "gurm:y|s|@0.2")
statsd_module.handle_line(1, "gurm:z|s|@0.2")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=3.0), 2, dict(name='gurm'))
])
statsd_module.handle_line(2, "gurm:y|s|@0.2")
statsd_module.handle_line(2, "gurm:y|s")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1.0), 3, dict(name='gurm'))
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_sets_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:p|s")
statsd_module.handle_line(0, "gorm:q|s|#a=b")
statsd_module.handle_line(0, "gorm:r|s|#a=b,c=5")
statsd_module.handle_line(0, "gorm:s|s|#a=z,c=5")
statsd_module.handle_line(0, "gorm:t|s|#c=5,a=b")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1), 1, dict(name='gorm')),
('stats_sets', dict(count=1), 1, dict(name='gorm', a='b')),
('stats_sets', dict(count=2), 1, dict(name='gorm', a='b', c='5')),
('stats_sets', dict(count=1), 1, dict(name='gorm', a='z', c='5')),
])
statsd_module.handle_line(1, "gorm:u|s|#c=5,a=z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_sets', dict(count=1), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_sets(self, statsd_module):
self.malformed_entries(statsd_module, 's', check_numeric=False)
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_sets_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:x|s")
@statsd_setup(timestamps=range(1, 1000))
def test_timestamped_sets_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:x|s")
@statsd_setup(timestamps=range(1, 1000))
def test_bucketed_sets_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:x|s")
@statsd_setup(flush_interval=0.1,
percentile_thresholds=(90,),
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7))
def test_single_timer_sample(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|ms")
expected_value = {
"mean": 100.0,
"upper": 100.0,
"lower": 100.0,
"count": 1,
"count_ps": 10.0,
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.1, dict(name='gorm', percentile='90.0'))
])
statsd_module.handle_line(0.1, "gorm:100|ms")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.2, dict(name='gorm', percentile='90.0'))
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(flush_interval=0.1,
percentile_thresholds=(90,),
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7))
def test_timer_samples1(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|ms")
statsd_module.handle_line(0, "gorm:200|ms|@0.2")
statsd_module.handle_line(0, "gorm:300|ms") # Out of the 90% threshold
expected_value = {
"mean": 150,
"lower": 100,
"upper": 200,
"count": 2,
"count_ps": 20,
"stdev": 70.71067811865476
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.1, dict(name='gorm', percentile='90.0'))
])
@statsd_setup(percentile_thresholds=(90,),
timestamps=(0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
def test_timer_samples2(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
for i in range(9):
statsd_module.handle_line(0, "gorm:1|ms")
statsd_module.handle_line(0, "gorm:2|ms") # Out of the 90% threshold
expected_value = {
"mean": 1,
"lower": 1,
"upper": 1,
"count": 9,
"count_ps": 18.0,
"stdev": 0.0
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.5, dict(name='gorm', percentile='90.0'))
])
@statsd_setup(percentile_thresholds=(90,),
timestamps=(0.5, 1.0, 1.5, 2.0, 2.5, 3.0))
def test_timer_samples3(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:2|ms")
statsd_module.handle_line(0, "gorm:5|ms")
statsd_module.handle_line(0, "gorm:7|ms") # Out of the 90% threshold
statsd_module.handle_line(0, "gorm:3|ms")
expected_value = {
"mean": 10 / 3.0,
"lower": 2,
"upper": 5,
"count": 3,
"count_ps": 6,
"stdev": 1.5275252316519463
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 0.5, dict(name='gorm', percentile='90.0'))
])
_percentile_thresholds = (10, 20, 30, 40, 50, 60, 70, 80, 90, 95, 97, 98, 99, 99.9, 100)
@statsd_setup(timestamps=range(1, 100), percentile_thresholds=_percentile_thresholds)
def test_timer_large_series(self, statsd_module):
test_name = 'gorm'
test_vector = self.rand_vec(length=3000)
for sample in test_vector:
statsd_module.handle_line(0, test_name + ":" + str(sample) + "|ms")
statsd_module.tick()
test_vector.sort()
expected_values = []
for threshold_v in self._percentile_thresholds:
threshold_i = len(test_vector) if threshold_v == 100 else (threshold_v * len(test_vector)) // 100
threshold_slice = test_vector[:int(threshold_i)]
expected_value = {
"mean": RoughFloat(statistics.mean(threshold_slice)),
"upper": RoughFloat(max(threshold_slice)),
"lower": RoughFloat(min(threshold_slice)),
"count": len(threshold_slice),
"count_ps": len(threshold_slice),
"stdev": RoughFloat(statistics.stdev(threshold_slice))
}
expected_values.append(('stats_timers', expected_value, 1,
dict(name=test_name, percentile=str(float(threshold_v)))))
statsd_verify(statsd_module.dst_pipes[0], expected_values)
@statsd_setup(timestamps=range(1, 100), percentile_thresholds=(100,))
def test_timers_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
expected_value = {
"mean": 100.0,
"upper": 100.0,
"lower": 100.0,
"count": 1,
"count_ps": 1.0,
}
expected_value2 = expected_value.copy()
expected_value2.update(count=2, count_ps=2.0, stdev=0.0)
statsd_module.handle_line(0, "gorm:100|ms")
statsd_module.handle_line(0, "gorm:100|ms|#a=b")
statsd_module.handle_line(0, "gorm:100|ms|#a=b,c=5")
statsd_module.handle_line(0, "gorm:100|ms|#a=z,c=5")
statsd_module.handle_line(0, "gorm:100|ms|#c=5,a=b")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 1, dict(name='gorm', percentile='100.0')),
('stats_timers', expected_value, 1, dict(name='gorm', a='b', percentile='100.0')),
('stats_timers', expected_value2, 1, dict(name='gorm', a='b', c='5', percentile='100.0')),
('stats_timers', expected_value, 1, dict(name='gorm', a='z', c='5', percentile='100.0')),
])
statsd_module.handle_line(1, "gorm:100|ms|#a=b,c=5")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_timers', expected_value, 2, dict(name='gorm', a='b', c='5', percentile='100.0')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_timers(self, statsd_module):
self.malformed_entries(statsd_module, 'ms')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_timers_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|ms")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,))
def test_timestamped_timers_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|ms")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,))
def test_bucketed_timers_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|ms", expected_metadata_size=3)
@statsd_setup(flush_interval=0.1,
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_histogram_samples1(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|h")
expected_value = {
"mean": 100,
"lower": 100,
"upper": 100,
"count": 1,
"count_ps": 10,
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_histograms', expected_value, 0.1, dict(name='gorm', histogram='test_histogram'))
])
@statsd_setup(flush_interval=0.1,
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
histogram_selector=lambda key: lambda x: 'test_histogram', )
def test_histogram_samples2(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:100|h")
statsd_module.handle_line(0, "gorm:200|h|@0.2")
statsd_module.handle_line(0, "gorm:300|h")
expected_value = {
"mean": 200,
"lower": 100,
"upper": 300,
"count": 3,
"count_ps": 30,
"stdev": 100.0
}
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_histograms', expected_value, 0.1, dict(name='gorm', histogram='test_histogram'))
])
@statsd_setup(flush_interval=0.1,
timestamps=(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7),
histogram_selector=multiple_histogram_selector)
def test_histogram_large_series(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
test_samples = dict(gorm={}, gurm={}, foo={})
for i in range(3000):
name = random.choice(tuple(test_samples.keys()))
value = random.randint(0, 1500)
statsd_module.handle_line(0, name + ":" + str(value) + "|h")
selector = multiple_histogram_selector(dict(name=name))
if not selector:
continue
bucket = selector(value)
if bucket:
test_samples[name].setdefault(bucket, []).append(value)
break
expected_values = []
for name, d in test_samples.items():
for k, v in d.items():
expected_value = {
"mean": RoughFloat(statistics.mean(v)),
"lower": min(v),
"upper": max(v),
"count": len(v),
"count_ps": len(v) * 10,
}
if len(v) > 1:
expected_value['stdev'] = RoughFloat(statistics.stdev(v))
expected_values.append(
('stats_histograms', expected_value, 0.1, dict(name=name, histogram=k))
)
statsd_module.tick()
statsd_verify(mock_pipe, expected_values)
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_histograms(self, statsd_module):
self.malformed_entries(statsd_module, 'h')
@statsd_setup(timestamps=range(1, 1000))
def test_malformed_histograms_metadata(self, statsd_module):
self.malformed_metadata(statsd_module, "gorm:1|h")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(100,),
histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_timestamped_histograms_metadata(self, statsd_module):
self.timestamped_metadata(statsd_module, "gorm:1|h")
@statsd_setup(timestamps=range(1, 1000), percentile_thresholds=(),
histogram_selector=lambda key: lambda x: 'test_histogram',)
def test_bucketed_histograms_metadata(self, statsd_module):
self.bucketed_metadata(statsd_module, "gorm:1|h", expected_metadata_size=3)
@statsd_setup(timestamps=range(1, 1000))
def test_commas(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "foo:1|c|#hello=world,")
statsd_module.handle_line(0, "foo:1|c|#hello=world,,,more=metadata,")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1, count=1), 1, dict(name='foo', hello='world')),
])
def prepare_performance_test(self):
flag = os.environ.get('TEST_PERFORMANCE', 'no').lower()
test_requested = flag in ('yes', 'true', '1')
if not test_requested:
self.skipTest("Performance test not requested")
return None
flag = os.environ.get('PROFILE_PERFORMANCE', 'no').lower()
profiler_requested = flag in ('yes', 'true', '1')
return cProfile.Profile() if profiler_requested else None
def close_performance_test(self, profiler):
if profiler:
buf = io.StringIO()
stats = pstats.Stats(profiler, stream=buf).sort_stats('cumulative')
stats.print_stats(0.1)
print(buf.getvalue())
def rand_str(self, min_len=3, max_len=10, chars=string.ascii_lowercase):
return ''.join(random.choice(chars) for i in range(random.randint(min_len, max_len)))
def rand_num(self, min_len=1, max_len=3):
return self.rand_str(min_len, max_len, string.digits)
def rand_val(self, mean=None):
if mean is None:
mean = 10
return round(min(max(0, random.gauss(mean, mean / 10)), 2 * mean), 3)
def rand_vec(self, length=None, mean=None):
if length is None:
length = random.randint(10, 100)
return list(self.rand_val(mean) for i in range(length))
def metadata_test_set(self, metric_type, set_size, tags_per_sample):
buf = set()
while len(buf) < set_size:
if tags_per_sample > 0:
tags_str = ','.join(self.rand_str() + '=' + self.rand_str() for i in range(tags_per_sample))
else:
tags_str = ''
l = self.rand_str() + ':' + self.rand_num() + '|' + metric_type
if random.random() > 0.5:
l = l + '|@{:.1f}'.format(random.random())
if tags_str:
l = l + '|#' + tags_str
buf.add(l)
return buf
    def metadata_performance(self, statsd_module, prefix, metric_type, N, M, set_size, tags_per_sample, profiler=None):
        """Benchmark line insertion and per-tick aggregation; print us/sample to stderr.

        Runs N rounds of (M passes over a set_size sample set, then one
        tick), timing insertion and aggregation separately with
        ``time.process_time``. *prefix* labels the printed result line.
        """
        mock_pipe = statsd_module.dst_pipes[0]
        test_sample_set = self.metadata_test_set(metric_type, set_size, tags_per_sample)
        insertion_time = 0
        aggregation_time = 0
        t = 0
        for i in range(N):
            start_timestamp = time.process_time()
            for j in range(M):
                for sample in test_sample_set:
                    if profiler:
                        profiler.enable()
                    statsd_module.handle_line(t, sample)
                    if profiler:
                        profiler.disable()
            insertion_timestamp = time.process_time()
            if profiler:
                profiler.enable()
            statsd_module.tick()
            if profiler:
                profiler.disable()
            aggregation_timestamp = time.process_time()
            t += 1
            # Drop the emitted metrics; only the timing matters here.
            mock_pipe.reset_mock()
            insertion_time += (insertion_timestamp - start_timestamp)
            aggregation_time += (aggregation_timestamp - insertion_timestamp)
        total_samples = N * M * len(test_sample_set)
        us_per_insertion = 1000000 * insertion_time / total_samples
        us_per_aggregation = 1000000 * aggregation_time / total_samples
        print(('\n{prefix}: {total_samples:d} samples in {total_time:.2f}s'
               ' -> insertion {us_per_insertion:.2f}us/sample'
               ' -> aggregation {us_per_aggregation:.2f}us/sample').format(
            prefix=prefix, total_samples=total_samples, total_time=(insertion_time + aggregation_time),
            us_per_insertion=us_per_insertion, us_per_aggregation=us_per_aggregation,
        ), flush=True, file=sys.stderr)
@statsd_setup(timestamps=range(1, 10000000))
def test_counters_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "counters without tags", 'c', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "counters with 3 tags", 'c', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "counters with 10 tags", 'c', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000))
def test_gauges_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "gauges without tags", 'g', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "gauges with 3 tags", 'g', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "gauges with 10 tags", 'g', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000))
def test_sets_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "sets without tags", 's', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "sets with 3 tags", 's', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "sets with 10 tags", 's', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99))
def test_timers_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "timers without tags", 'ms', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "timers with 3 tags", 'ms', 100, 10, 1000, 3, prof)
self.metadata_performance(statsd_module, "timers with 10 tags", 'ms', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
histogram_selector=lambda key: single_histogram_1_bucket)
def test_histograms_performance1(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "histogram with 1 bucket, no tags", 'h', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "histogram with 1 bucket, 10 tags", 'h', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
histogram_selector=lambda key: single_histogram_3_buckets)
def test_histograms_performance3(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "histogram with 3 buckets, no tags", 'h', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "histogram with 3 buckets, 10 tags", 'h', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90, 99),
histogram_selector=lambda key: single_histogram_10_buckets)
def test_histograms_performance10(self, statsd_module):
prof = self.prepare_performance_test()
self.metadata_performance(statsd_module, "histogram with 10 buckets, no tags", 'h', 100, 10, 1000, 0, prof)
self.metadata_performance(statsd_module, "histogram with 10 buckets, 10 tags", 'h', 100, 10, 1000, 10, prof)
self.close_performance_test(prof)
def percentile_test_set(self, length, N=1):
buf = []
for i in range(N):
name = ('name', self.rand_str(min_len=10, max_len=10))
vector = self.rand_vec(length=length)
buf.append((tuple((name,),), vector))
return buf
    def percentiles_performance(self, statsd_module, prefix, vector_len, N, M, profiler=None):
        """Benchmark timer percentile aggregation (enqueue_timers); print us/sample.

        Loads N pre-generated timer vectors of *vector_len* samples into the
        module and times M aggregation rounds with ``time.process_time``.
        """
        total_time, test_set = 0, self.percentile_test_set(vector_len, N)
        for i in range(M):
            # Swallow the aggregated output; only aggregation cost matters.
            statsd_module.buffer_metric = lambda bucket, stats, timestamp, metadata: None
            statsd_module.timers.clear()
            statsd_module.timers.update((k, (8, v)) for k, v in test_set)
            statsd_module.last_timestamp = 0
            statsd_module.current_timestamp = 10
            start_time = time.process_time()
            if profiler:
                profiler.enable()
            statsd_module.enqueue_timers(10)
            if profiler:
                profiler.disable()
            time_delta = time.process_time() - start_time
            total_time += time_delta
        total_samples = N * M * vector_len
        us_per_sample = 1000000 * total_time / total_samples
        # NOTE(review): prints the LAST round's time_delta, not total_time,
        # while us_per_sample is computed from total_time — the printed
        # elapsed seconds look inconsistent; confirm whether total_time was
        # intended here.
        print('\n{prefix}: {total_samples:d} samples in {time_delta:.2f}s -> {us_per_sample:.1f}us/sample'.format(
            prefix=prefix, total_samples=total_samples, time_delta=time_delta, us_per_sample=us_per_sample
        ), flush=True, file=sys.stderr)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(90,))
def test_1percentile_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.percentiles_performance(statsd_module, "1 percentile, 10000 vectors of 10 samples", 10, 10000, 10, prof)
self.percentiles_performance(statsd_module, "1 percentile, 1000 vectors of 100 samples", 100, 1000, 10, prof)
self.percentiles_performance(statsd_module, "1 percentile, 100 vectors of 1000 samples", 1000, 100, 10, prof)
self.percentiles_performance(statsd_module, "1 percentile, 10 vectors of 10000 samples", 10000, 10, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(50, 90, 99))
def test_3percentiles_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.percentiles_performance(statsd_module, "3 percentiles, 10000 vectors of 10 samples", 10, 10000, 10, prof)
self.percentiles_performance(statsd_module, "3 percentiles, 1000 vectors of 100 samples", 100, 1000, 10, prof)
self.percentiles_performance(statsd_module, "3 percentiles, 100 vectors of 1000 samples", 1000, 100, 10, prof)
self.percentiles_performance(statsd_module, "3 percentiles, 10 vectors of 10000 samples", 10000, 10, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 10000000), percentile_thresholds=(10, 20, 30, 40, 50, 60, 70, 80, 90, 100))
def test_10percentiles_performance(self, statsd_module):
prof = self.prepare_performance_test()
self.percentiles_performance(statsd_module, "10 percentiles, 10000 vectors of 10 samples", 10, 10000, 10, prof)
self.percentiles_performance(statsd_module, "10 percentiles, 1000 vectors of 100 samples", 100, 1000, 10, prof)
self.percentiles_performance(statsd_module, "10 percentiles, 100 vectors of 1000 samples", 1000, 100, 10, prof)
self.percentiles_performance(statsd_module, "10 percentiles, 10 vectors of 10000 samples", 10000, 10, 10, prof)
self.close_performance_test(prof)
@statsd_setup(timestamps=range(1, 100))
def test_datadog_metadata(self, statsd_module):
mock_pipe = statsd_module.dst_pipes[0]
statsd_module.handle_line(0, "gorm:1.5|c")
statsd_module.handle_line(0, "gorm:2.0|c|#a:b")
statsd_module.handle_line(0, "gorm:2.5|c|#a:b,c:5")
statsd_module.handle_line(0, "gorm:3.0|c|#a:z,c:5")
statsd_module.handle_line(0, "gorm:3.5|c|#c:5,a:b")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='b')),
('stats_counters', dict(rate=6.0, count=6.0), 1, dict(name='gorm', a='b', c='5')),
('stats_counters', dict(rate=3.0, count=3.0), 1, dict(name='gorm', a='z', c='5')),
])
statsd_module.handle_line(1, "gorm:4.0|c|#c:5,a:z")
statsd_module.tick()
statsd_verify(mock_pipe, [
('stats_counters', dict(rate=4.0, count=4.0), 2, dict(name='gorm', a='z', c='5')),
])
statsd_module.tick()
statsd_verify(mock_pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_escaped_metadata(self, statsd_module):
    """Backslash escapes inside '#key=value' tags.

    An escaped comma keeps a literal comma in the tag value instead of
    starting a new tag; other backslash-escaped characters are unescaped.
    """
    pipe = statsd_module.dst_pipes[0]
    for line in ("gorm:1.5|c",
                 "gorm:2.0|c|#a=bcd",
                 r"gorm:2.5|c|#a=b\c\d",
                 r"gorm:3.5|c|#a=b\,c=d",
                 r"gorm:5.5|c|#a=b\,,c=d",
                 r"gorm:7.5|c|#a=b\nc,d=e"):
        statsd_module.handle_line(0, line)
    statsd_module.tick()
    statsd_verify(pipe, [
        ('stats_counters', dict(rate=1.5, count=1.5), 1, dict(name='gorm')),
        ('stats_counters', dict(rate=4.5, count=4.5), 1, dict(name='gorm', a='bcd')),
        ('stats_counters', dict(rate=3.5, count=3.5), 1, dict(name='gorm', a='b,c=d')),
        ('stats_counters', dict(rate=5.5, count=5.5), 1, dict(name='gorm', a='b,', c='d')),
        ('stats_counters', dict(rate=7.5, count=7.5), 1, dict(name='gorm', a='b\nc', d='e')),
    ])
    # An idle tick produces no output.
    statsd_module.tick()
    statsd_verify(pipe, [])
@statsd_setup(timestamps=range(1, 100))
def test_case_sensitivity(self, statsd_module):
    """Metric names, tag keys and tag values are all case sensitive.

    Differently-cased variants must therefore each form their own series.
    """
    pipe = statsd_module.dst_pipes[0]
    for line in ("gorm:2.0|c|#a=bcd",
                 "goRM:1.0|c|#a=BCD",
                 "gorm:2.5|c|#A=bcd",
                 "gorm:3.5|c|#a=Bcd"):
        statsd_module.handle_line(0, line)
    statsd_module.tick()
    statsd_verify(pipe, [
        ('stats_counters', dict(rate=2.0, count=2.0), 1, dict(name='gorm', a='bcd')),
        ('stats_counters', dict(rate=1.0, count=1.0), 1, dict(name='goRM', a='BCD')),
        ('stats_counters', dict(rate=2.5, count=2.5), 1, dict(name='gorm', A='bcd')),
        ('stats_counters', dict(rate=3.5, count=3.5), 1, dict(name='gorm', a='Bcd')),
    ])
    # An idle tick produces no output.
    statsd_module.tick()
    statsd_verify(pipe, [])
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import os
import cv2
import numpy as np
import torch
# from utils import cropping as fp
from csl_common.utils import nn, cropping
from csl_common import utils
from landmarks import fabrec
from torchvision import transforms as tf
from landmarks import lmvis
snapshot_dir = os.path.join('.')
INPUT_SIZE = 256
# Preprocessing pipeline: center-crop to the network input size, convert to a
# tensor, then subtract the per-channel mean (std is left at 1).
crop_to_tensor = tf.Compose([
    utils.transforms.CenterCrop(INPUT_SIZE),
    utils.transforms.ToTensor(),
    utils.transforms.Normalize([0.518, 0.418, 0.361], [1, 1, 1]),
])
def load_image(im_dir, fname):
    """Load an image from *im_dir*/*fname* and return it as an HxWx3 RGB array.

    Grayscale inputs are expanded to three channels and RGBA inputs have
    their alpha channel dropped, so callers always receive RGB.

    Raises IOError if the image cannot be loaded.
    """
    from skimage import io
    path = os.path.join(im_dir, fname)
    image = io.imread(path)
    if image is None:
        raise IOError("\tError: Could not load image {}!".format(path))
    if len(image.shape) == 2 or image.shape[2] == 1:
        # Single-channel input: replicate to RGB.
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    elif image.shape[2] == 4:
        # Drop the alpha channel.
        print(fname, "converting RGBA to RGB...")
        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
    assert image.shape[2] == 3, "{}, invalid format: {}".format(path, image.shape)
    return image
def detect_in_crop(net, crop):
    """Run landmark detection on a crop batch without tracking gradients.

    Returns the reconstructed image(s), the landmarks as a numpy array of
    shape (1, N, 2), and the landmark heatmaps.
    """
    with torch.no_grad():
        recon, landmarks, heatmaps = net.detect_landmarks(crop)
    points = utils.nn.to_numpy(landmarks.reshape(1, -1, 2))
    return recon, points, heatmaps
def test_crop(net, input_image, gt_landmarks, bb_for_crop=None, lms_for_crop=None, align=False, scale=1.0):
    """Crop a face from *input_image*, detect landmarks and visualize them.

    Either a bounding box (*bb_for_crop*) or reference landmarks
    (*lms_for_crop*) must be supplied to position the crop.
    """
    assert bb_for_crop is not None or lms_for_crop is not None
    cropper = cropping.FaceCrop(input_image, bbox=bb_for_crop, landmarks=lms_for_crop,
                                align_face_orientation=align, scale=scale,
                                output_size=(INPUT_SIZE, INPUT_SIZE))
    crop = cropper.apply_to_image()
    # Keep the un-normalized landmarks: the visualizer below expects them,
    # not the transformed copy inside the sample dict.
    landmarks = cropper.apply_to_landmarks(gt_landmarks)[0]
    sample = crop_to_tensor({'image': crop, 'landmarks': landmarks, 'pose': None})
    batch = nn.atleast4d(sample['image']).cuda()
    recon, lms, lm_heatmaps = detect_in_crop(net, batch)
    lmvis.visualize_batch_CVPR(batch, landmarks, recon, lm_heatmaps, lms, wait=0,
                               horizontal=True, show_recon=True, radius=2, draw_wireframes=True)
if __name__ == '__main__':
    # Pretrained demo snapshot directory (98-landmark WFLW-style model).
    model = './data/models/snapshots/demo'
    net = fabrec.load_net(model, num_landmarks=98)
    net.eval()
    im_dir = './images'
    img0 = 'ada.jpg'
    with torch.no_grad():
        img = load_image(im_dir, img0)
        # Shrink the full-image bounding box so the crop focuses on the face.
        scalef = 0.65
        bb0 = [0,0] + list(img.shape[:2][::-1])  # full image as [x0, y0, width, height]
        bb = utils.geometry.scaleBB(bb0, scalef, scalef, typeBB=2)
        test_crop(net, img, gt_landmarks=None, bb_for_crop=bb)
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.views.generic import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
class NewFatpercentageView(LoginRequiredMixin, TemplateView):
    """Render the page for recording a new body-fat percentage entry.

    Requires an authenticated user; anonymous visitors are redirected to
    the login page by LoginRequiredMixin.
    """
    template_name = "new_fatpercentage.html"
class FatpercentageView(LoginRequiredMixin, TemplateView):
    """Render the body-fat percentage statistics page.

    Requires an authenticated user; anonymous visitors are redirected to
    the login page by LoginRequiredMixin.
    """
    template_name = "fatpercentage_stats.html"
|
nilq/baby-python
|
python
|
import sys
from io import StringIO
def io_sys_stdin():
    """Read standard input line by line and print each line's tokens.

    Every line is split on whitespace and the resulting token list is
    printed. On an interactive terminal, Ctrl+D ends the input.
    """
    for line in sys.stdin:
        tokens = line.split()
        print(tokens)
def io_input():
    """Read one line with input() and echo it back.

    input() reads up to (and discards) the newline. Python 3 has no
    raw_input(); input() takes its place and never evaluates the text.
    """
    text = input()
    print(text)
def redirectStdin():
    """Redirect standard input to an in-memory StringIO buffer.

    After the redirect, iterating sys.stdin yields the buffer content
    split on newlines; each line's (single-element) token list is printed.
    NOTE: sys.stdin is left pointing at the buffer after this returns.
    """
    sys.stdin = StringIO("line1\nlin2\nlin3")
    for line in sys.stdin:
        print(line.split())
if __name__ == "__main__":
    # io_sys_stdin() is not called here: it would block waiting for EOF
    # on an interactive terminal.
    print("\ninput()")
    io_input()
    print("\nredirect stdin")
    redirectStdin()
|
nilq/baby-python
|
python
|
#
# Copyright 2014 Thomas Rabaix <thomas.rabaix@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from element.node import NodeHandler
class IndexView(object):
    """Entry page of the ioc stats section."""

    def __init__(self, container):
        # Service container, kept for parity with the other stats views.
        self.container = container

    def execute(self, request_handler, context):
        """Return the (status, template, params) triple for the index page."""
        status, template, params = 200, 'ioc.extra.stats:index.html', {}
        return status, template, params
class ParametersView(NodeHandler):
    """Render the container's parameters through the jinja2 service."""

    def __init__(self, container):
        self.container = container

    def execute(self, request_handler, context, type=None):
        """Render the parameters template with the container's parameters."""
        template_params = {
            'parameters': self.container.parameters,
            'context': context,
        }
        jinja = self.container.get('ioc.extra.jinja2')
        return self.render(request_handler, jinja, 'ioc.extra.stats:parameters.html', template_params)
class ServicesView(NodeHandler):
    """List the services registered in the container."""

    def __init__(self, container):
        self.container = container

    def execute(self, request_handler, context):
        """Set the page title and return the (status, template, params) triple."""
        context.node.title = "Services"
        params = {
            'services': self.container.services,
            'context': context,
        }
        return 200, 'ioc.extra.stats:services.html', params
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.